diff --git a/BUILD.bazel b/BUILD.bazel index 7f7441ab55..5fd776494f 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -1243,8 +1243,8 @@ filegroup( "src/codegen/tick-counter.h", "src/codegen/tnode.cc", "src/codegen/tnode.h", - "src/codegen/turbo-assembler.cc", - "src/codegen/turbo-assembler.h", + "src/codegen/macro-assembler-base.cc", + "src/codegen/macro-assembler-base.h", "src/codegen/unoptimized-compilation-info.cc", "src/codegen/unoptimized-compilation-info.h", "src/common/assert-scope.cc", diff --git a/BUILD.gn b/BUILD.gn index 8d1a7b496d..8ed736b5c0 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -2833,6 +2833,7 @@ v8_header_set("v8_internal_headers") { "src/codegen/interface-descriptors.h", "src/codegen/label.h", "src/codegen/machine-type.h", + "src/codegen/macro-assembler-base.h", "src/codegen/macro-assembler-inl.h", "src/codegen/macro-assembler.h", "src/codegen/maglev-safepoint-table.h", @@ -2853,7 +2854,6 @@ v8_header_set("v8_internal_headers") { "src/codegen/source-position.h", "src/codegen/tick-counter.h", "src/codegen/tnode.h", - "src/codegen/turbo-assembler.h", "src/codegen/unoptimized-compilation-info.h", "src/common/assert-scope.h", "src/common/checks.h", @@ -4581,6 +4581,7 @@ v8_source_set("v8_base_without_compiler") { "src/codegen/handler-table.cc", "src/codegen/interface-descriptors.cc", "src/codegen/machine-type.cc", + "src/codegen/macro-assembler-base.cc", "src/codegen/maglev-safepoint-table.cc", "src/codegen/optimized-compilation-info.cc", "src/codegen/pending-optimization-table.cc", @@ -4591,7 +4592,6 @@ v8_source_set("v8_base_without_compiler") { "src/codegen/source-position.cc", "src/codegen/tick-counter.cc", "src/codegen/tnode.cc", - "src/codegen/turbo-assembler.cc", "src/codegen/unoptimized-compilation-info.cc", "src/common/assert-scope.cc", "src/common/code-memory-access.cc", @@ -5163,7 +5163,7 @@ v8_source_set("v8_base_without_compiler") { if (v8_enable_webassembly) { # Trap handling is enabled on arm64 Mac and in simulators on x64 on Linux, # Mac, and Windows. - if ((current_cpu == "arm64" && is_mac) || + if ((current_cpu == "arm64" && is_apple) || (current_cpu == "x64" && (is_linux || is_chromeos || is_mac))) { sources += [ "src/trap-handler/handler-inside-posix.cc", diff --git a/DEPS b/DEPS index c7baa99eb6..c50382841f 100644 --- a/DEPS +++ b/DEPS @@ -63,12 +63,12 @@ vars = { 'ninja_version': 'version:2@1.11.1.chromium.6', # luci-go CIPD package version. - 'luci_go': 'git_revision:c41d94e382727fc5276cd2771741990543fce337', + 'luci_go': 'git_revision:46eca1e3a280c340bf58f967aaded13c87ca3859', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling Fuchsia sdk # and whatever else without interference from each other. 
- 'fuchsia_version': 'version:11.20230131.1.1', + 'fuchsia_version': 'version:11.20230202.3.1', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -106,11 +106,11 @@ vars = { deps = { 'base/trace_event/common': - Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '68e6038b5350cba18c341cc7c572170af5c5b20c', + Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '05a225a3e0bbd6fb6a9cac02d482ab784194411d', 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + 'e0df145ecb560e48381b6dccf3b9c8b31aa95bcd', + Var('chromium_url') + '/chromium/src/build.git' + '@' + 'd0fad164969ab7f41f163f9ee738ea692f43df53', 'buildtools': - Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '295c6e5037e358904aef73a21409896d58547ba6', + Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '5408fe0e010a7d36bb2684d5f38df67dcdfe31de', 'buildtools/clang_format/script': Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + 'f97059df7f8b205064625cdb5f97b56668a125ef', 'buildtools/linux64': { @@ -134,7 +134,7 @@ deps = { 'condition': 'host_os == "mac"', }, 'buildtools/third_party/libc++/trunk': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '59bae40d835ae4eabaddbef781f5e3b778dd7907', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '549781a48cef7a038cadbe8ae9034c2d63685d9a', 'buildtools/third_party/libc++abi/trunk': Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + 'b74d7716111d7eda5c03cb8f5dfc940e1c2c0030', 'buildtools/third_party/libunwind/trunk': @@ -164,7 +164,7 @@ deps = { 'test/mozilla/data': Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be', 'test/test262/data': - Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'f00d4118dba5d266d1611ba2cd4e995d3e4b523a', + Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'e7364ea7dc36a466edb2db5ef0a8e66da8dabb7d', 'third_party/android_ndk': { 'url': Var('chromium_url') + '/android_ndk.git' + '@' + '8388a2be5421311dc75c5f937aae13d821a27f3d', 'condition': 'checkout_android', @@ -212,7 +212,7 @@ deps = { 'dep_type': 'cipd', }, 'third_party/catapult': { - 'url': Var('chromium_url') + '/catapult.git' + '@' + '5a468ccd919e16a29bb3121e3c90f27bf8745942', + 'url': Var('chromium_url') + '/catapult.git' + '@' + 'd0d703ea303c91f3afe39ebf8d2d4c9342accedc', 'condition': 'checkout_android', }, 'third_party/colorama/src': { @@ -220,7 +220,7 @@ deps = { 'condition': 'checkout_android', }, 'third_party/depot_tools': - Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '3d072ab6fb49fd3d2116a41cee66d47c3d409299', + Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'ef2d011ad3041801565aa8c6d1418cc82c0ddb2e', 'third_party/fuchsia-sdk/sdk': { 'packages': [ { @@ -237,9 +237,9 @@ deps = { 'third_party/googletest/src': Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'af29db7ec28d6df1c7f0f745186884091e602e07', 'third_party/icu': - Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '2c51e5cc7e0a06cd4cd7cb2ddbac445af9b475ba', + Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '266a46937f05303da1ac4c68f2c94f9a1caa3f76', 'third_party/instrumented_libraries': - Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 
'09ba70cfb2c0d01c60684660e357ae200caf2968', + Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '63d81e44712888bf70d574d5a96fa591994b9acc', 'third_party/ittapi': { # Force checkout ittapi libraries to pass v8 header includes check on # bots that has check_v8_header_includes enabled. diff --git a/WATCHLISTS b/WATCHLISTS index 9da8c5b6c0..021be1f710 100644 --- a/WATCHLISTS +++ b/WATCHLISTS @@ -104,6 +104,9 @@ 'trap-handler': { 'filepath': 'src/trap-handler/', }, + 'tests': { + 'filepath': 'test/', + }, }, 'WATCHLISTS': { @@ -124,6 +127,7 @@ ], 'feature_shipping_status': [ 'hablich@chromium.org', + 'saelo+watch@chromium.org', ], 'heap_changes': [ 'hpayer@chromium.org', @@ -176,5 +180,8 @@ 'mark@chromium.org', 'mseaborn@chromium.org', ], + 'tests': [ + 'almuthanna+watch@chromium.org', + ], }, } diff --git a/include/cppgc/internal/member-storage.h b/include/cppgc/internal/member-storage.h index cc803bdcb1..5c78e80f9b 100644 --- a/include/cppgc/internal/member-storage.h +++ b/include/cppgc/internal/member-storage.h @@ -17,6 +17,11 @@ namespace cppgc { namespace internal { +enum class WriteBarrierSlotType { + kCompressed, + kUncompressed, +}; + #if defined(CPPGC_POINTER_COMPRESSION) #if defined(__clang__) @@ -64,6 +69,8 @@ class CageBaseGlobal final { class V8_TRIVIAL_ABI CompressedPointer final { public: using IntegralType = uint32_t; + static constexpr auto kWriteBarrierSlotType = + WriteBarrierSlotType::kCompressed; V8_INLINE CompressedPointer() : value_(0u) {} V8_INLINE explicit CompressedPointer(const void* ptr) @@ -173,6 +180,8 @@ class V8_TRIVIAL_ABI CompressedPointer final { class V8_TRIVIAL_ABI RawPointer final { public: using IntegralType = uintptr_t; + static constexpr auto kWriteBarrierSlotType = + WriteBarrierSlotType::kUncompressed; V8_INLINE RawPointer() : ptr_(nullptr) {} V8_INLINE explicit RawPointer(const void* ptr) : ptr_(ptr) {} diff --git a/include/cppgc/internal/pointer-policies.h b/include/cppgc/internal/pointer-policies.h index a513968312..06fa884f49 100644 --- a/include/cppgc/internal/pointer-policies.h +++ b/include/cppgc/internal/pointer-policies.h @@ -33,10 +33,11 @@ struct DijkstraWriteBarrierPolicy { // barrier doesn't break the tri-color invariant. 
} + template V8_INLINE static void AssigningBarrier(const void* slot, const void* value) { #ifdef CPPGC_SLIM_WRITE_BARRIER if (V8_UNLIKELY(WriteBarrier::IsEnabled())) - WriteBarrier::CombinedWriteBarrierSlow(slot); + WriteBarrier::CombinedWriteBarrierSlow(slot); #else // !CPPGC_SLIM_WRITE_BARRIER WriteBarrier::Params params; const WriteBarrier::Type type = @@ -45,12 +46,14 @@ struct DijkstraWriteBarrierPolicy { #endif // !CPPGC_SLIM_WRITE_BARRIER } - template - V8_INLINE static void AssigningBarrier(const void* slot, - MemberStorage storage) { + template + V8_INLINE static void AssigningBarrier(const void* slot, RawPointer storage) { + static_assert( + SlotType == WriteBarrierSlotType::kUncompressed, + "Assigning storages of Member and UncompressedMember is not supported"); #ifdef CPPGC_SLIM_WRITE_BARRIER if (V8_UNLIKELY(WriteBarrier::IsEnabled())) - WriteBarrier::CombinedWriteBarrierSlow(slot); + WriteBarrier::CombinedWriteBarrierSlow(slot); #else // !CPPGC_SLIM_WRITE_BARRIER WriteBarrier::Params params; const WriteBarrier::Type type = @@ -59,6 +62,25 @@ struct DijkstraWriteBarrierPolicy { #endif // !CPPGC_SLIM_WRITE_BARRIER } +#if defined(CPPGC_POINTER_COMPRESSION) + template + V8_INLINE static void AssigningBarrier(const void* slot, + CompressedPointer storage) { + static_assert( + SlotType == WriteBarrierSlotType::kCompressed, + "Assigning storages of Member and UncompressedMember is not supported"); +#ifdef CPPGC_SLIM_WRITE_BARRIER + if (V8_UNLIKELY(WriteBarrier::IsEnabled())) + WriteBarrier::CombinedWriteBarrierSlow(slot); +#else // !CPPGC_SLIM_WRITE_BARRIER + WriteBarrier::Params params; + const WriteBarrier::Type type = + WriteBarrier::GetWriteBarrierType(slot, storage, params); + WriteBarrier(type, params, slot, storage.Load()); +#endif // !CPPGC_SLIM_WRITE_BARRIER + } +#endif // defined(CPPGC_POINTER_COMPRESSION) + private: V8_INLINE static void WriteBarrier(WriteBarrier::Type type, const WriteBarrier::Params& params, @@ -79,8 +101,9 @@ struct DijkstraWriteBarrierPolicy { struct NoWriteBarrierPolicy { V8_INLINE static void InitializingBarrier(const void*, const void*) {} + template V8_INLINE static void AssigningBarrier(const void*, const void*) {} - template + template V8_INLINE static void AssigningBarrier(const void*, MemberStorage) {} }; diff --git a/include/cppgc/internal/write-barrier.h b/include/cppgc/internal/write-barrier.h index ec941b171a..566724d30a 100644 --- a/include/cppgc/internal/write-barrier.h +++ b/include/cppgc/internal/write-barrier.h @@ -84,6 +84,7 @@ class V8_EXPORT WriteBarrier final { // A write barrier that combines `GenerationalBarrier()` and // `DijkstraMarkingBarrier()`. We only pass a single parameter here to clobber // as few registers as possible. 
+ template static V8_NOINLINE void V8_PRESERVE_MOST CombinedWriteBarrierSlow(const void* slot); #endif // CPPGC_SLIM_WRITE_BARRIER diff --git a/include/cppgc/member.h b/include/cppgc/member.h index 13d92b055d..b6382a0235 100644 --- a/include/cppgc/member.h +++ b/include/cppgc/member.h @@ -309,11 +309,13 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase, WriteBarrierPolicy::InitializingBarrier(Base::GetRawSlot(), value); } V8_INLINE void AssigningWriteBarrier(T* value) const { - WriteBarrierPolicy::AssigningBarrier(Base::GetRawSlot(), value); + WriteBarrierPolicy::template AssigningBarrier< + StorageType::kWriteBarrierSlotType>(Base::GetRawSlot(), value); } V8_INLINE void AssigningWriteBarrier() const { - WriteBarrierPolicy::AssigningBarrier(Base::GetRawSlot(), - Base::GetRawStorage()); + WriteBarrierPolicy::template AssigningBarrier< + StorageType::kWriteBarrierSlotType>(Base::GetRawSlot(), + Base::GetRawStorage()); } V8_INLINE void ClearFromGC() const { Base::ClearFromGC(); } diff --git a/include/v8-context.h b/include/v8-context.h index 3ce0eb0af3..0e6dc9a59b 100644 --- a/include/v8-context.h +++ b/include/v8-context.h @@ -365,8 +365,7 @@ Local Context::GetEmbedderData(int index) { #ifdef V8_COMPRESS_POINTERS // We read the full pointer value and then decompress it in order to avoid // dealing with potential endiannes issues. - value = - I::DecompressTaggedAnyField(embedder_data, static_cast(value)); + value = I::DecompressTaggedField(embedder_data, static_cast(value)); #endif internal::Isolate* isolate = internal::IsolateFromNeverReadOnlySpaceObject( *reinterpret_cast(this)); diff --git a/include/v8-internal.h b/include/v8-internal.h index ea03bc3e5a..db30a111f9 100644 --- a/include/v8-internal.h +++ b/include/v8-internal.h @@ -880,7 +880,7 @@ class Internals { return addr & -static_cast(kPtrComprCageBaseAlignment); } - V8_INLINE static internal::Address DecompressTaggedAnyField( + V8_INLINE static internal::Address DecompressTaggedField( internal::Address heap_object_ptr, uint32_t value) { internal::Address base = GetPtrComprCageBaseFromOnHeapAddress(heap_object_ptr); diff --git a/include/v8-isolate.h b/include/v8-isolate.h index 345395b179..d42acffab4 100644 --- a/include/v8-isolate.h +++ b/include/v8-isolate.h @@ -542,6 +542,7 @@ class V8_EXPORT Isolate { kAsyncStackTaggingCreateTaskCall = 116, kDurationFormat = 117, kInvalidatedNumberStringPrototypeNoReplaceProtector = 118, + kRegExpUnicodeSetIncompatibilitiesWithUnicodeMode = 119, // If you add new values here, you'll also need to update Chromium's: // web_feature.mojom, use_counter_callback.cc, and enums.xml. V8 changes to diff --git a/include/v8-object.h b/include/v8-object.h index d7332ba0c8..8f04f33fab 100644 --- a/include/v8-object.h +++ b/include/v8-object.h @@ -717,7 +717,7 @@ Local Object::GetInternalField(int index) { #ifdef V8_COMPRESS_POINTERS // We read the full pointer value and then decompress it in order to avoid // dealing with potential endiannes issues. - value = I::DecompressTaggedAnyField(obj, static_cast(value)); + value = I::DecompressTaggedField(obj, static_cast(value)); #endif internal::Isolate* isolate = internal::IsolateFromNeverReadOnlySpaceObject(obj); diff --git a/include/v8config.h b/include/v8config.h index a967d92c39..b44995e7cd 100644 --- a/include/v8config.h +++ b/include/v8config.h @@ -346,12 +346,15 @@ path. 
Add it with -I to the command line # define V8_HAS_ATTRIBUTE_NONNULL (__has_attribute(nonnull)) # define V8_HAS_ATTRIBUTE_NOINLINE (__has_attribute(noinline)) # define V8_HAS_ATTRIBUTE_UNUSED (__has_attribute(unused)) -// Support for the "preserve_most" attribute is incomplete on 32-bit, and we see -// failures in component builds. Thus only use it in 64-bit non-component builds -// for now. -#if (defined(_M_X64) || defined(__x86_64__) || defined(__AARCH64EL__) || \ - defined(_M_ARM64)) /* x64 or arm64 */ \ - && !defined(COMPONENT_BUILD) +// Support for the "preserve_most" attribute is limited: +// - 32-bit platforms do not implement it, +// - component builds fail because _dl_runtime_resolve clobbers registers, +// - we see crashes on arm64 on Windows (https://crbug.com/1409934), which can +// hopefully be fixed in the future. +#if (defined(_M_X64) || defined(__x86_64__) /* x64 (everywhere) */ \ + || ((defined(__AARCH64EL__) || defined(_M_ARM64)) /* arm64, but ... */ \ + && !defined(_WIN32))) /* not on windows */ \ + && !defined(COMPONENT_BUILD) /* no component build */ # define V8_HAS_ATTRIBUTE_PRESERVE_MOST (__has_attribute(preserve_most)) #endif # define V8_HAS_ATTRIBUTE_VISIBILITY (__has_attribute(visibility)) diff --git a/src/base/platform/platform-darwin.cc b/src/base/platform/platform-darwin.cc index bf360e3136..e4ce573ac3 100644 --- a/src/base/platform/platform-darwin.cc +++ b/src/base/platform/platform-darwin.cc @@ -48,14 +48,13 @@ std::vector OS::GetSharedLibraryAddresses() { for (unsigned int i = 0; i < images_count; ++i) { const mach_header* header = _dyld_get_image_header(i); if (header == nullptr) continue; + unsigned long size; #if V8_HOST_ARCH_I32 - unsigned int size; - char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size); + uint8_t* code_ptr = getsectiondata(header, SEG_TEXT, SECT_TEXT, &size); #else - uint64_t size; - char* code_ptr = getsectdatafromheader_64( - reinterpret_cast(header), SEG_TEXT, SECT_TEXT, - &size); + const mach_header_64* header64 = + reinterpret_cast(header); + uint8_t* code_ptr = getsectiondata(header64, SEG_TEXT, SECT_TEXT, &size); #endif if (code_ptr == nullptr) continue; const intptr_t slide = _dyld_get_image_vmaddr_slide(i); diff --git a/src/base/small-vector.h b/src/base/small-vector.h index a0395f0981..f6a1db5e62 100644 --- a/src/base/small-vector.h +++ b/src/base/small-vector.h @@ -81,13 +81,13 @@ class SmallVector { begin_ = other.begin_; end_ = other.end_; end_of_storage_ = other.end_of_storage_; - other.reset_to_inline_storage(); } else { DCHECK_GE(capacity(), other.size()); // Sanity check. size_t other_size = other.size(); memcpy(begin_, other.begin_, sizeof(T) * other_size); end_ = begin_ + other_size; } + other.reset_to_inline_storage(); return *this; } diff --git a/src/baseline/arm/baseline-assembler-arm-inl.h b/src/baseline/arm/baseline-assembler-arm-inl.h index 15ee64f686..69353a78d5 100644 --- a/src/baseline/arm/baseline-assembler-arm-inl.h +++ b/src/baseline/arm/baseline-assembler-arm-inl.h @@ -309,8 +309,8 @@ void BaselineAssembler::Pop(T... 
registers) { detail::PopAllHelper::Pop(this, registers...); } -void BaselineAssembler::LoadTaggedPointerField(Register output, Register source, - int offset) { +void BaselineAssembler::LoadTaggedField(Register output, Register source, + int offset) { __ ldr(output, FieldMemOperand(source, offset)); } @@ -326,11 +326,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output, SmiUntag(output); } -void BaselineAssembler::LoadTaggedAnyField(Register output, Register source, - int offset) { - __ ldr(output, FieldMemOperand(source, offset)); -} - void BaselineAssembler::LoadWord16FieldZeroExtend(Register output, Register source, int offset) { __ ldrh(output, FieldMemOperand(source, offset)); @@ -372,8 +367,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result, Label* on_result, Label::Distance) { Label fallthrough; - LoadTaggedPointerField(scratch_and_result, feedback_vector, - FeedbackVector::OffsetOfElementAt(slot.ToInt())); + LoadTaggedField(scratch_and_result, feedback_vector, + FeedbackVector::OffsetOfElementAt(slot.ToInt())); __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough); // Is it marked_for_deoptimization? If yes, clear the slot. @@ -398,8 +393,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); LoadFunction(feedback_cell); - LoadTaggedPointerField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadTaggedField(feedback_cell, feedback_cell, + JSFunction::kFeedbackCellOffset); Register interrupt_budget = scratch_scope.AcquireScratch(); __ ldr(interrupt_budget, @@ -421,8 +416,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); LoadFunction(feedback_cell); - LoadTaggedPointerField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadTaggedField(feedback_cell, feedback_cell, + JSFunction::kFeedbackCellOffset); Register interrupt_budget = scratch_scope.AcquireScratch(); __ ldr(interrupt_budget, @@ -437,16 +432,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( void BaselineAssembler::LdaContextSlot(Register context, uint32_t index, uint32_t depth) { for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, context, Context::kPreviousOffset); } - LoadTaggedAnyField(kInterpreterAccumulatorRegister, context, - Context::OffsetOfElementAt(index)); + LoadTaggedField(kInterpreterAccumulatorRegister, context, + Context::OffsetOfElementAt(index)); } void BaselineAssembler::StaContextSlot(Register context, Register value, uint32_t index, uint32_t depth) { for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, context, Context::kPreviousOffset); } StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index), value); @@ -455,33 +450,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value, void BaselineAssembler::LdaModuleVariable(Register context, int cell_index, uint32_t depth) { for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, context, Context::kPreviousOffset); } - LoadTaggedPointerField(context, context, Context::kExtensionOffset); + LoadTaggedField(context, context, Context::kExtensionOffset); 
if (cell_index > 0) { - LoadTaggedPointerField(context, context, - SourceTextModule::kRegularExportsOffset); + LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset); // The actual array index is (cell_index - 1). cell_index -= 1; } else { - LoadTaggedPointerField(context, context, - SourceTextModule::kRegularImportsOffset); + LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset); // The actual array index is (-cell_index - 1). cell_index = -cell_index - 1; } LoadFixedArrayElement(context, context, cell_index); - LoadTaggedAnyField(kInterpreterAccumulatorRegister, context, - Cell::kValueOffset); + LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset); } void BaselineAssembler::StaModuleVariable(Register context, Register value, int cell_index, uint32_t depth) { for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, context, Context::kPreviousOffset); } - LoadTaggedPointerField(context, context, Context::kExtensionOffset); - LoadTaggedPointerField(context, context, - SourceTextModule::kRegularExportsOffset); + LoadTaggedField(context, context, Context::kExtensionOffset); + LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset); // The actual array index is (cell_index - 1). cell_index -= 1; @@ -570,8 +561,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { __ masm()->LeaveFrame(StackFrame::BASELINE); // Drop receiver + arguments. - __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); __ masm()->Ret(); } diff --git a/src/baseline/arm64/baseline-assembler-arm64-inl.h b/src/baseline/arm64/baseline-assembler-arm64-inl.h index 02256fbd11..11f78981ad 100644 --- a/src/baseline/arm64/baseline-assembler-arm64-inl.h +++ b/src/baseline/arm64/baseline-assembler-arm64-inl.h @@ -369,9 +369,9 @@ void BaselineAssembler::Pop(T... 
registers) { detail::PopAllHelper::Pop(this, registers...); } -void BaselineAssembler::LoadTaggedPointerField(Register output, Register source, - int offset) { - __ LoadTaggedPointerField(output, FieldMemOperand(source, offset)); +void BaselineAssembler::LoadTaggedField(Register output, Register source, + int offset) { + __ LoadTaggedField(output, FieldMemOperand(source, offset)); } void BaselineAssembler::LoadTaggedSignedField(Register output, Register source, @@ -386,11 +386,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output, SmiUntag(output); } -void BaselineAssembler::LoadTaggedAnyField(Register output, Register source, - int offset) { - __ LoadAnyTaggedField(output, FieldMemOperand(source, offset)); -} - void BaselineAssembler::LoadWord16FieldZeroExtend(Register output, Register source, int offset) { __ Ldrh(output, FieldMemOperand(source, offset)); @@ -440,8 +435,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); LoadFunction(feedback_cell); - LoadTaggedPointerField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadTaggedField(feedback_cell, feedback_cell, + JSFunction::kFeedbackCellOffset); Register interrupt_budget = scratch_scope.AcquireScratch().W(); __ Ldr(interrupt_budget, @@ -463,8 +458,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); LoadFunction(feedback_cell); - LoadTaggedPointerField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadTaggedField(feedback_cell, feedback_cell, + JSFunction::kFeedbackCellOffset); Register interrupt_budget = scratch_scope.AcquireScratch().W(); __ Ldr(interrupt_budget, @@ -479,16 +474,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( void BaselineAssembler::LdaContextSlot(Register context, uint32_t index, uint32_t depth) { for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, context, Context::kPreviousOffset); } - LoadTaggedAnyField(kInterpreterAccumulatorRegister, context, - Context::OffsetOfElementAt(index)); + LoadTaggedField(kInterpreterAccumulatorRegister, context, + Context::OffsetOfElementAt(index)); } void BaselineAssembler::StaContextSlot(Register context, Register value, uint32_t index, uint32_t depth) { for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, context, Context::kPreviousOffset); } StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index), value); @@ -497,33 +492,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value, void BaselineAssembler::LdaModuleVariable(Register context, int cell_index, uint32_t depth) { for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, context, Context::kPreviousOffset); } - LoadTaggedPointerField(context, context, Context::kExtensionOffset); + LoadTaggedField(context, context, Context::kExtensionOffset); if (cell_index > 0) { - LoadTaggedPointerField(context, context, - SourceTextModule::kRegularExportsOffset); + LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset); // The actual array index is (cell_index - 1). 
cell_index -= 1; } else { - LoadTaggedPointerField(context, context, - SourceTextModule::kRegularImportsOffset); + LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset); // The actual array index is (-cell_index - 1). cell_index = -cell_index - 1; } LoadFixedArrayElement(context, context, cell_index); - LoadTaggedAnyField(kInterpreterAccumulatorRegister, context, - Cell::kValueOffset); + LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset); } void BaselineAssembler::StaModuleVariable(Register context, Register value, int cell_index, uint32_t depth) { for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, context, Context::kPreviousOffset); } - LoadTaggedPointerField(context, context, Context::kExtensionOffset); - LoadTaggedPointerField(context, context, - SourceTextModule::kRegularExportsOffset); + LoadTaggedField(context, context, Context::kExtensionOffset); + LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset); // The actual array index is (cell_index - 1). cell_index -= 1; @@ -571,7 +562,7 @@ void BaselineAssembler::Switch(Register reg, int case_value_base, { const int instruction_count = num_labels * instructions_per_label + instructions_per_jump_target; - TurboAssembler::BlockPoolsScope block_pools(masm_, + MacroAssembler::BlockPoolsScope block_pools(masm_, instruction_count * kInstrSize); __ Bind(&table); for (int i = 0; i < num_labels; ++i) { @@ -630,7 +621,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { __ masm()->LeaveFrame(StackFrame::BASELINE); // Drop receiver + arguments. - __ masm()->DropArguments(params_size, TurboAssembler::kCountIncludesReceiver); + __ masm()->DropArguments(params_size, MacroAssembler::kCountIncludesReceiver); __ masm()->Ret(); } diff --git a/src/baseline/baseline-assembler-inl.h b/src/baseline/baseline-assembler-inl.h index f692af4e13..71c4e7a5eb 100644 --- a/src/baseline/baseline-assembler-inl.h +++ b/src/baseline/baseline-assembler-inl.h @@ -114,13 +114,12 @@ void BaselineAssembler::SmiUntag(Register output, Register value) { void BaselineAssembler::LoadFixedArrayElement(Register output, Register array, int32_t index) { - LoadTaggedAnyField(output, array, - FixedArray::kHeaderSize + index * kTaggedSize); + LoadTaggedField(output, array, FixedArray::kHeaderSize + index * kTaggedSize); } void BaselineAssembler::LoadPrototype(Register prototype, Register object) { __ LoadMap(prototype, object); - LoadTaggedPointerField(prototype, prototype, Map::kPrototypeOffset); + LoadTaggedField(prototype, prototype, Map::kPrototypeOffset); } void BaselineAssembler::LoadContext(Register output) { LoadRegister(output, interpreter::Register::current_context()); diff --git a/src/baseline/baseline-assembler.h b/src/baseline/baseline-assembler.h index d661c52289..9b0f612f94 100644 --- a/src/baseline/baseline-assembler.h +++ b/src/baseline/baseline-assembler.h @@ -147,13 +147,11 @@ class BaselineAssembler { inline void TailCallBuiltin(Builtin builtin); inline void CallRuntime(Runtime::FunctionId function, int nargs); - inline void LoadTaggedPointerField(Register output, Register source, - int offset); + inline void LoadTaggedField(Register output, Register source, int offset); inline void LoadTaggedSignedField(Register output, Register source, int offset); inline void LoadTaggedSignedFieldAndUntag(Register output, Register source, int offset); - inline void LoadTaggedAnyField(Register output, Register source, int offset); 
inline void LoadWord16FieldZeroExtend(Register output, Register source, int offset); inline void LoadWord8Field(Register output, Register source, int offset); @@ -170,16 +168,12 @@ class BaselineAssembler { // X64 supports complex addressing mode, pointer decompression can be done by // [%compressed_base + %r1 + K]. #if V8_TARGET_ARCH_X64 - inline void LoadTaggedPointerField(TaggedRegister output, Register source, - int offset); - inline void LoadTaggedPointerField(TaggedRegister output, - TaggedRegister source, int offset); - inline void LoadTaggedPointerField(Register output, TaggedRegister source, - int offset); - inline void LoadTaggedAnyField(Register output, TaggedRegister source, - int offset); - inline void LoadTaggedAnyField(TaggedRegister output, TaggedRegister source, - int offset); + inline void LoadTaggedField(TaggedRegister output, Register source, + int offset); + inline void LoadTaggedField(TaggedRegister output, TaggedRegister source, + int offset); + inline void LoadTaggedField(Register output, TaggedRegister source, + int offset); inline void LoadFixedArrayElement(Register output, TaggedRegister array, int32_t index); inline void LoadFixedArrayElement(TaggedRegister output, TaggedRegister array, diff --git a/src/baseline/baseline-compiler.cc b/src/baseline/baseline-compiler.cc index 6d611d66fe..9d34c07d43 100644 --- a/src/baseline/baseline-compiler.cc +++ b/src/baseline/baseline-compiler.cc @@ -439,8 +439,8 @@ void BaselineCompiler::LoadFeedbackVector(Register output) { void BaselineCompiler::LoadClosureFeedbackArray(Register output) { LoadFeedbackVector(output); - __ LoadTaggedPointerField(output, output, - FeedbackVector::kClosureFeedbackCellArrayOffset); + __ LoadTaggedField(output, output, + FeedbackVector::kClosureFeedbackCellArrayOffset); } void BaselineCompiler::SelectBooleanConstant( @@ -754,8 +754,8 @@ void BaselineCompiler::VisitLdaCurrentContextSlot() { BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_); Register context = scratch_scope.AcquireScratch(); __ LoadContext(context); - __ LoadTaggedAnyField(kInterpreterAccumulatorRegister, context, - Context::OffsetOfElementAt(Index(0))); + __ LoadTaggedField(kInterpreterAccumulatorRegister, context, + Context::OffsetOfElementAt(Index(0))); } void BaselineCompiler::VisitLdaImmutableCurrentContextSlot() { @@ -1350,9 +1350,9 @@ void BaselineCompiler::VisitIntrinsicCreateJSGeneratorObject( void BaselineCompiler::VisitIntrinsicGeneratorGetResumeMode( interpreter::RegisterList args) { __ LoadRegister(kInterpreterAccumulatorRegister, args[0]); - __ LoadTaggedAnyField(kInterpreterAccumulatorRegister, - kInterpreterAccumulatorRegister, - JSGeneratorObject::kResumeModeOffset); + __ LoadTaggedField(kInterpreterAccumulatorRegister, + kInterpreterAccumulatorRegister, + JSGeneratorObject::kResumeModeOffset); } void BaselineCompiler::VisitIntrinsicGeneratorClose( @@ -2211,8 +2211,8 @@ void BaselineCompiler::VisitSwitchOnGeneratorState() { Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)); Register context = scratch_scope.AcquireScratch(); - __ LoadTaggedAnyField(context, generator_object, - JSGeneratorObject::kContextOffset); + __ LoadTaggedField(context, generator_object, + JSGeneratorObject::kContextOffset); __ StoreContext(context); interpreter::JumpTableTargetOffsets offsets = diff --git a/src/baseline/ia32/baseline-assembler-ia32-inl.h b/src/baseline/ia32/baseline-assembler-ia32-inl.h index 68aa0eeab6..ba475f0c5a 100644 --- a/src/baseline/ia32/baseline-assembler-ia32-inl.h +++ 
b/src/baseline/ia32/baseline-assembler-ia32-inl.h @@ -293,8 +293,8 @@ void BaselineAssembler::Pop(T... registers) { (__ Pop(registers), ...); } -void BaselineAssembler::LoadTaggedPointerField(Register output, Register source, - int offset) { +void BaselineAssembler::LoadTaggedField(Register output, Register source, + int offset) { __ mov(output, FieldOperand(source, offset)); } @@ -310,11 +310,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output, SmiUntag(output); } -void BaselineAssembler::LoadTaggedAnyField(Register output, Register source, - int offset) { - __ mov(output, FieldOperand(source, offset)); -} - void BaselineAssembler::LoadWord16FieldZeroExtend(Register output, Register source, int offset) { __ movzx_w(output, FieldOperand(source, offset)); @@ -354,8 +349,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result, Label* on_result, Label::Distance distance) { Label fallthrough; - LoadTaggedPointerField(scratch_and_result, feedback_vector, - FeedbackVector::OffsetOfElementAt(slot.ToInt())); + LoadTaggedField(scratch_and_result, feedback_vector, + FeedbackVector::OffsetOfElementAt(slot.ToInt())); __ LoadWeakValue(scratch_and_result, &fallthrough); // Is it marked_for_deoptimization? If yes, clear the slot. @@ -378,8 +373,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); LoadFunction(feedback_cell); - LoadTaggedPointerField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadTaggedField(feedback_cell, feedback_cell, + JSFunction::kFeedbackCellOffset); __ add(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset), Immediate(weight)); if (skip_interrupt_label) { @@ -395,8 +390,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( Register feedback_cell = scratch_scope.AcquireScratch(); DCHECK(!AreAliased(feedback_cell, weight)); LoadFunction(feedback_cell); - LoadTaggedPointerField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadTaggedField(feedback_cell, feedback_cell, + JSFunction::kFeedbackCellOffset); __ add(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset), weight); if (skip_interrupt_label) __ j(greater_equal, skip_interrupt_label); @@ -405,16 +400,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( void BaselineAssembler::LdaContextSlot(Register context, uint32_t index, uint32_t depth) { for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, context, Context::kPreviousOffset); } - LoadTaggedAnyField(kInterpreterAccumulatorRegister, context, - Context::OffsetOfElementAt(index)); + LoadTaggedField(kInterpreterAccumulatorRegister, context, + Context::OffsetOfElementAt(index)); } void BaselineAssembler::StaContextSlot(Register context, Register value, uint32_t index, uint32_t depth) { for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, context, Context::kPreviousOffset); } StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index), value); @@ -423,33 +418,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value, void BaselineAssembler::LdaModuleVariable(Register context, int cell_index, uint32_t depth) { for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, 
context, Context::kPreviousOffset); } - LoadTaggedPointerField(context, context, Context::kExtensionOffset); + LoadTaggedField(context, context, Context::kExtensionOffset); if (cell_index > 0) { - LoadTaggedPointerField(context, context, - SourceTextModule::kRegularExportsOffset); + LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset); // The actual array index is (cell_index - 1). cell_index -= 1; } else { - LoadTaggedPointerField(context, context, - SourceTextModule::kRegularImportsOffset); + LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset); // The actual array index is (-cell_index - 1). cell_index = -cell_index - 1; } LoadFixedArrayElement(context, context, cell_index); - LoadTaggedAnyField(kInterpreterAccumulatorRegister, context, - Cell::kValueOffset); + LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset); } void BaselineAssembler::StaModuleVariable(Register context, Register value, int cell_index, uint32_t depth) { for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, context, Context::kPreviousOffset); } - LoadTaggedPointerField(context, context, Context::kExtensionOffset); - LoadTaggedPointerField(context, context, - SourceTextModule::kRegularExportsOffset); + LoadTaggedField(context, context, Context::kExtensionOffset); + LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset); // The actual array index is (cell_index - 1). cell_index -= 1; @@ -539,8 +530,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { // Drop receiver + arguments. __ masm()->DropArguments(params_size, scratch, - TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); __ masm()->Ret(); } diff --git a/src/baseline/loong64/baseline-assembler-loong64-inl.h b/src/baseline/loong64/baseline-assembler-loong64-inl.h index 546854e73f..c45d94f6ae 100644 --- a/src/baseline/loong64/baseline-assembler-loong64-inl.h +++ b/src/baseline/loong64/baseline-assembler-loong64-inl.h @@ -296,8 +296,8 @@ void BaselineAssembler::Pop(T... registers) { detail::PopAllHelper::Pop(this, registers...); } -void BaselineAssembler::LoadTaggedPointerField(Register output, Register source, - int offset) { +void BaselineAssembler::LoadTaggedField(Register output, Register source, + int offset) { __ Ld_d(output, FieldMemOperand(source, offset)); } void BaselineAssembler::LoadTaggedSignedField(Register output, Register source, @@ -310,10 +310,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output, LoadTaggedSignedField(output, source, offset); SmiUntag(output); } -void BaselineAssembler::LoadTaggedAnyField(Register output, Register source, - int offset) { - __ Ld_d(output, FieldMemOperand(source, offset)); -} void BaselineAssembler::LoadWord16FieldZeroExtend(Register output, Register source, int offset) { __ Ld_hu(output, FieldMemOperand(source, offset)); @@ -350,8 +346,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result, Label* on_result, Label::Distance) { Label fallthrough; - LoadTaggedPointerField(scratch_and_result, feedback_vector, - FeedbackVector::OffsetOfElementAt(slot.ToInt())); + LoadTaggedField(scratch_and_result, feedback_vector, + FeedbackVector::OffsetOfElementAt(slot.ToInt())); __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough); // Is it marked_for_deoptimization? If yes, clear the slot. 
{ @@ -374,8 +370,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); LoadFunction(feedback_cell); - LoadTaggedPointerField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadTaggedField(feedback_cell, feedback_cell, + JSFunction::kFeedbackCellOffset); Register interrupt_budget = scratch_scope.AcquireScratch(); __ Ld_w(interrupt_budget, @@ -394,8 +390,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); LoadFunction(feedback_cell); - LoadTaggedPointerField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadTaggedField(feedback_cell, feedback_cell, + JSFunction::kFeedbackCellOffset); Register interrupt_budget = scratch_scope.AcquireScratch(); __ Ld_w(interrupt_budget, @@ -410,16 +406,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( void BaselineAssembler::LdaContextSlot(Register context, uint32_t index, uint32_t depth) { for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, context, Context::kPreviousOffset); } - LoadTaggedAnyField(kInterpreterAccumulatorRegister, context, - Context::OffsetOfElementAt(index)); + LoadTaggedField(kInterpreterAccumulatorRegister, context, + Context::OffsetOfElementAt(index)); } void BaselineAssembler::StaContextSlot(Register context, Register value, uint32_t index, uint32_t depth) { for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, context, Context::kPreviousOffset); } StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index), value); @@ -428,33 +424,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value, void BaselineAssembler::LdaModuleVariable(Register context, int cell_index, uint32_t depth) { for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, context, Context::kPreviousOffset); } - LoadTaggedPointerField(context, context, Context::kExtensionOffset); + LoadTaggedField(context, context, Context::kExtensionOffset); if (cell_index > 0) { - LoadTaggedPointerField(context, context, - SourceTextModule::kRegularExportsOffset); + LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset); // The actual array index is (cell_index - 1). cell_index -= 1; } else { - LoadTaggedPointerField(context, context, - SourceTextModule::kRegularImportsOffset); + LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset); // The actual array index is (-cell_index - 1). 
cell_index = -cell_index - 1; } LoadFixedArrayElement(context, context, cell_index); - LoadTaggedAnyField(kInterpreterAccumulatorRegister, context, - Cell::kValueOffset); + LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset); } void BaselineAssembler::StaModuleVariable(Register context, Register value, int cell_index, uint32_t depth) { for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, context, Context::kPreviousOffset); } - LoadTaggedPointerField(context, context, Context::kExtensionOffset); - LoadTaggedPointerField(context, context, - SourceTextModule::kRegularExportsOffset); + LoadTaggedField(context, context, Context::kExtensionOffset); + LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset); // The actual array index is (cell_index - 1). cell_index -= 1; @@ -533,8 +525,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { __ masm()->LeaveFrame(StackFrame::BASELINE); // Drop receiver + arguments. - __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); __ masm()->Ret(); } diff --git a/src/baseline/mips64/baseline-assembler-mips64-inl.h b/src/baseline/mips64/baseline-assembler-mips64-inl.h index 522efd23c8..9cfe39d98a 100644 --- a/src/baseline/mips64/baseline-assembler-mips64-inl.h +++ b/src/baseline/mips64/baseline-assembler-mips64-inl.h @@ -304,8 +304,8 @@ void BaselineAssembler::Pop(T... registers) { detail::PopAllHelper::Pop(this, registers...); } -void BaselineAssembler::LoadTaggedPointerField(Register output, Register source, - int offset) { +void BaselineAssembler::LoadTaggedField(Register output, Register source, + int offset) { __ Ld(output, FieldMemOperand(source, offset)); } void BaselineAssembler::LoadTaggedSignedField(Register output, Register source, @@ -318,10 +318,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output, LoadTaggedSignedField(output, source, offset); SmiUntag(output); } -void BaselineAssembler::LoadTaggedAnyField(Register output, Register source, - int offset) { - __ Ld(output, FieldMemOperand(source, offset)); -} void BaselineAssembler::LoadWord16FieldZeroExtend(Register output, Register source, int offset) { __ Lhu(output, FieldMemOperand(source, offset)); @@ -360,8 +356,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result, Label* on_result, Label::Distance) { Label fallthrough; - LoadTaggedPointerField(scratch_and_result, feedback_vector, - FeedbackVector::OffsetOfElementAt(slot.ToInt())); + LoadTaggedField(scratch_and_result, feedback_vector, + FeedbackVector::OffsetOfElementAt(slot.ToInt())); __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough); // Is it marked_for_deoptimization? If yes, clear the slot. 
{ @@ -384,8 +380,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); LoadFunction(feedback_cell); - LoadTaggedPointerField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadTaggedField(feedback_cell, feedback_cell, + JSFunction::kFeedbackCellOffset); Register interrupt_budget = scratch_scope.AcquireScratch(); __ Lw(interrupt_budget, @@ -404,8 +400,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); LoadFunction(feedback_cell); - LoadTaggedPointerField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadTaggedField(feedback_cell, feedback_cell, + JSFunction::kFeedbackCellOffset); Register interrupt_budget = scratch_scope.AcquireScratch(); __ Lw(interrupt_budget, @@ -420,16 +416,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( void BaselineAssembler::LdaContextSlot(Register context, uint32_t index, uint32_t depth) { for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, context, Context::kPreviousOffset); } - LoadTaggedAnyField(kInterpreterAccumulatorRegister, context, - Context::OffsetOfElementAt(index)); + LoadTaggedField(kInterpreterAccumulatorRegister, context, + Context::OffsetOfElementAt(index)); } void BaselineAssembler::StaContextSlot(Register context, Register value, uint32_t index, uint32_t depth) { for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, context, Context::kPreviousOffset); } StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index), value); @@ -438,33 +434,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value, void BaselineAssembler::LdaModuleVariable(Register context, int cell_index, uint32_t depth) { for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, context, Context::kPreviousOffset); } - LoadTaggedPointerField(context, context, Context::kExtensionOffset); + LoadTaggedField(context, context, Context::kExtensionOffset); if (cell_index > 0) { - LoadTaggedPointerField(context, context, - SourceTextModule::kRegularExportsOffset); + LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset); // The actual array index is (cell_index - 1). cell_index -= 1; } else { - LoadTaggedPointerField(context, context, - SourceTextModule::kRegularImportsOffset); + LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset); // The actual array index is (-cell_index - 1). 
cell_index = -cell_index - 1; } LoadFixedArrayElement(context, context, cell_index); - LoadTaggedAnyField(kInterpreterAccumulatorRegister, context, - Cell::kValueOffset); + LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset); } void BaselineAssembler::StaModuleVariable(Register context, Register value, int cell_index, uint32_t depth) { for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, context, Context::kPreviousOffset); } - LoadTaggedPointerField(context, context, Context::kExtensionOffset); - LoadTaggedPointerField(context, context, - SourceTextModule::kRegularExportsOffset); + LoadTaggedField(context, context, Context::kExtensionOffset); + LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset); // The actual array index is (cell_index - 1). cell_index -= 1; @@ -544,8 +536,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { __ masm()->LeaveFrame(StackFrame::BASELINE); // Drop receiver + arguments. - __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); __ masm()->Ret(); } diff --git a/src/baseline/ppc/baseline-assembler-ppc-inl.h b/src/baseline/ppc/baseline-assembler-ppc-inl.h index 4196551aa1..9f85abfc40 100644 --- a/src/baseline/ppc/baseline-assembler-ppc-inl.h +++ b/src/baseline/ppc/baseline-assembler-ppc-inl.h @@ -49,31 +49,6 @@ class BaselineAssembler::ScratchRegisterScope { int registers_used_; }; -inline bool IsSignedCondition(Condition cond) { - switch (cond) { - case kEqual: - case kNotEqual: - case kLessThan: - case kGreaterThan: - case kLessThanEqual: - case kGreaterThanEqual: - case kOverflow: - case kNoOverflow: - case kZero: - case kNotZero: - return true; - - case kUnsignedLessThan: - case kUnsignedGreaterThan: - case kUnsignedLessThanEqual: - case kUnsignedGreaterThanEqual: - return false; - - default: - UNREACHABLE(); - } -} - #define __ assm-> // ppc helper template @@ -82,19 +57,19 @@ static void JumpIfHelper(MacroAssembler* assm, Condition cc, Register lhs, static_assert(width == 64 || width == 32, "only support 64 and 32 bit compare"); if (width == 64) { - if (IsSignedCondition(cc)) { + if (is_signed(cc)) { __ CmpS64(lhs, rhs); } else { __ CmpU64(lhs, rhs); } } else { - if (IsSignedCondition(cc)) { + if (is_signed(cc)) { __ CmpS32(lhs, rhs); } else { __ CmpU32(lhs, rhs); } } - __ b(check_condition(cc), target); + __ b(to_condition(cc), target); } #undef __ @@ -160,18 +135,18 @@ void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc, Label* target, Label::Distance) { ASM_CODE_COMMENT(masm_); __ AndU64(r0, value, Operand(mask), ip, SetRC); - __ b(check_condition(cc), target, cr0); + __ b(to_condition(cc), target, cr0); } void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs, Label* target, Label::Distance) { ASM_CODE_COMMENT(masm_); - if (IsSignedCondition(cc)) { + if (is_signed(cc)) { __ CmpS64(lhs, rhs, r0); } else { __ CmpU64(lhs, rhs, r0); } - __ b(check_condition(cc), target); + __ b(to_condition(cc), target); } void BaselineAssembler::JumpIfObjectType(Condition cc, Register object, @@ -231,7 +206,7 @@ void BaselineAssembler::JumpIfTagged(Condition cc, Register value, MemOperand operand, Label* target, Label::Distance) { ASM_CODE_COMMENT(masm_); - __ LoadTaggedPointerField(ip, operand, r0); + __ LoadTaggedField(ip, 
operand, r0); JumpIfHelper(masm_, cc, value, ip, target); } @@ -239,7 +214,7 @@ void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand, Register value, Label* target, Label::Distance) { ASM_CODE_COMMENT(masm_); - __ LoadTaggedPointerField(ip, operand, r0); + __ LoadTaggedField(ip, operand, r0); JumpIfHelper(masm_, cc, value, ip, target); } @@ -399,10 +374,10 @@ void BaselineAssembler::Pop(T... registers) { detail::PopAllHelper::Pop(this, registers...); } -void BaselineAssembler::LoadTaggedPointerField(Register output, Register source, - int offset) { +void BaselineAssembler::LoadTaggedField(Register output, Register source, + int offset) { ASM_CODE_COMMENT(masm_); - __ LoadTaggedPointerField(output, FieldMemOperand(source, offset), r0); + __ LoadTaggedField(output, FieldMemOperand(source, offset), r0); } void BaselineAssembler::LoadTaggedSignedField(Register output, Register source, @@ -418,12 +393,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output, SmiUntag(output); } -void BaselineAssembler::LoadTaggedAnyField(Register output, Register source, - int offset) { - ASM_CODE_COMMENT(masm_); - __ LoadAnyTaggedField(output, FieldMemOperand(source, offset), r0); -} - void BaselineAssembler::LoadWord16FieldZeroExtend(Register output, Register source, int offset) { ASM_CODE_COMMENT(masm_); @@ -468,8 +437,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result, Label* on_result, Label::Distance) { Label fallthrough; - LoadTaggedPointerField(scratch_and_result, feedback_vector, - FeedbackVector::OffsetOfElementAt(slot.ToInt())); + LoadTaggedField(scratch_and_result, feedback_vector, + FeedbackVector::OffsetOfElementAt(slot.ToInt())); __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough); // Is it marked_for_deoptimization? If yes, clear the slot. 
@@ -494,8 +463,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); LoadFunction(feedback_cell); - LoadTaggedPointerField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadTaggedField(feedback_cell, feedback_cell, + JSFunction::kFeedbackCellOffset); Register interrupt_budget = scratch_scope.AcquireScratch(); __ LoadU32( @@ -519,8 +488,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); LoadFunction(feedback_cell); - LoadTaggedPointerField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadTaggedField(feedback_cell, feedback_cell, + JSFunction::kFeedbackCellOffset); Register interrupt_budget = scratch_scope.AcquireScratch(); __ LoadU32( @@ -538,17 +507,17 @@ void BaselineAssembler::LdaContextSlot(Register context, uint32_t index, uint32_t depth) { ASM_CODE_COMMENT(masm_); for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, context, Context::kPreviousOffset); } - LoadTaggedAnyField(kInterpreterAccumulatorRegister, context, - Context::OffsetOfElementAt(index)); + LoadTaggedField(kInterpreterAccumulatorRegister, context, + Context::OffsetOfElementAt(index)); } void BaselineAssembler::StaContextSlot(Register context, Register value, uint32_t index, uint32_t depth) { ASM_CODE_COMMENT(masm_); for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, context, Context::kPreviousOffset); } StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index), value); @@ -558,34 +527,30 @@ void BaselineAssembler::LdaModuleVariable(Register context, int cell_index, uint32_t depth) { ASM_CODE_COMMENT(masm_); for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, context, Context::kPreviousOffset); } - LoadTaggedPointerField(context, context, Context::kExtensionOffset); + LoadTaggedField(context, context, Context::kExtensionOffset); if (cell_index > 0) { - LoadTaggedPointerField(context, context, - SourceTextModule::kRegularExportsOffset); + LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset); // The actual array index is (cell_index - 1). cell_index -= 1; } else { - LoadTaggedPointerField(context, context, - SourceTextModule::kRegularImportsOffset); + LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset); // The actual array index is (-cell_index - 1). 
cell_index = -cell_index - 1; } LoadFixedArrayElement(context, context, cell_index); - LoadTaggedAnyField(kInterpreterAccumulatorRegister, context, - Cell::kValueOffset); + LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset); } void BaselineAssembler::StaModuleVariable(Register context, Register value, int cell_index, uint32_t depth) { ASM_CODE_COMMENT(masm_); for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, context, Context::kPreviousOffset); } - LoadTaggedPointerField(context, context, Context::kExtensionOffset); - LoadTaggedPointerField(context, context, - SourceTextModule::kRegularExportsOffset); + LoadTaggedField(context, context, Context::kExtensionOffset); + LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset); // The actual array index is (cell_index - 1). cell_index -= 1; @@ -684,8 +649,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { __ masm()->LeaveFrame(StackFrame::BASELINE); // Drop receiver + arguments. - __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); __ masm()->Ret(); } diff --git a/src/baseline/riscv/baseline-assembler-riscv-inl.h b/src/baseline/riscv/baseline-assembler-riscv-inl.h index 9b5a1e6450..e1d83849b7 100644 --- a/src/baseline/riscv/baseline-assembler-riscv-inl.h +++ b/src/baseline/riscv/baseline-assembler-riscv-inl.h @@ -297,9 +297,9 @@ void BaselineAssembler::Pop(T... registers) { detail::PopAllHelper::Pop(this, registers...); } -void BaselineAssembler::LoadTaggedPointerField(Register output, Register source, - int offset) { - __ LoadTaggedPointerField(output, FieldMemOperand(source, offset)); +void BaselineAssembler::LoadTaggedField(Register output, Register source, + int offset) { + __ LoadTaggedField(output, FieldMemOperand(source, offset)); } void BaselineAssembler::LoadTaggedSignedField(Register output, Register source, int offset) { @@ -311,10 +311,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output, LoadTaggedSignedField(output, source, offset); SmiUntag(output); } -void BaselineAssembler::LoadTaggedAnyField(Register output, Register source, - int offset) { - __ LoadAnyTaggedField(output, FieldMemOperand(source, offset)); -} void BaselineAssembler::LoadWord16FieldZeroExtend(Register output, Register source, int offset) { __ Lhu(output, FieldMemOperand(source, offset)); @@ -351,8 +347,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result, Label* on_result, Label::Distance) { Label fallthrough, clear_slot; - LoadTaggedPointerField(scratch_and_result, feedback_vector, - FeedbackVector::OffsetOfElementAt(slot.ToInt())); + LoadTaggedField(scratch_and_result, feedback_vector, + FeedbackVector::OffsetOfElementAt(slot.ToInt())); __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough); // Is it marked_for_deoptimization? If yes, clear the slot. 
@@ -379,8 +375,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); LoadFunction(feedback_cell); - LoadTaggedPointerField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadTaggedField(feedback_cell, feedback_cell, + JSFunction::kFeedbackCellOffset); Register interrupt_budget = scratch_scope.AcquireScratch(); __ Lw(interrupt_budget, @@ -401,8 +397,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); LoadFunction(feedback_cell); - LoadTaggedPointerField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadTaggedField(feedback_cell, feedback_cell, + JSFunction::kFeedbackCellOffset); Register interrupt_budget = scratch_scope.AcquireScratch(); __ Lw(interrupt_budget, @@ -419,16 +415,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( void BaselineAssembler::LdaContextSlot(Register context, uint32_t index, uint32_t depth) { for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, context, Context::kPreviousOffset); } - LoadTaggedAnyField(kInterpreterAccumulatorRegister, context, - Context::OffsetOfElementAt(index)); + LoadTaggedField(kInterpreterAccumulatorRegister, context, + Context::OffsetOfElementAt(index)); } void BaselineAssembler::StaContextSlot(Register context, Register value, uint32_t index, uint32_t depth) { for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, context, Context::kPreviousOffset); } StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index), value); @@ -437,33 +433,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value, void BaselineAssembler::LdaModuleVariable(Register context, int cell_index, uint32_t depth) { for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, context, Context::kPreviousOffset); } - LoadTaggedPointerField(context, context, Context::kExtensionOffset); + LoadTaggedField(context, context, Context::kExtensionOffset); if (cell_index > 0) { - LoadTaggedPointerField(context, context, - SourceTextModule::kRegularExportsOffset); + LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset); // The actual array index is (cell_index - 1). cell_index -= 1; } else { - LoadTaggedPointerField(context, context, - SourceTextModule::kRegularImportsOffset); + LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset); // The actual array index is (-cell_index - 1). 
cell_index = -cell_index - 1; } LoadFixedArrayElement(context, context, cell_index); - LoadTaggedAnyField(kInterpreterAccumulatorRegister, context, - Cell::kValueOffset); + LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset); } void BaselineAssembler::StaModuleVariable(Register context, Register value, int cell_index, uint32_t depth) { for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, context, Context::kPreviousOffset); } - LoadTaggedPointerField(context, context, Context::kExtensionOffset); - LoadTaggedPointerField(context, context, - SourceTextModule::kRegularExportsOffset); + LoadTaggedField(context, context, Context::kExtensionOffset); + LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset); // The actual array index is (cell_index - 1). cell_index -= 1; @@ -508,7 +500,7 @@ void BaselineAssembler::Switch(Register reg, int case_value_base, __ CalcScaledAddress(t6, t6, reg, entry_size_log2); __ Jump(t6); { - TurboAssembler::BlockTrampolinePoolScope(masm()); + MacroAssembler::BlockTrampolinePoolScope(masm()); __ BlockTrampolinePoolFor(num_labels * kInstrSize * 2); __ bind(&table); for (int i = 0; i < num_labels; ++i) { diff --git a/src/baseline/s390/baseline-assembler-s390-inl.h b/src/baseline/s390/baseline-assembler-s390-inl.h index 087c4f1b12..778b3a57ae 100644 --- a/src/baseline/s390/baseline-assembler-s390-inl.h +++ b/src/baseline/s390/baseline-assembler-s390-inl.h @@ -48,31 +48,6 @@ class BaselineAssembler::ScratchRegisterScope { int registers_used_; }; -inline bool IsSignedCondition(Condition cond) { - switch (cond) { - case kEqual: - case kNotEqual: - case kLessThan: - case kGreaterThan: - case kLessThanEqual: - case kGreaterThanEqual: - case kOverflow: - case kNoOverflow: - case kZero: - case kNotZero: - return true; - - case kUnsignedLessThan: - case kUnsignedGreaterThan: - case kUnsignedLessThanEqual: - case kUnsignedGreaterThanEqual: - return false; - - default: - UNREACHABLE(); - } -} - #define __ assm-> // s390x helper template @@ -81,19 +56,19 @@ static void JumpIfHelper(MacroAssembler* assm, Condition cc, Register lhs, static_assert(width == 64 || width == 32, "only support 64 and 32 bit compare"); if (width == 64) { - if (IsSignedCondition(cc)) { + if (is_signed(cc)) { __ CmpS64(lhs, rhs); } else { __ CmpU64(lhs, rhs); } } else { - if (IsSignedCondition(cc)) { + if (is_signed(cc)) { __ CmpS32(lhs, rhs); } else { __ CmpU32(lhs, rhs); } } - __ b(check_condition(cc), target); + __ b(to_condition(cc), target); } #undef __ @@ -159,18 +134,18 @@ void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc, Label* target, Label::Distance) { ASM_CODE_COMMENT(masm_); __ AndP(r0, value, Operand(mask)); - __ b(check_condition(cc), target); + __ b(to_condition(cc), target); } void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs, Label* target, Label::Distance) { ASM_CODE_COMMENT(masm_); - if (IsSignedCondition(cc)) { + if (is_signed(cc)) { __ CmpS64(lhs, rhs); } else { __ CmpU64(lhs, rhs); } - __ b(check_condition(cc), target); + __ b(to_condition(cc), target); } void BaselineAssembler::JumpIfObjectType(Condition cc, Register object, @@ -236,9 +211,9 @@ void BaselineAssembler::JumpIfTagged(Condition cc, Register value, if (COMPRESS_POINTERS_BOOL) { MemOperand addr = MemOperand(operand.rx(), operand.rb(), operand.offset() + stack_bias); - __ LoadTaggedPointerField(ip, addr, r0); + __ LoadTaggedField(ip, addr, r0); } else { - 
__ LoadTaggedPointerField(ip, operand, r0); + __ LoadTaggedField(ip, operand, r0); } JumpIfHelper(masm_, cc, value, ip, target); } @@ -251,9 +226,9 @@ void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand, if (COMPRESS_POINTERS_BOOL) { MemOperand addr = MemOperand(operand.rx(), operand.rb(), operand.offset() + stack_bias); - __ LoadTaggedPointerField(ip, addr, r0); + __ LoadTaggedField(ip, addr, r0); } else { - __ LoadTaggedPointerField(ip, operand, r0); + __ LoadTaggedField(ip, operand, r0); } JumpIfHelper(masm_, cc, ip, value, target); } @@ -412,10 +387,10 @@ void BaselineAssembler::Pop(T... registers) { detail::PopAllHelper::Pop(this, registers...); } -void BaselineAssembler::LoadTaggedPointerField(Register output, Register source, - int offset) { +void BaselineAssembler::LoadTaggedField(Register output, Register source, + int offset) { ASM_CODE_COMMENT(masm_); - __ LoadTaggedPointerField(output, FieldMemOperand(source, offset), r0); + __ LoadTaggedField(output, FieldMemOperand(source, offset), r0); } void BaselineAssembler::LoadTaggedSignedField(Register output, Register source, @@ -431,12 +406,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output, SmiUntag(output); } -void BaselineAssembler::LoadTaggedAnyField(Register output, Register source, - int offset) { - ASM_CODE_COMMENT(masm_); - __ LoadAnyTaggedField(output, FieldMemOperand(source, offset), r0); -} - void BaselineAssembler::LoadWord16FieldZeroExtend(Register output, Register source, int offset) { ASM_CODE_COMMENT(masm_); @@ -481,8 +450,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result, Label* on_result, Label::Distance) { Label fallthrough; - LoadTaggedPointerField(scratch_and_result, feedback_vector, - FeedbackVector::OffsetOfElementAt(slot.ToInt())); + LoadTaggedField(scratch_and_result, feedback_vector, + FeedbackVector::OffsetOfElementAt(slot.ToInt())); __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough); // Is it marked_for_deoptimization? If yes, clear the slot. 
@@ -507,8 +476,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); LoadFunction(feedback_cell); - LoadTaggedPointerField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadTaggedField(feedback_cell, feedback_cell, + JSFunction::kFeedbackCellOffset); Register interrupt_budget = scratch_scope.AcquireScratch(); __ LoadU32( @@ -532,8 +501,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( ScratchRegisterScope scratch_scope(this); Register feedback_cell = scratch_scope.AcquireScratch(); LoadFunction(feedback_cell); - LoadTaggedPointerField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadTaggedField(feedback_cell, feedback_cell, + JSFunction::kFeedbackCellOffset); Register interrupt_budget = scratch_scope.AcquireScratch(); __ LoadU32( @@ -550,16 +519,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( void BaselineAssembler::LdaContextSlot(Register context, uint32_t index, uint32_t depth) { for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, context, Context::kPreviousOffset); } - LoadTaggedAnyField(kInterpreterAccumulatorRegister, context, - Context::OffsetOfElementAt(index)); + LoadTaggedField(kInterpreterAccumulatorRegister, context, + Context::OffsetOfElementAt(index)); } void BaselineAssembler::StaContextSlot(Register context, Register value, uint32_t index, uint32_t depth) { for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, context, Context::kPreviousOffset); } StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index), value); @@ -568,33 +537,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value, void BaselineAssembler::LdaModuleVariable(Register context, int cell_index, uint32_t depth) { for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, context, Context::kPreviousOffset); } - LoadTaggedPointerField(context, context, Context::kExtensionOffset); + LoadTaggedField(context, context, Context::kExtensionOffset); if (cell_index > 0) { - LoadTaggedPointerField(context, context, - SourceTextModule::kRegularExportsOffset); + LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset); // The actual array index is (cell_index - 1). cell_index -= 1; } else { - LoadTaggedPointerField(context, context, - SourceTextModule::kRegularImportsOffset); + LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset); // The actual array index is (-cell_index - 1). 
cell_index = -cell_index - 1; } LoadFixedArrayElement(context, context, cell_index); - LoadTaggedAnyField(kInterpreterAccumulatorRegister, context, - Cell::kValueOffset); + LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset); } void BaselineAssembler::StaModuleVariable(Register context, Register value, int cell_index, uint32_t depth) { for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); + LoadTaggedField(context, context, Context::kPreviousOffset); } - LoadTaggedPointerField(context, context, Context::kExtensionOffset); - LoadTaggedPointerField(context, context, - SourceTextModule::kRegularExportsOffset); + LoadTaggedField(context, context, Context::kExtensionOffset); + LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset); // The actual array index is (cell_index - 1). cell_index -= 1; @@ -692,8 +657,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { __ masm()->LeaveFrame(StackFrame::BASELINE); // Drop receiver + arguments. - __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); __ masm()->Ret(); } diff --git a/src/baseline/x64/baseline-assembler-x64-inl.h b/src/baseline/x64/baseline-assembler-x64-inl.h index fe57df754a..ea17e2f721 100644 --- a/src/baseline/x64/baseline-assembler-x64-inl.h +++ b/src/baseline/x64/baseline-assembler-x64-inl.h @@ -287,9 +287,9 @@ void BaselineAssembler::Pop(T... registers) { (__ Pop(registers), ...); } -void BaselineAssembler::LoadTaggedPointerField(Register output, Register source, - int offset) { - __ LoadTaggedPointerField(output, FieldOperand(source, offset)); +void BaselineAssembler::LoadTaggedField(Register output, Register source, + int offset) { + __ LoadTaggedField(output, FieldOperand(source, offset)); } void BaselineAssembler::LoadTaggedSignedField(Register output, Register source, int offset) { @@ -300,10 +300,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output, int offset) { __ SmiUntagField(output, FieldOperand(source, offset)); } -void BaselineAssembler::LoadTaggedAnyField(Register output, Register source, - int offset) { - __ LoadAnyTaggedField(output, FieldOperand(source, offset)); -} void BaselineAssembler::LoadWord16FieldZeroExtend(Register output, Register source, int offset) { __ movzxwq(output, FieldOperand(source, offset)); @@ -331,45 +327,31 @@ void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target, __ StoreTaggedField(FieldOperand(target, offset), value); } -void BaselineAssembler::LoadTaggedPointerField(TaggedRegister output, - Register source, int offset) { - __ LoadTaggedPointerField(output, FieldOperand(source, offset)); +void BaselineAssembler::LoadTaggedField(TaggedRegister output, Register source, + int offset) { + __ LoadTaggedField(output, FieldOperand(source, offset)); } -void BaselineAssembler::LoadTaggedPointerField(TaggedRegister output, - TaggedRegister source, - int offset) { - __ LoadTaggedPointerField(output, FieldOperand(source, offset)); +void BaselineAssembler::LoadTaggedField(TaggedRegister output, + TaggedRegister source, int offset) { + __ LoadTaggedField(output, FieldOperand(source, offset)); } -void BaselineAssembler::LoadTaggedPointerField(Register output, - TaggedRegister source, - int offset) { - __ LoadTaggedPointerField(output, FieldOperand(source, offset)); -} - -void 
BaselineAssembler::LoadTaggedAnyField(Register output, - TaggedRegister source, int offset) { - __ LoadAnyTaggedField(output, FieldOperand(source, offset)); -} - -void BaselineAssembler::LoadTaggedAnyField(TaggedRegister output, - TaggedRegister source, int offset) { - __ LoadAnyTaggedField(output, FieldOperand(source, offset)); +void BaselineAssembler::LoadTaggedField(Register output, TaggedRegister source, + int offset) { + __ LoadTaggedField(output, FieldOperand(source, offset)); } void BaselineAssembler::LoadFixedArrayElement(Register output, TaggedRegister array, int32_t index) { - LoadTaggedAnyField(output, array, - FixedArray::kHeaderSize + index * kTaggedSize); + LoadTaggedField(output, array, FixedArray::kHeaderSize + index * kTaggedSize); } void BaselineAssembler::LoadFixedArrayElement(TaggedRegister output, TaggedRegister array, int32_t index) { - LoadTaggedAnyField(output, array, - FixedArray::kHeaderSize + index * kTaggedSize); + LoadTaggedField(output, array, FixedArray::kHeaderSize + index * kTaggedSize); } void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result, @@ -389,8 +371,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( LoadFunction(feedback_cell); // Decompresses pointer by complex addressing mode when necessary. TaggedRegister tagged(feedback_cell); - LoadTaggedPointerField(tagged, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadTaggedField(tagged, feedback_cell, JSFunction::kFeedbackCellOffset); __ addl(FieldOperand(tagged, FeedbackCell::kInterruptBudgetOffset), Immediate(weight)); if (skip_interrupt_label) { @@ -407,8 +388,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( LoadFunction(feedback_cell); // Decompresses pointer by complex addressing mode when necessary. TaggedRegister tagged(feedback_cell); - LoadTaggedPointerField(tagged, feedback_cell, - JSFunction::kFeedbackCellOffset); + LoadTaggedField(tagged, feedback_cell, JSFunction::kFeedbackCellOffset); __ addl(FieldOperand(tagged, FeedbackCell::kInterruptBudgetOffset), weight); if (skip_interrupt_label) __ j(greater_equal, skip_interrupt_label); } @@ -420,17 +400,17 @@ void BaselineAssembler::LdaContextSlot(Register context, uint32_t index, // addressing mode, any intermediate context pointer is loaded in compressed // form. if (depth == 0) { - LoadTaggedAnyField(kInterpreterAccumulatorRegister, context, - Context::OffsetOfElementAt(index)); + LoadTaggedField(kInterpreterAccumulatorRegister, context, + Context::OffsetOfElementAt(index)); } else { TaggedRegister tagged(context); - LoadTaggedPointerField(tagged, context, Context::kPreviousOffset); + LoadTaggedField(tagged, context, Context::kPreviousOffset); --depth; for (; depth > 0; --depth) { - LoadTaggedPointerField(tagged, tagged, Context::kPreviousOffset); + LoadTaggedField(tagged, tagged, Context::kPreviousOffset); } - LoadTaggedAnyField(kInterpreterAccumulatorRegister, tagged, - Context::OffsetOfElementAt(index)); + LoadTaggedField(kInterpreterAccumulatorRegister, tagged, + Context::OffsetOfElementAt(index)); } } @@ -442,10 +422,10 @@ void BaselineAssembler::StaContextSlot(Register context, Register value, // form. 
if (depth > 0) { TaggedRegister tagged(context); - LoadTaggedPointerField(tagged, context, Context::kPreviousOffset); + LoadTaggedField(tagged, context, Context::kPreviousOffset); --depth; for (; depth > 0; --depth) { - LoadTaggedPointerField(tagged, tagged, Context::kPreviousOffset); + LoadTaggedField(tagged, tagged, Context::kPreviousOffset); } if (COMPRESS_POINTERS_BOOL) { // Decompress tagged pointer. @@ -463,29 +443,26 @@ void BaselineAssembler::LdaModuleVariable(Register context, int cell_index, // enabled, any intermediate context pointer is loaded in compressed form. TaggedRegister tagged(context); if (depth == 0) { - LoadTaggedPointerField(tagged, context, Context::kExtensionOffset); + LoadTaggedField(tagged, context, Context::kExtensionOffset); } else { - LoadTaggedPointerField(tagged, context, Context::kPreviousOffset); + LoadTaggedField(tagged, context, Context::kPreviousOffset); --depth; for (; depth > 0; --depth) { - LoadTaggedPointerField(tagged, tagged, Context::kPreviousOffset); + LoadTaggedField(tagged, tagged, Context::kPreviousOffset); } - LoadTaggedPointerField(tagged, tagged, Context::kExtensionOffset); + LoadTaggedField(tagged, tagged, Context::kExtensionOffset); } if (cell_index > 0) { - LoadTaggedPointerField(tagged, tagged, - SourceTextModule::kRegularExportsOffset); + LoadTaggedField(tagged, tagged, SourceTextModule::kRegularExportsOffset); // The actual array index is (cell_index - 1). cell_index -= 1; } else { - LoadTaggedPointerField(tagged, tagged, - SourceTextModule::kRegularImportsOffset); + LoadTaggedField(tagged, tagged, SourceTextModule::kRegularImportsOffset); // The actual array index is (-cell_index - 1). cell_index = -cell_index - 1; } LoadFixedArrayElement(tagged, tagged, cell_index); - LoadTaggedAnyField(kInterpreterAccumulatorRegister, tagged, - Cell::kValueOffset); + LoadTaggedField(kInterpreterAccumulatorRegister, tagged, Cell::kValueOffset); } void BaselineAssembler::StaModuleVariable(Register context, Register value, @@ -495,17 +472,16 @@ void BaselineAssembler::StaModuleVariable(Register context, Register value, // enabled, any intermediate context pointer is loaded in compressed form. TaggedRegister tagged(context); if (depth == 0) { - LoadTaggedPointerField(tagged, context, Context::kExtensionOffset); + LoadTaggedField(tagged, context, Context::kExtensionOffset); } else { - LoadTaggedPointerField(tagged, context, Context::kPreviousOffset); + LoadTaggedField(tagged, context, Context::kPreviousOffset); --depth; for (; depth > 0; --depth) { - LoadTaggedPointerField(tagged, tagged, Context::kPreviousOffset); + LoadTaggedField(tagged, tagged, Context::kPreviousOffset); } - LoadTaggedPointerField(tagged, tagged, Context::kExtensionOffset); + LoadTaggedField(tagged, tagged, Context::kExtensionOffset); } - LoadTaggedPointerField(tagged, tagged, - SourceTextModule::kRegularExportsOffset); + LoadTaggedField(tagged, tagged, SourceTextModule::kRegularExportsOffset); // The actual array index is (cell_index - 1). cell_index -= 1; @@ -587,8 +563,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) { // Drop receiver + arguments. 
__ masm()->DropArguments(params_size, scratch, - TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); __ masm()->Ret(); } diff --git a/src/builtins/arm/builtins-arm.cc b/src/builtins/arm/builtins-arm.cc index be2d6505e2..bdf9df508d 100644 --- a/src/builtins/arm/builtins-arm.cc +++ b/src/builtins/arm/builtins-arm.cc @@ -130,8 +130,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { } // Remove caller arguments from the stack and return. - __ DropArguments(scratch, TurboAssembler::kCountIsSmi, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(scratch, MacroAssembler::kCountIsSmi, + MacroAssembler::kCountIncludesReceiver); __ Jump(lr); __ bind(&stack_overflow); @@ -278,8 +278,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ LeaveFrame(StackFrame::CONSTRUCT); // Remove caller arguments from the stack and return. - __ DropArguments(r1, TurboAssembler::kCountIsSmi, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(r1, MacroAssembler::kCountIsSmi, + MacroAssembler::kCountIncludesReceiver); __ Jump(lr); __ bind(&check_receiver); @@ -826,8 +826,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, __ LeaveFrame(StackFrame::INTERPRETED); // Drop receiver + arguments. - __ DropArguments(params_size, TurboAssembler::kCountIsBytes, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(params_size, MacroAssembler::kCountIsBytes, + MacroAssembler::kCountIncludesReceiver); } // Advance the current bytecode offset. This simulates what all bytecode @@ -1352,7 +1352,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args, __ sub(start_address, start_address, scratch); // Push the arguments. 
__ PushArray(start_address, num_args, scratch, - TurboAssembler::PushArrayOrder::kReverse); + MacroAssembler::PushArrayOrder::kReverse); } // static @@ -1820,8 +1820,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { __ ldr(r5, MemOperand(sp, kSystemPointerSize), ge); // thisArg __ cmp(r0, Operand(JSParameterCount(2)), ge); __ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge); // argArray - __ DropArgumentsAndPushNewReceiver(r0, r5, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ DropArgumentsAndPushNewReceiver(r0, r5, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- @@ -1897,8 +1897,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { __ ldr(r5, MemOperand(sp, 2 * kSystemPointerSize), ge); // thisArgument __ cmp(r0, Operand(JSParameterCount(3)), ge); __ ldr(r2, MemOperand(sp, 3 * kSystemPointerSize), ge); // argumentsList - __ DropArgumentsAndPushNewReceiver(r0, r5, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ DropArgumentsAndPushNewReceiver(r0, r5, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- @@ -1940,8 +1940,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { __ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge); // argumentsList __ cmp(r0, Operand(JSParameterCount(3)), ge); __ ldr(r3, MemOperand(sp, 3 * kSystemPointerSize), ge); // new.target - __ DropArgumentsAndPushNewReceiver(r0, r4, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ DropArgumentsAndPushNewReceiver(r0, r4, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- diff --git a/src/builtins/arm64/builtins-arm64.cc b/src/builtins/arm64/builtins-arm64.cc index 79ce842737..2eafde0327 100644 --- a/src/builtins/arm64/builtins-arm64.cc +++ b/src/builtins/arm64/builtins-arm64.cc @@ -163,7 +163,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { } // Remove caller arguments from the stack and return. - __ DropArguments(x1, TurboAssembler::kCountIncludesReceiver); + __ DropArguments(x1, MacroAssembler::kCountIncludesReceiver); __ Ret(); __ Bind(&stack_overflow); @@ -213,7 +213,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // -- sp[4*kSystemPointerSize]: context (pushed by FrameScope) // ----------------------------------- - __ LoadTaggedPointerField( + __ LoadTaggedField( x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset)); __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset)); __ DecodeField(w4); @@ -348,7 +348,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // Leave construct frame. __ LeaveFrame(StackFrame::CONSTRUCT); // Remove caller arguments from the stack and return. 
- __ DropArguments(x1, TurboAssembler::kCountIncludesReceiver); + __ DropArguments(x1, MacroAssembler::kCountIncludesReceiver); __ Ret(); // Otherwise we do a smi check and fall through to check if the return value @@ -423,7 +423,7 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm, } __ Cmp(scratch1, INTERPRETER_DATA_TYPE); __ B(ne, &done); - __ LoadTaggedPointerField( + __ LoadTaggedField( sfi_data, FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset)); __ Bind(&done); @@ -446,10 +446,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ AssertGeneratorObject(x1); // Load suspended function and context. - __ LoadTaggedPointerField( - x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset)); - __ LoadTaggedPointerField(cp, - FieldMemOperand(x4, JSFunction::kContextOffset)); + __ LoadTaggedField(x4, + FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset)); + __ LoadTaggedField(cp, FieldMemOperand(x4, JSFunction::kContextOffset)); // Flood function if we are stepping. Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator; @@ -477,7 +476,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ B(lo, &stack_overflow); // Get number of arguments for generator function. - __ LoadTaggedPointerField( + __ LoadTaggedField( x10, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset)); __ Ldrh(w10, FieldMemOperand( x10, SharedFunctionInfo::kFormalParameterCountOffset)); @@ -493,8 +492,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ Poke(padreg, Operand(x11, LSL, kSystemPointerSizeLog2)); // Poke receiver into highest claimed slot. - __ LoadTaggedPointerField( - x5, FieldMemOperand(x1, JSGeneratorObject::kReceiverOffset)); + __ LoadTaggedField(x5, + FieldMemOperand(x1, JSGeneratorObject::kReceiverOffset)); __ Poke(x5, __ ReceiverOperand(x10)); // ----------- S t a t e ------------- @@ -507,7 +506,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // ----------------------------------- // Copy the function arguments from the generator object's register file. - __ LoadTaggedPointerField( + __ LoadTaggedField( x5, FieldMemOperand(x1, JSGeneratorObject::kParametersAndRegistersOffset)); { @@ -518,7 +517,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ Add(x5, x5, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); __ Bind(&loop); __ Sub(x10, x10, 1); - __ LoadAnyTaggedField(x11, MemOperand(x5, -kTaggedSize, PreIndex)); + __ LoadTaggedField(x11, MemOperand(x5, -kTaggedSize, PreIndex)); __ Str(x11, MemOperand(x12, -kSystemPointerSize, PostIndex)); __ Cbnz(x10, &loop); __ Bind(&done); @@ -527,9 +526,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // Underlying function needs to have bytecode available. if (v8_flags.debug_code) { Label is_baseline; - __ LoadTaggedPointerField( + __ LoadTaggedField( x3, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset)); - __ LoadTaggedPointerField( + __ LoadTaggedField( x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset)); GetSharedFunctionInfoBytecodeOrBaseline(masm, x3, x0, &is_baseline); __ CompareObjectType(x3, x3, x3, BYTECODE_ARRAY_TYPE); @@ -539,7 +538,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // Resume (Ignition/TurboFan) generator object. 
   {
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         x0, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
     __ Ldrh(w0, FieldMemOperand(
                     x0, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -549,7 +548,7 @@
     __ Mov(x3, x1);
     __ Mov(x1, x4);
     static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
-    __ LoadTaggedPointerField(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
+    __ LoadTaggedField(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
     __ JumpCodeObject(x2);
   }
@@ -561,8 +560,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ Push(x1, padreg, x4, x5);
     __ CallRuntime(Runtime::kDebugOnFunctionCall);
     __ Pop(padreg, x1);
-    __ LoadTaggedPointerField(
-        x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
+    __ LoadTaggedField(x4,
+                       FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
   }
   __ B(&stepping_prepared);
@@ -572,8 +571,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ Push(x1, padreg);
     __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
     __ Pop(padreg, x1);
-    __ LoadTaggedPointerField(
-        x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
+    __ LoadTaggedField(x4,
+                       FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
   }
   __ B(&stepping_prepared);
@@ -1108,11 +1107,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
       BaselineOutOfLinePrologueDescriptor::kClosure);
   // Load the feedback vector from the closure.
   Register feedback_vector = temps.AcquireX();
-  __ LoadTaggedPointerField(
-      feedback_vector,
-      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
-  __ LoadTaggedPointerField(
-      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(feedback_vector, Cell::kValueOffset));
   __ AssertFeedbackVector(feedback_vector, x4);
   // Check the tiering state.
@@ -1205,7 +1203,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
   {
     ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
     // Drop the frame created by the baseline call.
-    __ Pop<TurboAssembler::kAuthLR>(fp, lr);
+    __ Pop<MacroAssembler::kAuthLR>(fp, lr);
    __ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector);
     __ Trap();
   }
@@ -1270,9 +1268,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   // Get the bytecode array from the function object and load it into
   // kInterpreterBytecodeArrayRegister.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       x4, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       kInterpreterBytecodeArrayRegister,
       FieldMemOperand(x4, SharedFunctionInfo::kFunctionDataOffset));
@@ -1288,17 +1286,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   __ B(ne, &compile_lazy);
   // Load the feedback vector from the closure.
-  __ LoadTaggedPointerField(
-      feedback_vector,
-      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
-  __ LoadTaggedPointerField(
-      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(feedback_vector, Cell::kValueOffset));
   Label push_stack_frame;
   // Check if feedback vector is valid. If valid, check for optimized code
   // and update invocation count. Otherwise, setup the stack frame.
-  __ LoadTaggedPointerField(
-      x7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+  __ LoadTaggedField(x7,
+                     FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
   __ Ldrh(x7, FieldMemOperand(x7, Map::kInstanceTypeOffset));
   __ Cmp(x7, FEEDBACK_VECTOR_TYPE);
   __ B(ne, &push_stack_frame);
@@ -1330,7 +1327,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   // the frame (that is done below).
   __ Bind(&push_stack_frame);
   FrameScope frame_scope(masm, StackFrame::MANUAL);
-  __ Push<TurboAssembler::kSignLR>(lr, fp);
+  __ Push<MacroAssembler::kSignLR>(lr, fp);
   __ mov(fp, sp);
   __ Push(cp, closure);
@@ -1342,7 +1339,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   // Push actual argument count, bytecode array, Smi tagged bytecode array
   // offset and an undefined (to properly align the stack pointer).
-  static_assert(TurboAssembler::kExtraSlotClaimedByPrologue == 1);
+  static_assert(MacroAssembler::kExtraSlotClaimedByPrologue == 1);
   __ SmiTag(x6, kInterpreterBytecodeOffsetRegister);
   __ Push(kJavaScriptCallArgCountRegister, kInterpreterBytecodeArrayRegister);
   __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
@@ -1480,16 +1477,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   __ bind(&is_baseline);
   {
     // Load the feedback vector from the closure.
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         feedback_vector,
         FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
-    __ LoadTaggedPointerField(
-        feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+    __ LoadTaggedField(feedback_vector,
+                       FieldMemOperand(feedback_vector, Cell::kValueOffset));
     Label install_baseline_code;
     // Check if feedback vector is valid. If not, call prepare for baseline to
     // allocate it.
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         x7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
     __ Ldrh(x7, FieldMemOperand(x7, Map::kInstanceTypeOffset));
     __ Cmp(x7, FEEDBACK_VECTOR_TYPE);
@@ -1582,7 +1579,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
   }
   __ CopyDoubleWords(stack_addr, last_arg_addr, slots_to_copy,
-                     TurboAssembler::kDstLessThanSrcAndReverse);
+                     MacroAssembler::kDstLessThanSrcAndReverse);
   if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
     // Store "undefined" as the receiver arg if we need to.
@@ -1732,16 +1729,16 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
   // get the custom trampoline, otherwise grab the entry address of the global
   // trampoline.
   __ Ldr(x1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       x1, FieldMemOperand(x1, SharedFunctionInfo::kFunctionDataOffset));
   __ CompareObjectType(x1, kInterpreterDispatchTableRegister,
                        kInterpreterDispatchTableRegister,
                        INTERPRETER_DATA_TYPE);
   __ B(ne, &builtin_trampoline);
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       x1, FieldMemOperand(x1, InterpreterData::kInterpreterTrampolineOffset));
   __ LoadCodeEntry(x1, x1);
   __ B(&trampoline_loaded);
@@ -1882,7 +1879,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
   // Restore fp, lr.
   __ Mov(sp, fp);
-  __ Pop<TurboAssembler::kAuthLR>(fp, lr);
+  __ Pop<MacroAssembler::kAuthLR>(fp, lr);
   __ LoadEntryFromBuiltinIndex(builtin);
   __ Jump(builtin);
@@ -1997,7 +1994,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
   // Load deoptimization data from the code object.
// = [#deoptimization_data_offset] - __ LoadTaggedPointerField( + __ LoadTaggedField( x1, FieldMemOperand( x0, InstructionStream::kDeoptimizationDataOrInterpreterDataOffset)); @@ -2069,7 +2066,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { __ Peek(arg_array, 2 * kSystemPointerSize); __ bind(&done); } - __ DropArguments(argc, TurboAssembler::kCountIncludesReceiver); + __ DropArguments(argc, MacroAssembler::kCountIncludesReceiver); __ PushArgument(this_arg); // ----------- S t a t e ------------- @@ -2158,7 +2155,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) { __ SlotAddress(copy_from, count); __ Add(copy_to, copy_from, kSystemPointerSize); __ CopyDoubleWords(copy_to, copy_from, count, - TurboAssembler::kSrcLessThanDst); + MacroAssembler::kSrcLessThanDst); __ Drop(2); } @@ -2206,7 +2203,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { __ Peek(arguments_list, 3 * kSystemPointerSize); __ bind(&done); } - __ DropArguments(argc, TurboAssembler::kCountIncludesReceiver); + __ DropArguments(argc, MacroAssembler::kCountIncludesReceiver); __ PushArgument(this_argument); // ----------- S t a t e ------------- @@ -2264,7 +2261,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { __ bind(&done); } - __ DropArguments(argc, TurboAssembler::kCountIncludesReceiver); + __ DropArguments(argc, MacroAssembler::kCountIncludesReceiver); // Push receiver (undefined). __ PushArgument(undefined_value); @@ -2348,7 +2345,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, // Allow x2 to be a FixedArray, or a FixedDoubleArray if x4 == 0. Label ok, fail; __ AssertNotSmi(x2, AbortReason::kOperandIsNotAFixedArray); - __ LoadTaggedPointerField(x10, FieldMemOperand(x2, HeapObject::kMapOffset)); + __ LoadTaggedField(x10, FieldMemOperand(x2, HeapObject::kMapOffset)); __ Ldrh(x13, FieldMemOperand(x10, Map::kInstanceTypeOffset)); __ Cmp(x13, FIXED_ARRAY_TYPE); __ B(eq, &ok); @@ -2394,7 +2391,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, __ Add(argc, argc, len); // Update new argc. __ Bind(&loop); __ Sub(len, len, 1); - __ LoadAnyTaggedField(scratch, MemOperand(src, kTaggedSize, PostIndex)); + __ LoadTaggedField(scratch, MemOperand(src, kTaggedSize, PostIndex)); __ CmpTagged(scratch, the_hole_value); __ Csel(scratch, scratch, undefined_value, ne); __ Str(scratch, MemOperand(dst, kSystemPointerSize, PostIndex)); @@ -2426,7 +2423,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, if (mode == CallOrConstructMode::kConstruct) { Label new_target_constructor, new_target_not_constructor; __ JumpIfSmi(x3, &new_target_not_constructor); - __ LoadTaggedPointerField(x5, FieldMemOperand(x3, HeapObject::kMapOffset)); + __ LoadTaggedField(x5, FieldMemOperand(x3, HeapObject::kMapOffset)); __ Ldrb(x5, FieldMemOperand(x5, Map::kBitFieldOffset)); __ TestAndBranchIfAnySet(x5, Map::Bits1::IsConstructorBit::kMask, &new_target_constructor); @@ -2486,14 +2483,13 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, // ----------------------------------- __ AssertCallableFunction(x1); - __ LoadTaggedPointerField( + __ LoadTaggedField( x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset)); // Enter the context of the function; ToObject has to run in the function // context, and we also need to take the global proxy from the function // context in case of conversion. 
- __ LoadTaggedPointerField(cp, - FieldMemOperand(x1, JSFunction::kContextOffset)); + __ LoadTaggedField(cp, FieldMemOperand(x1, JSFunction::kContextOffset)); // We need to convert the receiver for non-native sloppy mode functions. Label done_convert; __ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kFlagsOffset)); @@ -2545,7 +2541,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ Pop(cp, x1, x0, padreg); __ SmiUntag(x0); } - __ LoadTaggedPointerField( + __ LoadTaggedField( x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset)); __ Bind(&convert_receiver); } @@ -2579,7 +2575,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { // Load [[BoundArguments]] into x2 and length of that into x4. Label no_bound_arguments; - __ LoadTaggedPointerField( + __ LoadTaggedField( bound_argv, FieldMemOperand(x1, JSBoundFunction::kBoundArgumentsOffset)); __ SmiUntagField(bound_argc, FieldMemOperand(bound_argv, FixedArray::kLengthOffset)); @@ -2662,7 +2658,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { __ SlotAddress(copy_to, total_argc); __ Sub(copy_from, copy_to, kSystemPointerSize); __ CopyDoubleWords(copy_to, copy_from, argc, - TurboAssembler::kSrcLessThanDst); + MacroAssembler::kSrcLessThanDst); } } @@ -2681,8 +2677,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { __ SlotAddress(copy_to, 1); __ Bind(&loop); __ Sub(counter, counter, 1); - __ LoadAnyTaggedField(scratch, - MemOperand(bound_argv, kTaggedSize, PostIndex)); + __ LoadTaggedField(scratch, + MemOperand(bound_argv, kTaggedSize, PostIndex)); __ Str(scratch, MemOperand(copy_to, kSystemPointerSize, PostIndex)); __ Cbnz(counter, &loop); } @@ -2703,15 +2699,15 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { __ AssertBoundFunction(x1); // Patch the receiver to [[BoundThis]]. - __ LoadAnyTaggedField(x10, - FieldMemOperand(x1, JSBoundFunction::kBoundThisOffset)); + __ LoadTaggedField(x10, + FieldMemOperand(x1, JSBoundFunction::kBoundThisOffset)); __ Poke(x10, __ ReceiverOperand(x0)); // Push the [[BoundArguments]] onto the stack. Generate_PushBoundArguments(masm); // Call the [[BoundTargetFunction]] via the Call builtin. - __ LoadTaggedPointerField( + __ LoadTaggedField( x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset)); __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny), RelocInfo::CODE_TARGET); @@ -2812,7 +2808,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) { Label call_generic_stub; // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric. - __ LoadTaggedPointerField( + __ LoadTaggedField( x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset)); __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset)); __ TestAndBranchIfAllClear( @@ -2844,13 +2840,13 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) { Label done; __ CmpTagged(x1, x3); __ B(ne, &done); - __ LoadTaggedPointerField( + __ LoadTaggedField( x3, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset)); __ Bind(&done); } // Construct the [[BoundTargetFunction]] via the Construct builtin. - __ LoadTaggedPointerField( + __ LoadTaggedField( x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset)); __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET); } @@ -2874,8 +2870,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) { __ JumpIfSmi(target, &non_constructor); // Check if target has a [[Construct]] internal method. 
-  __ LoadTaggedPointerField(map,
-                            FieldMemOperand(target, HeapObject::kMapOffset));
+  __ LoadTaggedField(map, FieldMemOperand(target, HeapObject::kMapOffset));
   {
     Register flags = x2;
     DCHECK(!AreAliased(argc, target, map, instance_type, flags));
@@ -2976,12 +2971,11 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
   Register scratch = x10;
   Label allocate_vector, done;
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       vector, FieldMemOperand(kWasmInstanceRegister,
                               WasmInstanceObject::kFeedbackVectorsOffset));
   __ Add(vector, vector, Operand(func_index, LSL, kTaggedSizeLog2));
-  __ LoadTaggedPointerField(vector,
-                            FieldMemOperand(vector, FixedArray::kHeaderSize));
+  __ LoadTaggedField(vector, FieldMemOperand(vector, FixedArray::kHeaderSize));
   __ JumpIfSmi(vector, &allocate_vector);
   __ bind(&done);
   __ Push(vector, xzr);
@@ -2996,7 +2990,7 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
   // Save registers.
   __ PushXRegList(kSavedGpRegs);
   __ PushQRegList(kSavedFpRegs);
-  __ Push<TurboAssembler::kSignLR>(lr, xzr);  // xzr is for alignment.
+  __ Push<MacroAssembler::kSignLR>(lr, xzr);  // xzr is for alignment.
   // Arguments to the runtime function: instance, func_index, and an
   // additional stack slot for the NativeModule. The first pushed register
@@ -3008,7 +3002,7 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
   __ Mov(vector, kReturnRegister0);
   // Restore registers and frame type.
-  __ Pop<TurboAssembler::kAuthLR>(xzr, lr);
+  __ Pop<MacroAssembler::kAuthLR>(xzr, lr);
   __ PopQRegList(kSavedFpRegs);
   __ PopXRegList(kSavedGpRegs);
   // Restore the instance from the frame.
@@ -3121,8 +3115,8 @@ void PrepareForBuiltinCall(MacroAssembler* masm, MemOperand GCScanSlotPlace,
          MemOperand(sp, -2 * kSystemPointerSize, PreIndex));
   // We had to prepare the parameters for the Call: we have to put the context
   // into kContextRegister.
-  __ LoadAnyTaggedField(
-      kContextRegister,  // cp(x27)
+  __ LoadTaggedField(
+      kContextRegister,  // cp(x27)
       MemOperand(wasm_instance, wasm::ObjectAccess::ToTagged(
                                     WasmInstanceObject::kNativeContextOffset)));
 }
@@ -3210,7 +3204,7 @@ void AllocateSuspender(MacroAssembler* masm, Register function_data,
          MemOperand(fp, BuiltinWasmWrapperConstants::kGCScanSlotCountOffset));
   __ Stp(wasm_instance, function_data,
          MemOperand(sp, -2 * kSystemPointerSize, PreIndex));
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       kContextRegister,
       MemOperand(wasm_instance, wasm::ObjectAccess::ToTagged(
                                     WasmInstanceObject::kNativeContextOffset)));
@@ -3256,15 +3250,14 @@ void ReloadParentContinuation(MacroAssembler* masm, Register wasm_instance,
                         wasm::JumpBuffer::Retired);
   }
   Register parent = tmp2;
-  __ LoadAnyTaggedField(
-      parent,
-      FieldMemOperand(active_continuation,
-                      WasmContinuationObject::kParentOffset));
+  __ LoadTaggedField(parent,
+                     FieldMemOperand(active_continuation,
+                                     WasmContinuationObject::kParentOffset));
   // Update active continuation root.
int32_t active_continuation_offset = - TurboAssembler::RootRegisterOffsetForRootIndex( - RootIndex::kActiveContinuation); + MacroAssembler::RootRegisterOffsetForRootIndex( + RootIndex::kActiveContinuation); __ Str(parent, MemOperand(kRootRegister, active_continuation_offset)); jmpbuf = parent; __ LoadExternalPointerField( @@ -3293,7 +3286,7 @@ void RestoreParentSuspender(MacroAssembler* masm, Register tmp1, FieldMemOperand(suspender, WasmSuspenderObject::kStateOffset); __ Move(tmp2, Smi::FromInt(WasmSuspenderObject::kInactive)); __ StoreTaggedField(tmp2, state_loc); - __ LoadAnyTaggedField( + __ LoadTaggedField( suspender, FieldMemOperand(suspender, WasmSuspenderObject::kParentOffset)); __ CompareRoot(suspender, RootIndex::kUndefinedValue); @@ -3313,8 +3306,8 @@ void RestoreParentSuspender(MacroAssembler* masm, Register tmp1, __ StoreTaggedField(tmp2, state_loc); __ bind(&undefined); int32_t active_suspender_offset = - TurboAssembler::RootRegisterOffsetForRootIndex( - RootIndex::kActiveSuspender); + MacroAssembler::RootRegisterOffsetForRootIndex( + RootIndex::kActiveSuspender); __ Str(suspender, MemOperand(kRootRegister, active_suspender_offset)); } @@ -3322,17 +3315,16 @@ void LoadFunctionDataAndWasmInstance(MacroAssembler* masm, Register function_data, Register wasm_instance) { Register closure = function_data; - __ LoadAnyTaggedField( + __ LoadTaggedField( function_data, MemOperand( closure, wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction())); - __ LoadAnyTaggedField( + __ LoadTaggedField( function_data, - FieldMemOperand(function_data, - SharedFunctionInfo::kFunctionDataOffset)); + FieldMemOperand(function_data, SharedFunctionInfo::kFunctionDataOffset)); - __ LoadAnyTaggedField( + __ LoadTaggedField( wasm_instance, FieldMemOperand(function_data, WasmExportedFunctionData::kInstanceOffset)); @@ -3573,7 +3565,7 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { // A result of AllocateSuspender is in the return register. __ Str(suspender, MemOperand(fp, kSuspenderOffset)); DEFINE_SCOPED(target_continuation); - __ LoadAnyTaggedField( + __ LoadTaggedField( target_continuation, FieldMemOperand(suspender, WasmSuspenderObject::kContinuationOffset)); FREE_REG(suspender); @@ -4229,7 +4221,7 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { __ Mov(scratch, 1); __ Str(scratch, MemOperand(thread_in_wasm_flag_addr, 0)); - __ LoadAnyTaggedField( + __ LoadTaggedField( function_entry, FieldMemOperand(function_data, WasmExportedFunctionData::kInternalOffset)); @@ -4317,7 +4309,7 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { // expected to be on the top of the stack). // We cannot use just the ret instruction for this, because we cannot pass // the number of slots to remove in a Register as an argument. - __ DropArguments(param_count, TurboAssembler::kCountExcludesReceiver); + __ DropArguments(param_count, MacroAssembler::kCountExcludesReceiver); __ Ret(lr); // ------------------------------------------- @@ -4497,7 +4489,7 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) { regs.ResetExcept(promise, suspender, continuation); DEFINE_REG(suspender_continuation); - __ LoadAnyTaggedField( + __ LoadTaggedField( suspender_continuation, FieldMemOperand(suspender, WasmSuspenderObject::kContinuationOffset)); if (v8_flags.debug_code) { @@ -4518,18 +4510,19 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) { // Update roots. 
// ------------------------------------------- DEFINE_REG(caller); - __ LoadAnyTaggedField(caller, - FieldMemOperand(suspender_continuation, - WasmContinuationObject::kParentOffset)); + __ LoadTaggedField(caller, + FieldMemOperand(suspender_continuation, + WasmContinuationObject::kParentOffset)); int32_t active_continuation_offset = - TurboAssembler::RootRegisterOffsetForRootIndex( - RootIndex::kActiveContinuation); + MacroAssembler::RootRegisterOffsetForRootIndex( + RootIndex::kActiveContinuation); __ Str(caller, MemOperand(kRootRegister, active_continuation_offset)); DEFINE_REG(parent); - __ LoadAnyTaggedField( + __ LoadTaggedField( parent, FieldMemOperand(suspender, WasmSuspenderObject::kParentOffset)); int32_t active_suspender_offset = - TurboAssembler::RootRegisterOffsetForRootIndex(RootIndex::kActiveSuspender); + MacroAssembler::RootRegisterOffsetForRootIndex( + RootIndex::kActiveSuspender); __ Str(parent, MemOperand(kRootRegister, active_suspender_offset)); regs.ResetExcept(promise, caller); @@ -4596,7 +4589,7 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) { // Load suspender from closure. // ------------------------------------------- DEFINE_REG(sfi); - __ LoadAnyTaggedField( + __ LoadTaggedField( sfi, MemOperand( closure, @@ -4606,12 +4599,12 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) { // RecordWriteField calls later. DEFINE_PINNED(suspender, WriteBarrierDescriptor::ObjectRegister()); DEFINE_REG(function_data); - __ LoadAnyTaggedField( + __ LoadTaggedField( function_data, FieldMemOperand(sfi, SharedFunctionInfo::kFunctionDataOffset)); // The write barrier uses a fixed register for the host object (rdi). The next // barrier is on the suspender, so load it in rdi directly. - __ LoadAnyTaggedField( + __ LoadTaggedField( suspender, FieldMemOperand(function_data, WasmResumeData::kSuspenderOffset)); // Check the suspender state. 
@@ -4660,8 +4653,8 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
       scratch, FieldMemOperand(suspender, WasmSuspenderObject::kStateOffset));
   int32_t active_suspender_offset =
-      TurboAssembler::RootRegisterOffsetForRootIndex(
-          RootIndex::kActiveSuspender);
+      MacroAssembler::RootRegisterOffsetForRootIndex(
+          RootIndex::kActiveSuspender);
   __ Str(suspender, MemOperand(kRootRegister, active_suspender_offset));
   // Next line we are going to load a field from suspender, but we have to use
@@ -4670,10 +4663,9 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
   FREE_REG(suspender);
   DEFINE_PINNED(target_continuation, WriteBarrierDescriptor::ObjectRegister());
   suspender = target_continuation;
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       target_continuation,
-      FieldMemOperand(suspender,
-                      WasmSuspenderObject::kContinuationOffset));
+      FieldMemOperand(suspender, WasmSuspenderObject::kContinuationOffset));
   suspender = no_reg;
   __ StoreTaggedField(
@@ -4685,8 +4677,8 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
       active_continuation, kLRHasBeenSaved, SaveFPRegsMode::kIgnore);
   FREE_REG(active_continuation);
   int32_t active_continuation_offset =
-      TurboAssembler::RootRegisterOffsetForRootIndex(
-          RootIndex::kActiveContinuation);
+      MacroAssembler::RootRegisterOffsetForRootIndex(
+          RootIndex::kActiveContinuation);
   __ Str(target_continuation,
          MemOperand(kRootRegister, active_continuation_offset));
@@ -4731,7 +4723,7 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
   __ bind(&suspend);
   __ LeaveFrame(StackFrame::STACK_SWITCH);
   // Pop receiver + parameter.
-  __ DropArguments(2, TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(2, MacroAssembler::kCountIncludesReceiver);
   __ Ret(lr);
 }
 }  // namespace
@@ -5320,12 +5312,12 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
   DCHECK(!AreAliased(receiver, holder, callback, data, undef, isolate_address,
                      name));
-  __ LoadAnyTaggedField(data,
-                        FieldMemOperand(callback, AccessorInfo::kDataOffset));
+  __ LoadTaggedField(data,
+                     FieldMemOperand(callback, AccessorInfo::kDataOffset));
   __ LoadRoot(undef, RootIndex::kUndefinedValue);
   __ Mov(isolate_address, ExternalReference::isolate_address(masm->isolate()));
-  __ LoadTaggedPointerField(
-      name, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+  __ LoadTaggedField(name,
+                     FieldMemOperand(callback, AccessorInfo::kNameOffset));
   // PropertyCallbackArguments:
   // receiver, data, return value, return value default, isolate, holder,
@@ -5384,9 +5376,9 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
   // DirectCEntry places the return address on the stack (updated by the GC),
   // making the call GC safe. The irregexp backend relies on this.
-  __ Poke<TurboAssembler::kSignLR>(lr, 0);  // Store the return address.
+  __ Poke<MacroAssembler::kSignLR>(lr, 0);  // Store the return address.
   __ Blr(x10);  // Call the C++ function.
-  __ Peek<TurboAssembler::kAuthLR>(lr, 0);  // Return to calling code.
+  __ Peek<MacroAssembler::kAuthLR>(lr, 0);  // Return to calling code.
   __ AssertFPCRState();
   __ Ret();
 }
@@ -5696,10 +5688,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   // Get the InstructionStream object from the shared function info.
Register code_obj = x22; - __ LoadTaggedPointerField( + __ LoadTaggedField( code_obj, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); - __ LoadTaggedPointerField( + __ LoadTaggedField( code_obj, FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset)); @@ -5731,11 +5723,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, // Load the feedback vector. Register feedback_vector = x2; - __ LoadTaggedPointerField( - feedback_vector, - FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); - __ LoadTaggedPointerField( - feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset)); + __ LoadTaggedField(feedback_vector, + FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); + __ LoadTaggedField(feedback_vector, + FieldMemOperand(feedback_vector, Cell::kValueOffset)); Label install_baseline_code; // Check if feedback vector is valid. If not, call prepare for baseline to diff --git a/src/builtins/ia32/builtins-ia32.cc b/src/builtins/ia32/builtins-ia32.cc index 6dbdc29c89..04205c34f5 100644 --- a/src/builtins/ia32/builtins-ia32.cc +++ b/src/builtins/ia32/builtins-ia32.cc @@ -125,8 +125,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { } // Remove caller arguments from the stack and return. - __ DropArguments(edx, ecx, TurboAssembler::kCountIsSmi, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(edx, ecx, MacroAssembler::kCountIsSmi, + MacroAssembler::kCountIncludesReceiver); __ ret(0); __ bind(&stack_overflow); @@ -280,8 +280,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ LeaveFrame(StackFrame::CONSTRUCT); // Remove caller arguments from the stack and return. - __ DropArguments(edx, ecx, TurboAssembler::kCountIsSmi, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(edx, ecx, MacroAssembler::kCountIsSmi, + MacroAssembler::kCountIncludesReceiver); __ ret(0); // Otherwise we do a smi check and fall through to check if the return value @@ -768,8 +768,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, __ leave(); // Drop receiver + arguments. - __ DropArguments(params_size, scratch2, TurboAssembler::kCountIsBytes, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(params_size, scratch2, MacroAssembler::kCountIsBytes, + MacroAssembler::kCountIncludesReceiver); } // Advance the current bytecode offset. This simulates what all bytecode @@ -1810,8 +1810,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { } __ bind(&no_this_arg); __ DropArgumentsAndPushNewReceiver(eax, edi, ecx, - TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); // Restore receiver to edi. __ movd(edi, xmm0); @@ -1919,8 +1919,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { __ movd(xmm0, edx); __ DropArgumentsAndPushNewReceiver(eax, ecx, edx, - TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); // Restore argumentsList. __ movd(edx, xmm0); @@ -1978,8 +1978,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { __ DropArgumentsAndPushNewReceiver( eax, masm->RootAsOperand(RootIndex::kUndefinedValue), ecx, - TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); // Restore argumentsList. 
__ movd(ecx, xmm0); diff --git a/src/builtins/loong64/builtins-loong64.cc b/src/builtins/loong64/builtins-loong64.cc index 2fe4a2a914..c5d18055d0 100644 --- a/src/builtins/loong64/builtins-loong64.cc +++ b/src/builtins/loong64/builtins-loong64.cc @@ -112,8 +112,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { } // Remove caller arguments from the stack and return. - __ DropArguments(t3, TurboAssembler::kCountIsSmi, - TurboAssembler::kCountIncludesReceiver, t3); + __ DropArguments(t3, MacroAssembler::kCountIsSmi, + MacroAssembler::kCountIncludesReceiver, t3); __ Ret(); } @@ -267,8 +267,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ LeaveFrame(StackFrame::CONSTRUCT); // Remove caller arguments from the stack and return. - __ DropArguments(a1, TurboAssembler::kCountIsSmi, - TurboAssembler::kCountIncludesReceiver, a4); + __ DropArguments(a1, MacroAssembler::kCountIsSmi, + MacroAssembler::kCountIncludesReceiver, a4); __ Ret(); __ bind(&check_receiver); @@ -803,8 +803,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, __ LeaveFrame(StackFrame::INTERPRETED); // Drop receiver + arguments. - __ DropArguments(params_size, TurboAssembler::kCountIsBytes, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(params_size, MacroAssembler::kCountIsBytes, + MacroAssembler::kCountIncludesReceiver); } // Advance the current bytecode offset. This simulates what all bytecode @@ -1328,7 +1328,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args, // Push the arguments. __ PushArray(start_address, num_args, scratch, scratch2, - TurboAssembler::PushArrayOrder::kReverse); + MacroAssembler::PushArrayOrder::kReverse); } // static @@ -1794,8 +1794,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { __ Movz(arg_array, undefined_value, scratch); // if argc == 1 __ Ld_d(receiver, MemOperand(sp, 0)); __ DropArgumentsAndPushNewReceiver(argc, this_arg, - TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- @@ -1889,8 +1889,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { __ Movz(arguments_list, undefined_value, scratch); // if argc == 2 __ DropArgumentsAndPushNewReceiver(argc, this_argument, - TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- @@ -1949,8 +1949,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { __ Movz(new_target, target, scratch); // if argc == 2 __ DropArgumentsAndPushNewReceiver(argc, undefined_value, - TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- diff --git a/src/builtins/mips64/builtins-mips64.cc b/src/builtins/mips64/builtins-mips64.cc index 3329065e97..cf40b1062f 100644 --- a/src/builtins/mips64/builtins-mips64.cc +++ b/src/builtins/mips64/builtins-mips64.cc @@ -112,8 +112,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { } // Remove caller arguments from the stack and return. 
- __ DropArguments(t3, TurboAssembler::kCountIsSmi, - TurboAssembler::kCountIncludesReceiver, t3); + __ DropArguments(t3, MacroAssembler::kCountIsSmi, + MacroAssembler::kCountIncludesReceiver, t3); __ Ret(); } @@ -267,8 +267,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ LeaveFrame(StackFrame::CONSTRUCT); // Remove caller arguments from the stack and return. - __ DropArguments(a1, TurboAssembler::kCountIsSmi, - TurboAssembler::kCountIncludesReceiver, a4); + __ DropArguments(a1, MacroAssembler::kCountIsSmi, + MacroAssembler::kCountIncludesReceiver, a4); __ Ret(); __ bind(&check_receiver); @@ -804,8 +804,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, __ LeaveFrame(StackFrame::INTERPRETED); // Drop receiver + arguments. - __ DropArguments(params_size, TurboAssembler::kCountIsBytes, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(params_size, MacroAssembler::kCountIsBytes, + MacroAssembler::kCountIncludesReceiver); } // Advance the current bytecode offset. This simulates what all bytecode @@ -1320,7 +1320,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args, // Push the arguments. __ PushArray(start_address, num_args, scratch, scratch2, - TurboAssembler::PushArrayOrder::kReverse); + MacroAssembler::PushArrayOrder::kReverse); } // static @@ -1784,8 +1784,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { __ Movz(arg_array, undefined_value, scratch); // if argc == 1 __ Ld(receiver, MemOperand(sp)); __ DropArgumentsAndPushNewReceiver(argc, this_arg, - TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- @@ -1881,8 +1881,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { __ Movz(arguments_list, undefined_value, scratch); // if argc == 2 __ DropArgumentsAndPushNewReceiver(argc, this_argument, - TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- @@ -1941,8 +1941,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { __ Movz(new_target, target, scratch); // if argc == 2 __ DropArgumentsAndPushNewReceiver(argc, undefined_value, - TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- diff --git a/src/builtins/ppc/builtins-ppc.cc b/src/builtins/ppc/builtins-ppc.cc index a1c992864f..99c77b7128 100644 --- a/src/builtins/ppc/builtins-ppc.cc +++ b/src/builtins/ppc/builtins-ppc.cc @@ -64,7 +64,7 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm, } __ CmpS32(scratch1, Operand(INTERPRETER_DATA_TYPE), r0); __ bne(&done); - __ LoadTaggedPointerField( + __ LoadTaggedField( sfi_data, FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset), r0); @@ -120,10 +120,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, // Get the InstructionStream object from the shared function info. 
Register code_obj = r9; - __ LoadTaggedPointerField( + __ LoadTaggedField( code_obj, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset), r0); - __ LoadTaggedPointerField( + __ LoadTaggedField( code_obj, FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset), r0); @@ -155,12 +155,11 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, // Load the feedback vector. Register feedback_vector = r5; - __ LoadTaggedPointerField( - feedback_vector, - FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0); - __ LoadTaggedPointerField( - feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset), - r0); + __ LoadTaggedField(feedback_vector, + FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), + r0); + __ LoadTaggedField(feedback_vector, + FieldMemOperand(feedback_vector, Cell::kValueOffset), r0); Label install_baseline_code; // Check if feedback vector is valid. If not, call prepare for baseline to @@ -361,8 +360,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // Leave construct frame. } // Remove caller arguments from the stack and return. - __ DropArguments(scratch, TurboAssembler::kCountIsSmi, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(scratch, MacroAssembler::kCountIsSmi, + MacroAssembler::kCountIncludesReceiver); __ blr(); __ bind(&stack_overflow); @@ -431,7 +430,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source, // Load deoptimization data from the code object. // = [#deoptimization_data_offset] - __ LoadTaggedPointerField( + __ LoadTaggedField( r4, FieldMemOperand( r3, InstructionStream::kDeoptimizationDataOrInterpreterDataOffset), @@ -495,7 +494,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // -- sp[4*kSystemPointerSize]: context // ----------------------------------- - __ LoadTaggedPointerField( + __ LoadTaggedField( r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0); __ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset)); __ DecodeField(r7); @@ -611,8 +610,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ LeaveFrame(StackFrame::CONSTRUCT); // Remove caller arguments from the stack and return. - __ DropArguments(r4, TurboAssembler::kCountIsSmi, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(r4, MacroAssembler::kCountIsSmi, + MacroAssembler::kCountIncludesReceiver); __ blr(); __ bind(&check_receiver); @@ -660,10 +659,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ AssertGeneratorObject(r4); // Load suspended function and context. - __ LoadTaggedPointerField( + __ LoadTaggedField( r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset), r0); - __ LoadTaggedPointerField(cp, FieldMemOperand(r7, JSFunction::kContextOffset), - r0); + __ LoadTaggedField(cp, FieldMemOperand(r7, JSFunction::kContextOffset), r0); // Flood function if we are stepping. Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator; @@ -703,12 +701,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // ----------------------------------- // Copy the function arguments from the generator object's register file. 
- __ LoadTaggedPointerField( + __ LoadTaggedField( r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset), r0); __ LoadU16( r6, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset)); __ subi(r6, r6, Operand(kJSArgcReceiverSlots)); - __ LoadTaggedPointerField( + __ LoadTaggedField( r5, FieldMemOperand(r4, JSGeneratorObject::kParametersAndRegistersOffset), r0); { @@ -719,14 +717,14 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ blt(&done_loop); __ ShiftLeftU64(r10, r6, Operand(kTaggedSizeLog2)); __ add(scratch, r5, r10); - __ LoadAnyTaggedField( - scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize), r0); + __ LoadTaggedField(scratch, + FieldMemOperand(scratch, FixedArray::kHeaderSize), r0); __ Push(scratch); __ b(&loop); __ bind(&done_loop); // Push receiver. - __ LoadAnyTaggedField( + __ LoadTaggedField( scratch, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset), r0); __ Push(scratch); } @@ -734,9 +732,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // Underlying function needs to have bytecode available. if (v8_flags.debug_code) { Label is_baseline; - __ LoadTaggedPointerField( + __ LoadTaggedField( r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset), r0); - __ LoadTaggedPointerField( + __ LoadTaggedField( r6, FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset), r0); GetSharedFunctionInfoBytecodeOrBaseline(masm, r6, ip, &is_baseline); __ CompareObjectType(r6, r6, r6, BYTECODE_ARRAY_TYPE); @@ -746,7 +744,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // Resume (Ignition/TurboFan) generator object. { - __ LoadTaggedPointerField( + __ LoadTaggedField( r3, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset), r0); __ LoadU16(r3, FieldMemOperand( r3, SharedFunctionInfo::kFormalParameterCountOffset)); @@ -756,8 +754,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ mr(r6, r4); __ mr(r4, r7); static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch"); - __ LoadTaggedPointerField(r5, FieldMemOperand(r4, JSFunction::kCodeOffset), - r0); + __ LoadTaggedField(r5, FieldMemOperand(r4, JSFunction::kCodeOffset), r0); __ JumpCodeObject(r5); } @@ -769,7 +766,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ PushRoot(RootIndex::kTheHoleValue); __ CallRuntime(Runtime::kDebugOnFunctionCall); __ Pop(r4); - __ LoadTaggedPointerField( + __ LoadTaggedField( r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset), r0); } __ b(&stepping_prepared); @@ -780,7 +777,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ Push(r4); __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator); __ Pop(r4); - __ LoadTaggedPointerField( + __ LoadTaggedField( r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset), r0); } __ b(&stepping_prepared); @@ -1119,8 +1116,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, // Leave the frame (also dropping the register file). __ LeaveFrame(StackFrame::INTERPRETED); - __ DropArguments(params_size, TurboAssembler::kCountIsBytes, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(params_size, MacroAssembler::kCountIsBytes, + MacroAssembler::kCountIncludesReceiver); } // Advance the current bytecode offset. 
This simulates what all bytecode @@ -1212,12 +1209,11 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { BaselineOutOfLinePrologueDescriptor::kClosure); // Load the feedback vector from the closure. Register feedback_vector = ip; - __ LoadTaggedPointerField( - feedback_vector, - FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0); - __ LoadTaggedPointerField( - feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset), - r0); + __ LoadTaggedField(feedback_vector, + FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), + r0); + __ LoadTaggedField(feedback_vector, + FieldMemOperand(feedback_vector, Cell::kValueOffset), r0); __ AssertFeedbackVector(feedback_vector, r11); // Check for an tiering state. @@ -1378,10 +1374,10 @@ void Builtins::Generate_InterpreterEntryTrampoline( // Get the bytecode array from the function object and load it into // kInterpreterBytecodeArrayRegister. - __ LoadTaggedPointerField( + __ LoadTaggedField( r7, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset), r0); // Load original bytecode array or the debug copy. - __ LoadTaggedPointerField( + __ LoadTaggedField( kInterpreterBytecodeArrayRegister, FieldMemOperand(r7, SharedFunctionInfo::kFunctionDataOffset), r0); @@ -1397,17 +1393,16 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ bne(&compile_lazy); // Load the feedback vector from the closure. - __ LoadTaggedPointerField( - feedback_vector, - FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0); - __ LoadTaggedPointerField( - feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset), - r0); + __ LoadTaggedField(feedback_vector, + FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), + r0); + __ LoadTaggedField(feedback_vector, + FieldMemOperand(feedback_vector, Cell::kValueOffset), r0); Label push_stack_frame; // Check if feedback vector is valid. If valid, check for optimized code // and update invocation count. Otherwise, setup the stack frame. - __ LoadTaggedPointerField( + __ LoadTaggedField( r7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset), r0); __ LoadU16(r7, FieldMemOperand(r7, Map::kInstanceTypeOffset)); __ cmpi(r7, Operand(FEEDBACK_VECTOR_TYPE)); @@ -1589,17 +1584,17 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ bind(&is_baseline); { // Load the feedback vector from the closure. - __ LoadTaggedPointerField( + __ LoadTaggedField( feedback_vector, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0); - __ LoadTaggedPointerField( - feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset), - r0); + __ LoadTaggedField(feedback_vector, + FieldMemOperand(feedback_vector, Cell::kValueOffset), + r0); Label install_baseline_code; // Check if feedback vector is valid. If not, call prepare for baseline to // allocate it. - __ LoadTaggedPointerField( + __ LoadTaggedField( ip, FieldMemOperand(feedback_vector, HeapObject::kMapOffset), r0); __ LoadU16(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset)); __ CmpS32(ip, Operand(FEEDBACK_VECTOR_TYPE), r0); @@ -1636,7 +1631,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args, __ sub(start_address, start_address, scratch); // Push the arguments. 
__ PushArray(start_address, num_args, scratch, r0, - TurboAssembler::PushArrayOrder::kReverse); + MacroAssembler::PushArrayOrder::kReverse); } // static @@ -1773,16 +1768,16 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { // get the custom trampoline, otherwise grab the entry address of the global // trampoline. __ LoadU64(r5, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); - __ LoadTaggedPointerField( + __ LoadTaggedField( r5, FieldMemOperand(r5, JSFunction::kSharedFunctionInfoOffset), r0); - __ LoadTaggedPointerField( + __ LoadTaggedField( r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset), r0); __ CompareObjectType(r5, kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister, INTERPRETER_DATA_TYPE); __ bne(&builtin_trampoline); - __ LoadTaggedPointerField( + __ LoadTaggedField( r5, FieldMemOperand(r5, InterpreterData::kInterpreterTrampolineOffset), r0); __ LoadCodeEntry(r5, r5); @@ -2027,8 +2022,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { __ LoadU64(r5, MemOperand(sp, 2 * kSystemPointerSize)); // argArray __ bind(&done); - __ DropArgumentsAndPushNewReceiver(r3, r8, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ DropArgumentsAndPushNewReceiver(r3, r8, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- @@ -2111,8 +2106,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { __ LoadU64(r5, MemOperand(sp, 3 * kSystemPointerSize)); // argArray __ bind(&done); - __ DropArgumentsAndPushNewReceiver(r3, r8, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ DropArgumentsAndPushNewReceiver(r3, r8, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- @@ -2160,8 +2155,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { __ blt(&done); __ LoadU64(r6, MemOperand(sp, 3 * kSystemPointerSize)); // argArray __ bind(&done); - __ DropArgumentsAndPushNewReceiver(r3, r7, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ DropArgumentsAndPushNewReceiver(r3, r7, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- @@ -2240,8 +2235,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, // Allow r5 to be a FixedArray, or a FixedDoubleArray if r7 == 0. 
Label ok, fail; __ AssertNotSmi(r5); - __ LoadTaggedPointerField(scratch, - FieldMemOperand(r5, HeapObject::kMapOffset), r0); + __ LoadTaggedField(scratch, FieldMemOperand(r5, HeapObject::kMapOffset), + r0); __ LoadU16(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); __ cmpi(scratch, Operand(FIXED_ARRAY_TYPE)); __ beq(&ok); @@ -2276,7 +2271,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, Operand(FixedArray::kHeaderSize - kHeapObjectTag - kTaggedSize)); __ mtctr(r7); __ bind(&loop); - __ LoadTaggedPointerField(scratch, MemOperand(r5, kTaggedSize), r0); + __ LoadTaggedField(scratch, MemOperand(r5, kTaggedSize), r0); __ addi(r5, r5, Operand(kTaggedSize)); __ CompareRoot(scratch, RootIndex::kTheHoleValue); __ bne(&skip); @@ -2311,8 +2306,8 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, if (mode == CallOrConstructMode::kConstruct) { Label new_target_constructor, new_target_not_constructor; __ JumpIfSmi(r6, &new_target_not_constructor); - __ LoadTaggedPointerField(scratch, - FieldMemOperand(r6, HeapObject::kMapOffset), r0); + __ LoadTaggedField(scratch, FieldMemOperand(r6, HeapObject::kMapOffset), + r0); __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); __ TestBit(scratch, Map::Bits1::IsConstructorBit::kShift, r0); __ bne(&new_target_constructor, cr0); @@ -2395,14 +2390,13 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, // ----------------------------------- __ AssertCallableFunction(r4); - __ LoadTaggedPointerField( + __ LoadTaggedField( r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0); // Enter the context of the function; ToObject has to run in the function // context, and we also need to take the global proxy from the function // context in case of conversion. - __ LoadTaggedPointerField(cp, FieldMemOperand(r4, JSFunction::kContextOffset), - r0); + __ LoadTaggedField(cp, FieldMemOperand(r4, JSFunction::kContextOffset), r0); // We need to convert the receiver for non-native sloppy mode functions. Label done_convert; __ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kFlagsOffset)); @@ -2456,7 +2450,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ Pop(r3, r4); __ SmiUntag(r3); } - __ LoadTaggedPointerField( + __ LoadTaggedField( r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0); __ bind(&convert_receiver); } @@ -2487,7 +2481,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { // Load [[BoundArguments]] into r5 and length of that into r7. Label no_bound_arguments; - __ LoadTaggedPointerField( + __ LoadTaggedField( r5, FieldMemOperand(r4, JSBoundFunction::kBoundArgumentsOffset), r0); __ SmiUntag(r7, FieldMemOperand(r5, FixedArray::kLengthOffset), SetRC, r0); __ beq(&no_bound_arguments, cr0); @@ -2536,7 +2530,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { __ subi(r7, r7, Operand(1)); __ ShiftLeftU64(scratch, r7, Operand(kTaggedSizeLog2)); __ add(scratch, scratch, r5); - __ LoadAnyTaggedField(scratch, MemOperand(scratch), r0); + __ LoadTaggedField(scratch, MemOperand(scratch), r0); __ Push(scratch); __ bdnz(&loop); __ bind(&done); @@ -2559,15 +2553,15 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { __ AssertBoundFunction(r4); // Patch the receiver to [[BoundThis]]. 
- __ LoadAnyTaggedField( - r6, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset), r0); + __ LoadTaggedField(r6, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset), + r0); __ StoreReceiver(r6, r3, ip); // Push the [[BoundArguments]] onto the stack. Generate_PushBoundArguments(masm); // Call the [[BoundTargetFunction]] via the Call builtin. - __ LoadTaggedPointerField( + __ LoadTaggedField( r4, FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset), r0); __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny), RelocInfo::CODE_TARGET); @@ -2667,7 +2661,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) { Label call_generic_stub; // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric. - __ LoadTaggedPointerField( + __ LoadTaggedField( r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0); __ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset)); __ mov(ip, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask)); @@ -2699,12 +2693,12 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) { Label skip; __ CompareTagged(r4, r6); __ bne(&skip); - __ LoadTaggedPointerField( + __ LoadTaggedField( r6, FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset), r0); __ bind(&skip); // Construct the [[BoundTargetFunction]] via the Construct builtin. - __ LoadTaggedPointerField( + __ LoadTaggedField( r4, FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset), r0); __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET); } @@ -2728,8 +2722,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) { __ JumpIfSmi(target, &non_constructor); // Check if target has a [[Construct]] internal method. - __ LoadTaggedPointerField( - map, FieldMemOperand(target, HeapObject::kMapOffset), r0); + __ LoadTaggedField(map, FieldMemOperand(target, HeapObject::kMapOffset), r0); { Register flags = r5; DCHECK(!AreAliased(argc, target, map, instance_type, flags)); @@ -2817,15 +2810,15 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { Register scratch = ip; Label allocate_vector, done; - __ LoadTaggedPointerField( + __ LoadTaggedField( vector, FieldMemOperand(kWasmInstanceRegister, WasmInstanceObject::kFeedbackVectorsOffset), scratch); __ ShiftLeftU64(scratch, func_index, Operand(kTaggedSizeLog2)); __ AddS64(vector, vector, scratch); - __ LoadTaggedPointerField( - vector, FieldMemOperand(vector, FixedArray::kHeaderSize), scratch); + __ LoadTaggedField(vector, FieldMemOperand(vector, FixedArray::kHeaderSize), + scratch); __ JumpIfSmi(vector, &allocate_vector); __ bind(&done); __ push(kWasmInstanceRegister); @@ -3530,16 +3523,16 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { __ push(receiver); // Push data from AccessorInfo. - __ LoadAnyTaggedField( - scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset), r0); + __ LoadTaggedField(scratch, + FieldMemOperand(callback, AccessorInfo::kDataOffset), r0); __ push(scratch); __ LoadRoot(scratch, RootIndex::kUndefinedValue); __ Push(scratch, scratch); __ Move(scratch, ExternalReference::isolate_address(masm->isolate())); __ Push(scratch, holder); __ Push(Smi::zero()); // should_throw_on_error -> false - __ LoadTaggedPointerField( - scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset), r0); + __ LoadTaggedField(scratch, + FieldMemOperand(callback, AccessorInfo::kNameOffset), r0); __ push(scratch); // v8::PropertyCallbackInfo::args_ array and name handle. 
diff --git a/src/builtins/riscv/builtins-riscv.cc b/src/builtins/riscv/builtins-riscv.cc index 030595c51d..ca30c6f732 100644 --- a/src/builtins/riscv/builtins-riscv.cc +++ b/src/builtins/riscv/builtins-riscv.cc @@ -155,7 +155,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { { UseScratchRegisterScope temps(masm); Register func_info = temps.Acquire(); - __ LoadTaggedPointerField( + __ LoadTaggedField( func_info, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); __ Load32U(func_info, FieldMemOperand(func_info, SharedFunctionInfo::kFlagsOffset)); @@ -353,7 +353,7 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm, __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE), Label::Distance::kNear); - __ LoadTaggedPointerField( + __ LoadTaggedField( sfi_data, FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset)); @@ -377,10 +377,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ AssertGeneratorObject(a1); // Load suspended function and context. - __ LoadTaggedPointerField( - a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset)); - __ LoadTaggedPointerField(cp, - FieldMemOperand(a4, JSFunction::kContextOffset)); + __ LoadTaggedField(a4, + FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset)); + __ LoadTaggedField(cp, FieldMemOperand(a4, JSFunction::kContextOffset)); // Flood function if we are stepping. Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator; @@ -417,12 +416,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // context allocation for any variables in generators, the actual argument // values have already been copied into the context and these dummy values // will never be used. - __ LoadTaggedPointerField( + __ LoadTaggedField( a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset)); __ Lhu(a3, FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset)); __ SubWord(a3, a3, Operand(kJSArgcReceiverSlots)); - __ LoadTaggedPointerField( + __ LoadTaggedField( t1, FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset)); { @@ -431,23 +430,23 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ SubWord(a3, a3, Operand(1)); __ Branch(&done_loop, lt, a3, Operand(zero_reg), Label::Distance::kNear); __ CalcScaledAddress(kScratchReg, t1, a3, kTaggedSizeLog2); - __ LoadAnyTaggedField( - kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize)); + __ LoadTaggedField(kScratchReg, + FieldMemOperand(kScratchReg, FixedArray::kHeaderSize)); __ Push(kScratchReg); __ Branch(&loop); __ bind(&done_loop); // Push receiver. - __ LoadAnyTaggedField( - kScratchReg, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset)); + __ LoadTaggedField(kScratchReg, + FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset)); __ Push(kScratchReg); } // Underlying function needs to have bytecode available. if (v8_flags.debug_code) { Label is_baseline; - __ LoadTaggedPointerField( + __ LoadTaggedField( a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset)); - __ LoadTaggedPointerField( + __ LoadTaggedField( a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset)); GetSharedFunctionInfoBytecodeOrBaseline(masm, a3, a0, &is_baseline); __ GetObjectType(a3, a3, a3); @@ -458,7 +457,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // Resume (Ignition/TurboFan) generator object. 
{ - __ LoadTaggedPointerField( + __ LoadTaggedField( a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset)); __ Lhu(a0, FieldMemOperand( a0, SharedFunctionInfo::kFormalParameterCountOffset)); @@ -468,7 +467,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ Move(a3, a1); __ Move(a1, a4); static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); - __ LoadTaggedPointerField(a2, FieldMemOperand(a1, JSFunction::kCodeOffset)); + __ LoadTaggedField(a2, FieldMemOperand(a1, JSFunction::kCodeOffset)); __ JumpCodeObject(a2); } @@ -481,8 +480,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ CallRuntime(Runtime::kDebugOnFunctionCall); __ Pop(a1); } - __ LoadTaggedPointerField( - a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset)); + __ LoadTaggedField(a4, + FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset)); __ Branch(&stepping_prepared); __ bind(&prepare_step_in_suspended_generator); @@ -492,8 +491,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator); __ Pop(a1); } - __ LoadTaggedPointerField( - a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset)); + __ LoadTaggedField(a4, + FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset)); __ Branch(&stepping_prepared); __ bind(&stack_overflow); @@ -1130,10 +1129,10 @@ void Builtins::Generate_InterpreterEntryTrampoline( Register feedback_vector = a2; // Get the bytecode array from the function object and load it into // kInterpreterBytecodeArrayRegister. - __ LoadTaggedPointerField( + __ LoadTaggedField( kScratchReg, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); - __ LoadTaggedPointerField( + __ LoadTaggedField( kInterpreterBytecodeArrayRegister, FieldMemOperand(kScratchReg, SharedFunctionInfo::kFunctionDataOffset)); Label is_baseline; @@ -1147,17 +1146,16 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ Branch(&compile_lazy, ne, kScratchReg, Operand(BYTECODE_ARRAY_TYPE)); // Load the feedback vector from the closure. - __ LoadTaggedPointerField( - feedback_vector, - FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); - __ LoadTaggedPointerField( - feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset)); + __ LoadTaggedField(feedback_vector, + FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); + __ LoadTaggedField(feedback_vector, + FieldMemOperand(feedback_vector, Cell::kValueOffset)); Label push_stack_frame; // Check if feedback vector is valid. If valid, check for optimized code // and update invocation count. Otherwise, setup the stack frame. - __ LoadTaggedPointerField( - a4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset)); + __ LoadTaggedField(a4, + FieldMemOperand(feedback_vector, HeapObject::kMapOffset)); __ Lhu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset)); __ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE), Label::Distance::kNear); @@ -1331,16 +1329,16 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ bind(&is_baseline); { // Load the feedback vector from the closure. - __ LoadTaggedPointerField( + __ LoadTaggedField( feedback_vector, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); - __ LoadTaggedPointerField( - feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset)); + __ LoadTaggedField(feedback_vector, + FieldMemOperand(feedback_vector, Cell::kValueOffset)); Label install_baseline_code; // Check if feedback vector is valid. 
If not, call prepare for baseline to // allocate it. - __ LoadTaggedPointerField( + __ LoadTaggedField( t0, FieldMemOperand(feedback_vector, HeapObject::kMapOffset)); __ Lhu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset)); __ Branch(&install_baseline_code, ne, t0, Operand(FEEDBACK_VECTOR_TYPE)); @@ -1381,7 +1379,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args, // Push the arguments. __ PushArray(start_address, num_args, - TurboAssembler::PushArrayOrder::kReverse); + MacroAssembler::PushArrayOrder::kReverse); } // static @@ -1511,16 +1509,16 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { // get the custom trampoline, otherwise grab the entry address of the global // trampoline. __ LoadWord(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); - __ LoadTaggedPointerField( + __ LoadTaggedField( t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset)); - __ LoadTaggedPointerField( + __ LoadTaggedField( t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset)); __ GetObjectType(t0, kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister); __ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister, Operand(INTERPRETER_DATA_TYPE), Label::Distance::kNear); - __ LoadTaggedPointerField( + __ LoadTaggedField( t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset)); __ LoadCodeEntry(t0, t0); __ BranchShort(&trampoline_loaded); @@ -1778,7 +1776,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source, // Load deoptimization data from the code object. // = [#deoptimization_data_offset] - __ LoadTaggedPointerField( + __ LoadTaggedField( a1, MemOperand(a0, InstructionStream::kDeoptimizationDataOrInterpreterDataOffset - @@ -2152,7 +2150,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, __ SubWord(scratch, sp, Operand(scratch)); __ LoadRoot(hole_value, RootIndex::kTheHoleValue); __ bind(&loop); - __ LoadTaggedPointerField(a5, MemOperand(src)); + __ LoadTaggedField(a5, MemOperand(src)); __ AddWord(src, src, kTaggedSize); __ Branch(&push, ne, a5, Operand(hole_value), Label::Distance::kNear); __ LoadRoot(a5, RootIndex::kUndefinedValue); @@ -2190,8 +2188,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, UseScratchRegisterScope temps(masm); Register scratch = temps.Acquire(); __ JumpIfSmi(a3, &new_target_not_constructor); - __ LoadTaggedPointerField(scratch, - FieldMemOperand(a3, HeapObject::kMapOffset)); + __ LoadTaggedField(scratch, FieldMemOperand(a3, HeapObject::kMapOffset)); __ Lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); __ And(scratch, scratch, Operand(Map::Bits1::IsConstructorBit::kMask)); __ Branch(&new_target_constructor, ne, scratch, Operand(zero_reg), @@ -2271,7 +2268,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ AssertCallableFunction(a1); Label class_constructor; - __ LoadTaggedPointerField( + __ LoadTaggedField( a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); __ Load32U(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset)); __ And(kScratchReg, a3, @@ -2281,8 +2278,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, // Enter the context of the function; ToObject has to run in the function // context, and we also need to take the global proxy from the function // context in case of conversion. 
- __ LoadTaggedPointerField(cp, - FieldMemOperand(a1, JSFunction::kContextOffset)); + __ LoadTaggedField(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); // We need to convert the receiver for non-native sloppy mode functions. Label done_convert; __ Load32U(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset)); @@ -2337,7 +2333,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ Pop(a0, a1); __ SmiUntag(a0); } - __ LoadTaggedPointerField( + __ LoadTaggedField( a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); __ bind(&convert_receiver); } @@ -2379,7 +2375,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { Register bound_argv = a2; // Load [[BoundArguments]] into a2 and length of that into a4. Label no_bound_arguments; - __ LoadTaggedPointerField( + __ LoadTaggedField( bound_argv, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset)); __ SmiUntagField(bound_argc, FieldMemOperand(bound_argv, FixedArray::kLengthOffset)); @@ -2423,7 +2419,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { __ SubWord(a4, a4, Operand(1)); __ Branch(&done_loop, lt, a4, Operand(zero_reg), Label::Distance::kNear); __ CalcScaledAddress(a5, a2, a4, kTaggedSizeLog2); - __ LoadAnyTaggedField(kScratchReg, MemOperand(a5)); + __ LoadTaggedField(kScratchReg, MemOperand(a5)); __ Push(kScratchReg); __ Branch(&loop); __ bind(&done_loop); @@ -2449,8 +2445,8 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { { UseScratchRegisterScope temps(masm); Register scratch = temps.Acquire(); - __ LoadAnyTaggedField( - scratch, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset)); + __ LoadTaggedField(scratch, + FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset)); __ StoreReceiver(scratch, a0, kScratchReg); } @@ -2458,7 +2454,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { Generate_PushBoundArguments(masm); // Call the [[BoundTargetFunction]] via the Call builtin. - __ LoadTaggedPointerField( + __ LoadTaggedField( a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset)); __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny), RelocInfo::CODE_TARGET); @@ -2548,7 +2544,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) { Label call_generic_stub; // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric. - __ LoadTaggedPointerField( + __ LoadTaggedField( a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); __ Load32U(a4, FieldMemOperand(a4, SharedFunctionInfo::kFlagsOffset)); __ And(a4, a4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask)); @@ -2587,12 +2583,12 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) { __ Branch(&skip, ne, a1, Operand(a3), Label::Distance::kNear); #endif } - __ LoadTaggedPointerField( + __ LoadTaggedField( a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset)); __ bind(&skip); // Construct the [[BoundTargetFunction]] via the Construct builtin. 
- __ LoadTaggedPointerField( + __ LoadTaggedField( a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset)); __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET); } @@ -2615,7 +2611,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) { temps.Include(t0, t1); Register map = temps.Acquire(); Register scratch = temps.Acquire(); - __ LoadTaggedPointerField(map, FieldMemOperand(a1, HeapObject::kMapOffset)); + __ LoadTaggedField(map, FieldMemOperand(a1, HeapObject::kMapOffset)); __ Lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); __ And(scratch, scratch, Operand(Map::Bits1::IsConstructorBit::kMask)); __ Branch(&non_constructor, eq, scratch, Operand(zero_reg)); @@ -3366,8 +3362,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { __ SubWord(sp, sp, (PCA::kArgsLength + 1) * kSystemPointerSize); __ StoreWord(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kSystemPointerSize)); - __ LoadAnyTaggedField(scratch, - FieldMemOperand(callback, AccessorInfo::kDataOffset)); + __ LoadTaggedField(scratch, + FieldMemOperand(callback, AccessorInfo::kDataOffset)); __ StoreWord(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kSystemPointerSize)); __ LoadRoot(scratch, RootIndex::kUndefinedValue); @@ -3385,8 +3381,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { DCHECK_EQ(0, Smi::zero().ptr()); __ StoreWord(zero_reg, MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kSystemPointerSize)); - __ LoadTaggedPointerField( - scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset)); + __ LoadTaggedField(scratch, + FieldMemOperand(callback, AccessorInfo::kNameOffset)); __ StoreWord(scratch, MemOperand(sp, 0 * kSystemPointerSize)); // v8::PropertyCallbackInfo::args_ array and name handle. @@ -3677,10 +3673,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, // Get the InstructionStream object from the shared function info. Register code_obj = s1; - __ LoadTaggedPointerField( + __ LoadTaggedField( code_obj, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); - __ LoadTaggedPointerField( + __ LoadTaggedField( code_obj, FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset)); @@ -3719,11 +3715,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, // Replace BytecodeOffset with the feedback vector. Register feedback_vector = a2; - __ LoadTaggedPointerField( - feedback_vector, - FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); - __ LoadTaggedPointerField( - feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset)); + __ LoadTaggedField(feedback_vector, + FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); + __ LoadTaggedField(feedback_vector, + FieldMemOperand(feedback_vector, Cell::kValueOffset)); Label install_baseline_code; // Check if feedback vector is valid. If not, call prepare for baseline to // allocate it. diff --git a/src/builtins/s390/builtins-s390.cc b/src/builtins/s390/builtins-s390.cc index 7cb579da48..49576f2318 100644 --- a/src/builtins/s390/builtins-s390.cc +++ b/src/builtins/s390/builtins-s390.cc @@ -65,7 +65,7 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm, } __ CmpS32(scratch1, Operand(INTERPRETER_DATA_TYPE)); __ bne(&done); - __ LoadTaggedPointerField( + __ LoadTaggedField( sfi_data, FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset)); @@ -120,10 +120,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, // Get the InstructionStream object from the shared function info. 
Register code_obj = r8; - __ LoadTaggedPointerField( + __ LoadTaggedField( code_obj, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); - __ LoadTaggedPointerField( + __ LoadTaggedField( code_obj, FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset)); @@ -155,11 +155,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, // Load the feedback vector. Register feedback_vector = r4; - __ LoadTaggedPointerField( - feedback_vector, - FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); - __ LoadTaggedPointerField( - feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset)); + __ LoadTaggedField(feedback_vector, + FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); + __ LoadTaggedField(feedback_vector, + FieldMemOperand(feedback_vector, Cell::kValueOffset)); Label install_baseline_code; // Check if feedback vector is valid. If not, call prepare for baseline to @@ -320,7 +319,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source, // Load deoptimization data from the code object. // = [#deoptimization_data_offset] - __ LoadTaggedPointerField( + __ LoadTaggedField( r3, FieldMemOperand( r2, InstructionStream::kDeoptimizationDataOrInterpreterDataOffset)); @@ -428,8 +427,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // Leave construct frame. } // Remove caller arguments from the stack and return. - __ DropArguments(scratch, TurboAssembler::kCountIsSmi, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(scratch, MacroAssembler::kCountIsSmi, + MacroAssembler::kCountIncludesReceiver); __ Ret(); __ bind(&stack_overflow); @@ -472,7 +471,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // -- sp[4*kSystemPointerSize]: context // ----------------------------------- - __ LoadTaggedPointerField( + __ LoadTaggedField( r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset)); __ LoadU32(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset)); __ DecodeField(r6); @@ -584,8 +583,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ LeaveFrame(StackFrame::CONSTRUCT); // Remove caller arguments from the stack and return. - __ DropArguments(r3, TurboAssembler::kCountIsSmi, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(r3, MacroAssembler::kCountIsSmi, + MacroAssembler::kCountIncludesReceiver); __ Ret(); __ bind(&check_receiver); @@ -633,10 +632,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ AssertGeneratorObject(r3); // Load suspended function and context. - __ LoadTaggedPointerField( - r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset)); - __ LoadTaggedPointerField(cp, - FieldMemOperand(r6, JSFunction::kContextOffset)); + __ LoadTaggedField(r6, + FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset)); + __ LoadTaggedField(cp, FieldMemOperand(r6, JSFunction::kContextOffset)); // Flood function if we are stepping. Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator; @@ -677,12 +675,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // ----------------------------------- // Copy the function arguments from the generator object's register file. 
- __ LoadTaggedPointerField( + __ LoadTaggedField( r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset)); __ LoadU16( r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset)); __ SubS64(r5, r5, Operand(kJSArgcReceiverSlots)); - __ LoadTaggedPointerField( + __ LoadTaggedField( r4, FieldMemOperand(r3, JSGeneratorObject::kParametersAndRegistersOffset)); { @@ -692,24 +690,24 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ blt(&done_loop); __ ShiftLeftU64(r1, r5, Operand(kTaggedSizeLog2)); __ la(scratch, MemOperand(r4, r1)); - __ LoadAnyTaggedField(scratch, - FieldMemOperand(scratch, FixedArray::kHeaderSize)); + __ LoadTaggedField(scratch, + FieldMemOperand(scratch, FixedArray::kHeaderSize)); __ Push(scratch); __ b(&loop); __ bind(&done_loop); // Push receiver. - __ LoadAnyTaggedField( - scratch, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset)); + __ LoadTaggedField(scratch, + FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset)); __ Push(scratch); } // Underlying function needs to have bytecode available. if (v8_flags.debug_code) { Label is_baseline; - __ LoadTaggedPointerField( + __ LoadTaggedField( r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset)); - __ LoadTaggedPointerField( + __ LoadTaggedField( r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset)); GetSharedFunctionInfoBytecodeOrBaseline(masm, r5, ip, &is_baseline); __ CompareObjectType(r5, r5, r5, BYTECODE_ARRAY_TYPE); @@ -719,7 +717,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // Resume (Ignition/TurboFan) generator object. { - __ LoadTaggedPointerField( + __ LoadTaggedField( r2, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset)); __ LoadS16( r2, @@ -730,7 +728,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ mov(r5, r3); __ mov(r3, r6); static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch"); - __ LoadTaggedPointerField(r4, FieldMemOperand(r3, JSFunction::kCodeOffset)); + __ LoadTaggedField(r4, FieldMemOperand(r3, JSFunction::kCodeOffset)); __ JumpCodeObject(r4); } @@ -742,8 +740,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ PushRoot(RootIndex::kTheHoleValue); __ CallRuntime(Runtime::kDebugOnFunctionCall); __ Pop(r3); - __ LoadTaggedPointerField( - r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset)); + __ LoadTaggedField(r6, + FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset)); } __ b(&stepping_prepared); @@ -753,8 +751,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ Push(r3); __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator); __ Pop(r3); - __ LoadTaggedPointerField( - r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset)); + __ LoadTaggedField(r6, + FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset)); } __ b(&stepping_prepared); @@ -1148,8 +1146,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, // Leave the frame (also dropping the register file). __ LeaveFrame(StackFrame::INTERPRETED); - __ DropArguments(params_size, TurboAssembler::kCountIsBytes, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(params_size, MacroAssembler::kCountIsBytes, + MacroAssembler::kCountIncludesReceiver); } // Advance the current bytecode offset. 
This simulates what all bytecode @@ -1245,11 +1243,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { BaselineOutOfLinePrologueDescriptor::kClosure); // Load the feedback vector from the closure. Register feedback_vector = ip; - __ LoadTaggedPointerField( - feedback_vector, - FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); - __ LoadTaggedPointerField( - feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset)); + __ LoadTaggedField(feedback_vector, + FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); + __ LoadTaggedField(feedback_vector, + FieldMemOperand(feedback_vector, Cell::kValueOffset)); __ AssertFeedbackVector(feedback_vector, r1); // Check for an tiering state. @@ -1406,10 +1403,10 @@ void Builtins::Generate_InterpreterEntryTrampoline( // Get the bytecode array from the function object and load it into // kInterpreterBytecodeArrayRegister. - __ LoadTaggedPointerField( + __ LoadTaggedField( r6, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); // Load original bytecode array or the debug copy. - __ LoadTaggedPointerField( + __ LoadTaggedField( kInterpreterBytecodeArrayRegister, FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset)); @@ -1425,17 +1422,16 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ bne(&compile_lazy); // Load the feedback vector from the closure. - __ LoadTaggedPointerField( - feedback_vector, - FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); - __ LoadTaggedPointerField( - feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset)); + __ LoadTaggedField(feedback_vector, + FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); + __ LoadTaggedField(feedback_vector, + FieldMemOperand(feedback_vector, Cell::kValueOffset)); Label push_stack_frame; // Check if feedback vector is valid. If valid, check for optimized code // and update invocation count. Otherwise, setup the stack frame. - __ LoadTaggedPointerField( - r6, FieldMemOperand(feedback_vector, HeapObject::kMapOffset)); + __ LoadTaggedField(r6, + FieldMemOperand(feedback_vector, HeapObject::kMapOffset)); __ LoadU16(r6, FieldMemOperand(r6, Map::kInstanceTypeOffset)); __ CmpS64(r6, Operand(FEEDBACK_VECTOR_TYPE)); __ bne(&push_stack_frame); @@ -1611,16 +1607,16 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ bind(&is_baseline); { // Load the feedback vector from the closure. - __ LoadTaggedPointerField( + __ LoadTaggedField( feedback_vector, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); - __ LoadTaggedPointerField( - feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset)); + __ LoadTaggedField(feedback_vector, + FieldMemOperand(feedback_vector, Cell::kValueOffset)); Label install_baseline_code; // Check if feedback vector is valid. If not, call prepare for baseline to // allocate it. - __ LoadTaggedPointerField( + __ LoadTaggedField( ip, FieldMemOperand(feedback_vector, HeapObject::kMapOffset)); __ LoadU16(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset)); __ CmpS32(ip, Operand(FEEDBACK_VECTOR_TYPE)); @@ -1657,7 +1653,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args, __ SubS64(start_address, start_address, scratch); // Push the arguments. 
__ PushArray(start_address, num_args, r1, scratch, - TurboAssembler::PushArrayOrder::kReverse); + MacroAssembler::PushArrayOrder::kReverse); } // static @@ -1792,16 +1788,16 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { // get the custom trampoline, otherwise grab the entry address of the global // trampoline. __ LoadU64(r4, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); - __ LoadTaggedPointerField( + __ LoadTaggedField( r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset)); - __ LoadTaggedPointerField( + __ LoadTaggedField( r4, FieldMemOperand(r4, SharedFunctionInfo::kFunctionDataOffset)); __ CompareObjectType(r4, kInterpreterDispatchTableRegister, kInterpreterDispatchTableRegister, INTERPRETER_DATA_TYPE); __ bne(&builtin_trampoline); - __ LoadTaggedPointerField( + __ LoadTaggedField( r4, FieldMemOperand(r4, InterpreterData::kInterpreterTrampolineOffset)); __ LoadCodeEntry(r4, r4); __ b(&trampoline_loaded); @@ -2022,8 +2018,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { __ LoadU64(r4, MemOperand(sp, 2 * kSystemPointerSize)); // argArray __ bind(&done); - __ DropArgumentsAndPushNewReceiver(r2, r7, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ DropArgumentsAndPushNewReceiver(r2, r7, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- @@ -2107,8 +2103,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { __ LoadU64(r4, MemOperand(sp, 3 * kSystemPointerSize)); // argArray __ bind(&done); - __ DropArgumentsAndPushNewReceiver(r2, r7, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ DropArgumentsAndPushNewReceiver(r2, r7, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- @@ -2157,8 +2153,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { __ blt(&done); __ LoadU64(r5, MemOperand(sp, 3 * kSystemPointerSize)); // argArray __ bind(&done); - __ DropArgumentsAndPushNewReceiver(r2, r6, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ DropArgumentsAndPushNewReceiver(r2, r6, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- @@ -2240,8 +2236,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, // Allow r4 to be a FixedArray, or a FixedDoubleArray if r6 == 0. 
Label ok, fail; __ AssertNotSmi(r4); - __ LoadTaggedPointerField(scratch, - FieldMemOperand(r4, HeapObject::kMapOffset)); + __ LoadTaggedField(scratch, FieldMemOperand(r4, HeapObject::kMapOffset)); __ LoadS16(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); __ CmpS64(scratch, Operand(FIXED_ARRAY_TYPE)); @@ -2277,7 +2272,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, Operand(FixedArray::kHeaderSize - kHeapObjectTag - kTaggedSize)); __ mov(r1, r6); __ bind(&loop); - __ LoadAnyTaggedField(scratch, MemOperand(r4, kTaggedSize), r0); + __ LoadTaggedField(scratch, MemOperand(r4, kTaggedSize), r0); __ la(r4, MemOperand(r4, kTaggedSize)); __ CompareRoot(scratch, RootIndex::kTheHoleValue); __ bne(&skip, Label::kNear); @@ -2312,8 +2307,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, if (mode == CallOrConstructMode::kConstruct) { Label new_target_constructor, new_target_not_constructor; __ JumpIfSmi(r5, &new_target_not_constructor); - __ LoadTaggedPointerField(scratch, - FieldMemOperand(r5, HeapObject::kMapOffset)); + __ LoadTaggedField(scratch, FieldMemOperand(r5, HeapObject::kMapOffset)); __ LoadU8(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); __ tmll(scratch, Operand(Map::Bits1::IsConstructorBit::kShift)); __ bne(&new_target_constructor); @@ -2397,14 +2391,13 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, // ----------------------------------- __ AssertCallableFunction(r3); - __ LoadTaggedPointerField( + __ LoadTaggedField( r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset)); // Enter the context of the function; ToObject has to run in the function // context, and we also need to take the global proxy from the function // context in case of conversion. - __ LoadTaggedPointerField(cp, - FieldMemOperand(r3, JSFunction::kContextOffset)); + __ LoadTaggedField(cp, FieldMemOperand(r3, JSFunction::kContextOffset)); // We need to convert the receiver for non-native sloppy mode functions. Label done_convert; __ LoadU32(r5, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset)); @@ -2458,7 +2451,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ Pop(r2, r3); __ SmiUntag(r2); } - __ LoadTaggedPointerField( + __ LoadTaggedField( r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset)); __ bind(&convert_receiver); } @@ -2489,7 +2482,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { // Load [[BoundArguments]] into r4 and length of that into r6. Label no_bound_arguments; - __ LoadTaggedPointerField( + __ LoadTaggedField( r4, FieldMemOperand(r3, JSBoundFunction::kBoundArgumentsOffset)); __ SmiUntagField(r6, FieldMemOperand(r4, FixedArray::kLengthOffset)); __ LoadAndTestP(r6, r6); @@ -2535,7 +2528,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { __ bind(&loop); __ SubS64(r1, r6, Operand(1)); __ ShiftLeftU64(r1, r1, Operand(kTaggedSizeLog2)); - __ LoadAnyTaggedField(scratch, MemOperand(r4, r1), r0); + __ LoadTaggedField(scratch, MemOperand(r4, r1), r0); __ Push(scratch); __ SubS64(r6, r6, Operand(1)); __ bgt(&loop); @@ -2559,15 +2552,15 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { __ AssertBoundFunction(r3); // Patch the receiver to [[BoundThis]]. - __ LoadAnyTaggedField(r5, - FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset)); + __ LoadTaggedField(r5, + FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset)); __ StoreReceiver(r5, r2, r1); // Push the [[BoundArguments]] onto the stack. 
Generate_PushBoundArguments(masm); // Call the [[BoundTargetFunction]] via the Call builtin. - __ LoadTaggedPointerField( + __ LoadTaggedField( r3, FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset)); __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny), RelocInfo::CODE_TARGET); @@ -2667,7 +2660,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) { Label call_generic_stub; // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric. - __ LoadTaggedPointerField( + __ LoadTaggedField( r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset)); __ LoadU32(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset)); __ AndP(r6, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask)); @@ -2698,12 +2691,12 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) { Label skip; __ CompareTagged(r3, r5); __ bne(&skip); - __ LoadTaggedPointerField( + __ LoadTaggedField( r5, FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset)); __ bind(&skip); // Construct the [[BoundTargetFunction]] via the Construct builtin. - __ LoadTaggedPointerField( + __ LoadTaggedField( r3, FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset)); __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET); } @@ -2727,8 +2720,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) { __ JumpIfSmi(target, &non_constructor); // Check if target has a [[Construct]] internal method. - __ LoadTaggedPointerField(map, - FieldMemOperand(target, HeapObject::kMapOffset)); + __ LoadTaggedField(map, FieldMemOperand(target, HeapObject::kMapOffset)); { Register flags = r4; DCHECK(!AreAliased(argc, target, map, instance_type, flags)); @@ -2811,13 +2803,12 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { Register scratch = r0; Label allocate_vector, done; - __ LoadTaggedPointerField( + __ LoadTaggedField( vector, FieldMemOperand(kWasmInstanceRegister, WasmInstanceObject::kFeedbackVectorsOffset)); __ ShiftLeftU64(scratch, func_index, Operand(kTaggedSizeLog2)); __ AddS64(vector, vector, scratch); - __ LoadTaggedPointerField(vector, - FieldMemOperand(vector, FixedArray::kHeaderSize)); + __ LoadTaggedField(vector, FieldMemOperand(vector, FixedArray::kHeaderSize)); __ JumpIfSmi(vector, &allocate_vector); __ bind(&done); __ push(kWasmInstanceRegister); @@ -3504,16 +3495,16 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { __ push(receiver); // Push data from AccessorInfo. - __ LoadAnyTaggedField( - scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset), r1); + __ LoadTaggedField(scratch, + FieldMemOperand(callback, AccessorInfo::kDataOffset), r1); __ push(scratch); __ LoadRoot(scratch, RootIndex::kUndefinedValue); __ Push(scratch, scratch); __ Move(scratch, ExternalReference::isolate_address(masm->isolate())); __ Push(scratch, holder); __ Push(Smi::zero()); // should_throw_on_error -> false - __ LoadTaggedPointerField( - scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset), r1); + __ LoadTaggedField(scratch, + FieldMemOperand(callback, AccessorInfo::kNameOffset), r1); __ push(scratch); // v8::PropertyCallbackInfo::args_ array and name handle. 
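The s390 hunks above all apply one mechanical rewrite: the separate LoadTaggedPointerField and LoadAnyTaggedField helpers are folded into a single LoadTaggedField call with unchanged operands. A minimal before/after sketch of the pattern, using only registers and offsets that already appear in the hunks above (V8 macro-assembler `__` shorthand; illustrative only, not compilable on its own):

// Before: the helper name encoded whether the field may hold a Smi.
__ LoadTaggedPointerField(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadAnyTaggedField(r5, FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset));

// After: one helper covers both cases; the operands do not change.
__ LoadTaggedField(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
__ LoadTaggedField(r5, FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset));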
diff --git a/src/builtins/wasm.tq b/src/builtins/wasm.tq index fa4be0c887..e53e3c0a20 100644 --- a/src/builtins/wasm.tq +++ b/src/builtins/wasm.tq @@ -61,6 +61,7 @@ extern runtime WasmStringViewWtf8Slice( Context, ByteArray, Number, Number): String; extern runtime WasmStringCompare(NoContext, String, String): Smi; extern runtime WasmStringFromCodePoint(Context, Number): String; +extern runtime WasmStringHash(NoContext, String): Smi; extern runtime WasmJSToWasmObject(Context, JSAny, Smi): JSAny; } @@ -699,6 +700,10 @@ builtin ThrowWasmTrapArrayTooLarge(): JSAny { tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapArrayTooLarge)); } +builtin ThrowWasmTrapStringOffsetOutOfBounds(): JSAny { + tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapStringOffsetOutOfBounds)); +} + macro TryNumberToIntptr(value: JSAny): intptr labels Failure { typeswitch (value) { case (s: Smi): { @@ -939,6 +944,13 @@ builtin WasmStringNewWtf16Array( } } +// Contract: input is any string, output is a string that the TF operator +// "StringPrepareForGetCodeunit" can handle. +builtin WasmStringAsWtf16(str: String): String { + const cons = Cast(str) otherwise return str; + return Flatten(cons); +} + builtin WasmStringConst(index: uint32): String { const instance = LoadInstanceFromFrame(); tail runtime::WasmStringConst( @@ -1254,6 +1266,11 @@ builtin WasmStringFromCodePoint(codePoint: uint32): String { LoadContextFromFrame(), WasmUint32ToNumber(codePoint)); } +builtin WasmStringHash(string: String): int32 { + const result = runtime::WasmStringHash(kNoContext, string); + return SmiToInt32(result); +} + builtin WasmExternInternalize(externObject: JSAny): JSAny { const instance = LoadInstanceFromFrame(); const context = LoadContextFromInstance(instance); diff --git a/src/builtins/x64/builtins-x64.cc b/src/builtins/x64/builtins-x64.cc index c93632d92d..b447c1e7d9 100644 --- a/src/builtins/x64/builtins-x64.cc +++ b/src/builtins/x64/builtins-x64.cc @@ -125,7 +125,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // Remove caller arguments from the stack and return. __ DropArguments(rbx, rcx, MacroAssembler::kCountIsSmi, - TurboAssembler::kCountIncludesReceiver); + MacroAssembler::kCountIncludesReceiver); __ ret(0); @@ -171,9 +171,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // ----------------------------------- const TaggedRegister shared_function_info(rbx); - __ LoadTaggedPointerField( - shared_function_info, - FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); + __ LoadTaggedField(shared_function_info, + FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); __ movl(rbx, FieldOperand(shared_function_info, SharedFunctionInfo::kFlagsOffset)); __ DecodeField(rbx); @@ -282,7 +281,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ LeaveFrame(StackFrame::CONSTRUCT); // Remove caller arguments from the stack and return. __ DropArguments(rbx, rcx, MacroAssembler::kCountIsSmi, - TurboAssembler::kCountIncludesReceiver); + MacroAssembler::kCountIncludesReceiver); __ ret(0); // If the result is a smi, it is *not* an object in the ECMA sense. 
@@ -701,7 +700,7 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm, __ CmpInstanceType(scratch1, INTERPRETER_DATA_TYPE); __ j(not_equal, &done, Label::kNear); - __ LoadTaggedPointerField( + __ LoadTaggedField( sfi_data, FieldOperand(sfi_data, InterpreterData::kBytecodeArrayOffset)); __ bind(&done); @@ -729,9 +728,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { Register decompr_scratch1 = COMPRESS_POINTERS_BOOL ? r8 : no_reg; // Load suspended function and context. - __ LoadTaggedPointerField( - rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset)); - __ LoadTaggedPointerField(rsi, FieldOperand(rdi, JSFunction::kContextOffset)); + __ LoadTaggedField(rdi, + FieldOperand(rdx, JSGeneratorObject::kFunctionOffset)); + __ LoadTaggedField(rsi, FieldOperand(rdi, JSFunction::kContextOffset)); // Flood function if we are stepping. Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator; @@ -768,12 +767,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // ----------------------------------- // Copy the function arguments from the generator object's register file. - __ LoadTaggedPointerField( - rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); + __ LoadTaggedField(rcx, + FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); __ movzxwq( rcx, FieldOperand(rcx, SharedFunctionInfo::kFormalParameterCountOffset)); __ decq(rcx); // Exclude receiver. - __ LoadTaggedPointerField( + __ LoadTaggedField( rbx, FieldOperand(rdx, JSGeneratorObject::kParametersAndRegistersOffset)); { @@ -781,24 +780,23 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ bind(&loop); __ decq(rcx); __ j(less, &done_loop, Label::kNear); - __ PushTaggedAnyField( + __ PushTaggedField( FieldOperand(rbx, rcx, times_tagged_size, FixedArray::kHeaderSize), decompr_scratch1); __ jmp(&loop); __ bind(&done_loop); // Push the receiver. - __ PushTaggedPointerField( - FieldOperand(rdx, JSGeneratorObject::kReceiverOffset), - decompr_scratch1); + __ PushTaggedField(FieldOperand(rdx, JSGeneratorObject::kReceiverOffset), + decompr_scratch1); } // Underlying function needs to have bytecode available. if (v8_flags.debug_code) { Label is_baseline, ok; - __ LoadTaggedPointerField( + __ LoadTaggedField( rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); - __ LoadTaggedPointerField( + __ LoadTaggedField( rcx, FieldOperand(rcx, SharedFunctionInfo::kFunctionDataOffset)); GetSharedFunctionInfoBytecodeOrBaseline(masm, rcx, kScratchRegister, &is_baseline); @@ -816,7 +814,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // Resume (Ignition/TurboFan) generator object. { __ PushReturnAddressFrom(rax); - __ LoadTaggedPointerField( + __ LoadTaggedField( rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); __ movzxwq(rax, FieldOperand( rax, SharedFunctionInfo::kFormalParameterCountOffset)); @@ -824,7 +822,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // pass in the generator object. In ordinary calls, new.target is always // undefined because generator functions are non-constructable. 
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch"); - __ LoadTaggedPointerField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset)); + __ LoadTaggedField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset)); __ JumpCodeObject(rcx); } @@ -837,8 +835,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ PushRoot(RootIndex::kTheHoleValue); __ CallRuntime(Runtime::kDebugOnFunctionCall); __ Pop(rdx); - __ LoadTaggedPointerField( - rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset)); + __ LoadTaggedField(rdi, + FieldOperand(rdx, JSGeneratorObject::kFunctionOffset)); } __ jmp(&stepping_prepared); @@ -848,8 +846,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ Push(rdx); __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator); __ Pop(rdx); - __ LoadTaggedPointerField( - rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset)); + __ LoadTaggedField(rdi, + FieldOperand(rdx, JSGeneratorObject::kFunctionOffset)); } __ jmp(&stepping_prepared); @@ -890,8 +888,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, __ leave(); // Drop receiver + arguments. - __ DropArguments(params_size, scratch2, TurboAssembler::kCountIsBytes, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(params_size, scratch2, MacroAssembler::kCountIsBytes, + MacroAssembler::kCountIncludesReceiver); } // Tail-call |function_id| if |actual_state| == |expected_state| @@ -1019,13 +1017,12 @@ void Builtins::Generate_InterpreterEntryTrampoline( // Get the bytecode array from the function object and load it into // kInterpreterBytecodeArrayRegister. const TaggedRegister shared_function_info(kScratchRegister); - __ LoadTaggedPointerField( + __ LoadTaggedField( shared_function_info, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset)); - __ LoadTaggedPointerField( - kInterpreterBytecodeArrayRegister, - FieldOperand(shared_function_info, - SharedFunctionInfo::kFunctionDataOffset)); + __ LoadTaggedField(kInterpreterBytecodeArrayRegister, + FieldOperand(shared_function_info, + SharedFunctionInfo::kFunctionDataOffset)); Label is_baseline; GetSharedFunctionInfoBytecodeOrBaseline( @@ -1040,10 +1037,10 @@ void Builtins::Generate_InterpreterEntryTrampoline( // Load the feedback vector from the closure. TaggedRegister feedback_cell(feedback_vector); - __ LoadTaggedPointerField( - feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset)); - __ LoadTaggedPointerField(feedback_vector, - FieldOperand(feedback_cell, Cell::kValueOffset)); + __ LoadTaggedField(feedback_cell, + FieldOperand(closure, JSFunction::kFeedbackCellOffset)); + __ LoadTaggedField(feedback_vector, + FieldOperand(feedback_cell, Cell::kValueOffset)); Label push_stack_frame; // Check if feedback vector is valid. If valid, check for optimized code @@ -1220,10 +1217,10 @@ void Builtins::Generate_InterpreterEntryTrampoline( { // Load the feedback vector from the closure. TaggedRegister feedback_cell(feedback_vector); - __ LoadTaggedPointerField( - feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset)); - __ LoadTaggedPointerField(feedback_vector, - FieldOperand(feedback_cell, Cell::kValueOffset)); + __ LoadTaggedField(feedback_cell, + FieldOperand(closure, JSFunction::kFeedbackCellOffset)); + __ LoadTaggedField(feedback_vector, + FieldOperand(feedback_cell, Cell::kValueOffset)); Label install_baseline_code; // Check if feedback vector is valid. 
If not, call prepare for baseline to @@ -1265,7 +1262,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args, kSystemPointerSize)); // Push the arguments. __ PushArray(start_address, num_args, scratch, - TurboAssembler::PushArrayOrder::kReverse); + MacroAssembler::PushArrayOrder::kReverse); } // static @@ -1417,16 +1414,15 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { // trampoline. __ movq(rbx, Operand(rbp, StandardFrameConstants::kFunctionOffset)); const TaggedRegister shared_function_info(rbx); - __ LoadTaggedPointerField( - shared_function_info, - FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset)); - __ LoadTaggedPointerField( - rbx, FieldOperand(shared_function_info, - SharedFunctionInfo::kFunctionDataOffset)); + __ LoadTaggedField(shared_function_info, + FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset)); + __ LoadTaggedField(rbx, + FieldOperand(shared_function_info, + SharedFunctionInfo::kFunctionDataOffset)); __ CmpObjectType(rbx, INTERPRETER_DATA_TYPE, kScratchRegister); __ j(not_equal, &builtin_trampoline, Label::kNear); - __ LoadTaggedPointerField( + __ LoadTaggedField( rbx, FieldOperand(rbx, InterpreterData::kInterpreterTrampolineOffset)); __ LoadCodeEntry(rbx, rbx); __ jmp(&trampoline_loaded, Label::kNear); @@ -1555,10 +1551,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { BaselineOutOfLinePrologueDescriptor::kClosure); // Load the feedback vector from the closure. TaggedRegister feedback_cell(feedback_vector); - __ LoadTaggedPointerField( - feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset)); - __ LoadTaggedPointerField(feedback_vector, - FieldOperand(feedback_cell, Cell::kValueOffset)); + __ LoadTaggedField(feedback_cell, + FieldOperand(closure, JSFunction::kFeedbackCellOffset)); + __ LoadTaggedField(feedback_vector, + FieldOperand(feedback_cell, Cell::kValueOffset)); __ AssertFeedbackVector(feedback_vector); // Check the tiering state. @@ -1814,8 +1810,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { } __ bind(&no_this_arg); __ DropArgumentsAndPushNewReceiver(rax, rdx, rcx, - TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- @@ -1919,8 +1915,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { __ movq(rbx, args[3]); // argumentsList __ bind(&done); __ DropArgumentsAndPushNewReceiver(rax, rdx, rcx, - TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- @@ -1971,8 +1967,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { __ bind(&done); __ DropArgumentsAndPushNewReceiver( rax, masm->RootAsOperand(RootIndex::kUndefinedValue), rcx, - TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } // ----------- S t a t e ------------- @@ -2097,8 +2093,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, __ cmpl(current, num); __ j(equal, &done, Label::kNear); // Turn the hole into undefined as we go. 
- __ LoadAnyTaggedField(value, FieldOperand(src, current, times_tagged_size, - FixedArray::kHeaderSize)); + __ LoadTaggedField(value, FieldOperand(src, current, times_tagged_size, + FixedArray::kHeaderSize)); __ CompareRoot(value, RootIndex::kTheHoleValue); __ j(not_equal, &push, Label::kNear); __ LoadRoot(value, RootIndex::kUndefinedValue); @@ -2213,8 +2209,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, StackArgumentsAccessor args(rax); __ AssertCallableFunction(rdi); - __ LoadTaggedPointerField( - rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); + __ LoadTaggedField(rdx, + FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); // ----------- S t a t e ------------- // -- rax : the number of arguments // -- rdx : the shared function info. @@ -2224,7 +2220,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, // Enter the context of the function; ToObject has to run in the function // context, and we also need to take the global proxy from the function // context in case of conversion. - __ LoadTaggedPointerField(rsi, FieldOperand(rdi, JSFunction::kContextOffset)); + __ LoadTaggedField(rsi, FieldOperand(rdi, JSFunction::kContextOffset)); // We need to convert the receiver for non-native sloppy mode functions. Label done_convert; __ testl(FieldOperand(rdx, SharedFunctionInfo::kFlagsOffset), @@ -2281,7 +2277,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ Pop(rax); __ SmiUntagUnsigned(rax); } - __ LoadTaggedPointerField( + __ LoadTaggedField( rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); __ bind(&convert_receiver); } @@ -2312,8 +2308,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { // Load [[BoundArguments]] into rcx and length of that into rbx. Label no_bound_arguments; - __ LoadTaggedPointerField( - rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset)); + __ LoadTaggedField(rcx, + FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset)); __ SmiUntagFieldUnsigned(rbx, FieldOperand(rcx, FixedArray::kLengthOffset)); __ testl(rbx, rbx); __ j(zero, &no_bound_arguments); @@ -2354,7 +2350,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { // Push [[BoundArguments]] to the stack. { Label loop; - __ LoadTaggedPointerField( + __ LoadTaggedField( rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset)); __ SmiUntagFieldUnsigned(rbx, FieldOperand(rcx, FixedArray::kLengthOffset)); @@ -2364,9 +2360,9 @@ void Generate_PushBoundArguments(MacroAssembler* masm) { // offset in order to be able to move decl(rbx) right before the loop // condition. This is necessary in order to avoid flags corruption by // pointer decompression code. - __ LoadAnyTaggedField( - r12, FieldOperand(rcx, rbx, times_tagged_size, - FixedArray::kHeaderSize - kTaggedSize)); + __ LoadTaggedField(r12, + FieldOperand(rcx, rbx, times_tagged_size, + FixedArray::kHeaderSize - kTaggedSize)); __ Push(r12); __ decl(rbx); __ j(greater, &loop); @@ -2391,15 +2387,14 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { // Patch the receiver to [[BoundThis]]. StackArgumentsAccessor args(rax); - __ LoadAnyTaggedField(rbx, - FieldOperand(rdi, JSBoundFunction::kBoundThisOffset)); + __ LoadTaggedField(rbx, FieldOperand(rdi, JSBoundFunction::kBoundThisOffset)); __ movq(args.GetReceiverOperand(), rbx); // Push the [[BoundArguments]] onto the stack. Generate_PushBoundArguments(masm); // Call the [[BoundTargetFunction]] via the Call builtin. 
- __ LoadTaggedPointerField( + __ LoadTaggedField( rdi, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset)); __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny), RelocInfo::CODE_TARGET); @@ -2498,9 +2493,8 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) { // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric. const TaggedRegister shared_function_info(rcx); - __ LoadTaggedPointerField( - shared_function_info, - FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); + __ LoadTaggedField(shared_function_info, + FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); __ testl(FieldOperand(shared_function_info, SharedFunctionInfo::kFlagsOffset), Immediate(SharedFunctionInfo::ConstructAsBuiltinBit::kMask)); __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub), @@ -2528,13 +2522,13 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) { Label done; __ cmpq(rdi, rdx); __ j(not_equal, &done, Label::kNear); - __ LoadTaggedPointerField( + __ LoadTaggedField( rdx, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset)); __ bind(&done); } // Construct the [[BoundTargetFunction]] via the Construct builtin. - __ LoadTaggedPointerField( + __ LoadTaggedField( rdi, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset)); __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET); } @@ -2677,7 +2671,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source, // Load deoptimization data from the code object. const TaggedRegister deopt_data(rbx); - __ LoadTaggedPointerField( + __ LoadTaggedField( deopt_data, FieldOperand( rax, InstructionStream::kDeoptimizationDataOrInterpreterDataOffset)); @@ -2776,12 +2770,11 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) { __ Push(rbp); __ Move(rbp, rsp); __ Push(Immediate(StackFrame::TypeToMarker(StackFrame::WASM))); - __ LoadTaggedPointerField( - vector, FieldOperand(kWasmInstanceRegister, - WasmInstanceObject::kFeedbackVectorsOffset)); - __ LoadTaggedPointerField(vector, - FieldOperand(vector, func_index, times_tagged_size, - FixedArray::kHeaderSize)); + __ LoadTaggedField(vector, + FieldOperand(kWasmInstanceRegister, + WasmInstanceObject::kFeedbackVectorsOffset)); + __ LoadTaggedField(vector, FieldOperand(vector, func_index, times_tagged_size, + FixedArray::kHeaderSize)); Label allocate_vector, done; __ JumpIfSmi(vector, &allocate_vector); __ bind(&done); @@ -2931,7 +2924,7 @@ void PrepareForBuiltinCall(MacroAssembler* masm, MemOperand GCScanSlotPlace, __ pushq(function_data); // We had to prepare the parameters for the Call: we have to put the context // into rsi. 
- __ LoadAnyTaggedField( + __ LoadTaggedField( rsi, MemOperand(wasm_instance, wasm::ObjectAccess::ToTagged( WasmInstanceObject::kNativeContextOffset))); @@ -3012,7 +3005,7 @@ void AllocateSuspender(MacroAssembler* masm, Register function_data, __ Move(GCScanSlotPlace, 2); __ Push(wasm_instance); __ Push(function_data); - __ LoadAnyTaggedField( + __ LoadTaggedField( kContextRegister, MemOperand(wasm_instance, wasm::ObjectAccess::ToTagged( WasmInstanceObject::kNativeContextOffset))); @@ -3052,7 +3045,7 @@ void ReloadParentContinuation(MacroAssembler* masm, Register wasm_instance, wasm::JumpBuffer::Retired); Register parent = tmp2; - __ LoadAnyTaggedField( + __ LoadTaggedField( parent, FieldOperand(active_continuation, WasmContinuationObject::kParentOffset)); @@ -3083,7 +3076,7 @@ void RestoreParentSuspender(MacroAssembler* masm, Register tmp1, __ StoreTaggedSignedField( FieldOperand(suspender, WasmSuspenderObject::kStateOffset), Smi::FromInt(WasmSuspenderObject::kInactive)); - __ LoadAnyTaggedField( + __ LoadTaggedField( suspender, FieldOperand(suspender, WasmSuspenderObject::kParentOffset)); __ CompareRoot(suspender, RootIndex::kUndefinedValue); Label undefined; @@ -3111,19 +3104,19 @@ void LoadFunctionDataAndWasmInstance(MacroAssembler* masm, Register wasm_instance) { Register closure = function_data; Register shared_function_info = closure; - __ LoadAnyTaggedField( + __ LoadTaggedField( shared_function_info, MemOperand( closure, wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction())); closure = no_reg; - __ LoadAnyTaggedField( + __ LoadTaggedField( function_data, MemOperand(shared_function_info, SharedFunctionInfo::kFunctionDataOffset - kHeapObjectTag)); shared_function_info = no_reg; - __ LoadAnyTaggedField( + __ LoadTaggedField( wasm_instance, MemOperand(function_data, WasmExportedFunctionData::kInstanceOffset - kHeapObjectTag)); @@ -3224,7 +3217,7 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { Register suspender = rax; // Fixed. __ movq(MemOperand(rbp, kSuspenderOffset), suspender); Register target_continuation = rax; - __ LoadAnyTaggedField( + __ LoadTaggedField( target_continuation, FieldOperand(suspender, WasmSuspenderObject::kContinuationOffset)); suspender = no_reg; @@ -3728,7 +3721,7 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { Register function_entry = function_data; Register scratch = r12; - __ LoadAnyTaggedField( + __ LoadTaggedField( function_entry, FieldOperand(function_data, WasmExportedFunctionData::kInternalOffset)); __ LoadExternalPointerField( @@ -3812,8 +3805,8 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) { // expected to be on the top of the stack). // We cannot use just the ret instruction for this, because we cannot pass the // number of slots to remove in a Register as an argument. 
- __ DropArguments(param_count, rbx, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountExcludesReceiver); + __ DropArguments(param_count, rbx, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountExcludesReceiver); __ ret(0); // -------------------------------------------------------------------------- @@ -4081,7 +4074,7 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) { // live: [rax, rbx, rcx] Register suspender_continuation = rdx; - __ LoadAnyTaggedField( + __ LoadTaggedField( suspender_continuation, FieldOperand(suspender, WasmSuspenderObject::kContinuationOffset)); #ifdef DEBUG @@ -4102,12 +4095,12 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) { // Update roots. // ------------------------------------------- Register caller = rcx; - __ LoadAnyTaggedField(caller, - FieldOperand(suspender_continuation, - WasmContinuationObject::kParentOffset)); + __ LoadTaggedField(caller, + FieldOperand(suspender_continuation, + WasmContinuationObject::kParentOffset)); __ movq(masm->RootAsOperand(RootIndex::kActiveContinuation), caller); Register parent = rdx; - __ LoadAnyTaggedField( + __ LoadTaggedField( parent, FieldOperand(suspender, WasmSuspenderObject::kParentOffset)); __ movq(masm->RootAsOperand(RootIndex::kActiveSuspender), parent); parent = no_reg; @@ -4172,19 +4165,19 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) { // Load suspender from closure. // ------------------------------------------- Register sfi = closure; - __ LoadAnyTaggedField( + __ LoadTaggedField( sfi, MemOperand( closure, wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction())); Register function_data = sfi; - __ LoadAnyTaggedField( + __ LoadTaggedField( function_data, FieldOperand(sfi, SharedFunctionInfo::kFunctionDataOffset)); // The write barrier uses a fixed register for the host object (rdi). The next // barrier is on the suspender, so load it in rdi directly. Register suspender = rdi; - __ LoadAnyTaggedField( + __ LoadTaggedField( suspender, FieldOperand(function_data, WasmResumeData::kSuspenderOffset)); // Check the suspender state. Label suspender_is_suspended; @@ -4233,7 +4226,7 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) { __ movq(masm->RootAsOperand(RootIndex::kActiveSuspender), suspender); Register target_continuation = suspender; - __ LoadAnyTaggedField( + __ LoadTaggedField( target_continuation, FieldOperand(suspender, WasmSuspenderObject::kContinuationOffset)); suspender = no_reg; @@ -4848,16 +4841,16 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { // Insert additional parameters into the stack frame above return address. __ PopReturnAddressTo(scratch); __ Push(receiver); - __ PushTaggedAnyField(FieldOperand(callback, AccessorInfo::kDataOffset), - decompr_scratch1); + __ PushTaggedField(FieldOperand(callback, AccessorInfo::kDataOffset), + decompr_scratch1); __ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue); __ Push(kScratchRegister); // return value __ Push(kScratchRegister); // return value default __ PushAddress(ExternalReference::isolate_address(masm->isolate())); __ Push(holder); __ Push(Smi::zero()); // should_throw_on_error -> false - __ PushTaggedPointerField(FieldOperand(callback, AccessorInfo::kNameOffset), - decompr_scratch1); + __ PushTaggedField(FieldOperand(callback, AccessorInfo::kNameOffset), + decompr_scratch1); __ PushReturnAddressFrom(scratch); // v8::PropertyCallbackInfo::args_ array and name handle. 
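The x64 hunks above follow the same consolidation and additionally touch the push helpers and enum qualifiers: PushTaggedAnyField and PushTaggedPointerField collapse into PushTaggedField, and constants such as kCountIsInteger are now spelled through MacroAssembler:: rather than TurboAssembler::. An illustrative before/after sketch built from calls that appear in the hunks above (V8 macro-assembler `__` shorthand; not standalone-compilable):

// Before: distinct push helpers and the TurboAssembler qualifier.
__ PushTaggedAnyField(FieldOperand(callback, AccessorInfo::kDataOffset), decompr_scratch1);
__ DropArguments(param_count, rbx, TurboAssembler::kCountIsInteger,
                 TurboAssembler::kCountExcludesReceiver);

// After: a single PushTaggedField helper and the MacroAssembler qualifier.
__ PushTaggedField(FieldOperand(callback, AccessorInfo::kDataOffset), decompr_scratch1);
__ DropArguments(param_count, rbx, MacroAssembler::kCountIsInteger,
                 MacroAssembler::kCountExcludesReceiver);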
@@ -5129,12 +5122,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, // Get the InstructionStream object from the shared function info. Register code_obj = rbx; TaggedRegister shared_function_info(code_obj); - __ LoadTaggedPointerField( + __ LoadTaggedField( shared_function_info, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset)); - __ LoadTaggedPointerField( - code_obj, FieldOperand(shared_function_info, - SharedFunctionInfo::kFunctionDataOffset)); + __ LoadTaggedField(code_obj, + FieldOperand(shared_function_info, + SharedFunctionInfo::kFunctionDataOffset)); // Check if we have baseline code. For OSR entry it is safe to assume we // always have baseline code. @@ -5166,10 +5159,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, Register feedback_vector = r11; TaggedRegister feedback_cell(feedback_vector); - __ LoadTaggedPointerField( - feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset)); - __ LoadTaggedPointerField(feedback_vector, - FieldOperand(feedback_cell, Cell::kValueOffset)); + __ LoadTaggedField(feedback_cell, + FieldOperand(closure, JSFunction::kFeedbackCellOffset)); + __ LoadTaggedField(feedback_vector, + FieldOperand(feedback_cell, Cell::kValueOffset)); Label install_baseline_code; // Check if feedback vector is valid. If not, call prepare for baseline to diff --git a/src/codegen/arm/assembler-arm.h b/src/codegen/arm/assembler-arm.h index 592491db4d..cd68628b24 100644 --- a/src/codegen/arm/assembler-arm.h +++ b/src/codegen/arm/assembler-arm.h @@ -1435,7 +1435,7 @@ class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope { private: friend class Assembler; - friend class TurboAssembler; + friend class MacroAssembler; template bool CanAcquireVfp() const; diff --git a/src/codegen/arm/macro-assembler-arm.cc b/src/codegen/arm/macro-assembler-arm.cc index 2055bd8157..2588de4784 100644 --- a/src/codegen/arm/macro-assembler-arm.cc +++ b/src/codegen/arm/macro-assembler-arm.cc @@ -42,7 +42,7 @@ namespace v8 { namespace internal { -int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, +int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, Register exclusion2, Register exclusion3) const { @@ -59,7 +59,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, return bytes; } -int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, +int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, Register exclusion2, Register exclusion3) { ASM_CODE_COMMENT(this); int bytes = 0; @@ -77,7 +77,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, return bytes; } -int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, +int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, Register exclusion2, Register exclusion3) { ASM_CODE_COMMENT(this); int bytes = 0; @@ -95,7 +95,7 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, return bytes; } -void TurboAssembler::LoadFromConstantsTable(Register destination, +void MacroAssembler::LoadFromConstantsTable(Register destination, int constant_index) { DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable)); @@ -106,11 +106,11 @@ void TurboAssembler::LoadFromConstantsTable(Register destination, ldr(destination, MemOperand(destination, offset)); } -void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) { +void 
MacroAssembler::LoadRootRelative(Register destination, int32_t offset) { ldr(destination, MemOperand(kRootRegister, offset)); } -void TurboAssembler::LoadRootRegisterOffset(Register destination, +void MacroAssembler::LoadRootRegisterOffset(Register destination, intptr_t offset) { if (offset == 0) { Move(destination, kRootRegister); @@ -119,7 +119,7 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination, } } -MemOperand TurboAssembler::ExternalReferenceAsOperand( +MemOperand MacroAssembler::ExternalReferenceAsOperand( ExternalReference reference, Register scratch) { if (root_array_available_ && options().enable_root_relative_access) { int64_t offset = @@ -148,20 +148,20 @@ MemOperand TurboAssembler::ExternalReferenceAsOperand( return MemOperand(scratch, 0); } -void TurboAssembler::Jump(Register target, Condition cond) { bx(target, cond); } +void MacroAssembler::Jump(Register target, Condition cond) { bx(target, cond); } -void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, +void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond) { mov(pc, Operand(target, rmode), LeaveCC, cond); } -void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, +void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond) { DCHECK(!RelocInfo::IsCodeTarget(rmode)); Jump(static_cast(target), rmode, cond); } -void TurboAssembler::Jump(Handle code, RelocInfo::Mode rmode, +void MacroAssembler::Jump(Handle code, RelocInfo::Mode rmode, Condition cond) { DCHECK(RelocInfo::IsCodeTarget(rmode)); DCHECK_IMPLIES(options().isolate_independent_code, @@ -177,20 +177,20 @@ void TurboAssembler::Jump(Handle code, RelocInfo::Mode rmode, Jump(static_cast(code.address()), rmode, cond); } -void TurboAssembler::Jump(const ExternalReference& reference) { +void MacroAssembler::Jump(const ExternalReference& reference) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); Move(scratch, reference); Jump(scratch); } -void TurboAssembler::Call(Register target, Condition cond) { +void MacroAssembler::Call(Register target, Condition cond) { // Block constant pool for the call instruction sequence. BlockConstPoolScope block_const_pool(this); blx(target, cond); } -void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond, +void MacroAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond, TargetAddressStorageMode mode, bool check_constant_pool) { // Check if we have to emit the constant pool before we block it. 
@@ -225,7 +225,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond, } } -void TurboAssembler::Call(Handle code, RelocInfo::Mode rmode, +void MacroAssembler::Call(Handle code, RelocInfo::Mode rmode, Condition cond, TargetAddressStorageMode mode, bool check_constant_pool) { DCHECK(RelocInfo::IsCodeTarget(rmode)); @@ -242,7 +242,7 @@ void TurboAssembler::Call(Handle code, RelocInfo::Mode rmode, Call(code.address(), rmode, cond, mode); } -void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { +void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { ASM_CODE_COMMENT(this); static_assert(kSystemPointerSize == 4); static_assert(kSmiShiftSize == 0); @@ -258,25 +258,25 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { ldr(builtin_index, MemOperand(kRootRegister, builtin_index)); } -void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { +void MacroAssembler::CallBuiltinByIndex(Register builtin_index) { LoadEntryFromBuiltinIndex(builtin_index); Call(builtin_index); } -void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin, +void MacroAssembler::LoadEntryFromBuiltin(Builtin builtin, Register destination) { ASM_CODE_COMMENT(this); ldr(destination, EntryFromBuiltinAsOperand(builtin)); } -MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { +MemOperand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { ASM_CODE_COMMENT(this); DCHECK(root_array_available()); return MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(builtin)); } -void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) { +void MacroAssembler::CallBuiltin(Builtin builtin, Condition cond) { ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin)); // Use ip directly instead of using UseScratchRegisterScope, as we do not // preserve scratch registers across calls. @@ -307,7 +307,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) { } } -void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond) { +void MacroAssembler::TailCallBuiltin(Builtin builtin, Condition cond) { ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("tail call", builtin)); // Use ip directly instead of using UseScratchRegisterScope, as we do not @@ -339,12 +339,12 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond) { } } -void TurboAssembler::LoadCodeEntry(Register destination, Register code_object) { +void MacroAssembler::LoadCodeEntry(Register destination, Register code_object) { ASM_CODE_COMMENT(this); ldr(destination, FieldMemOperand(code_object, Code::kCodeEntryPointOffset)); } -void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, +void MacroAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, Register code_object) { ASM_CODE_COMMENT(this); // Compute the InstructionStream object pointer from the code entry point. 
@@ -353,20 +353,20 @@ void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, Operand(InstructionStream::kHeaderSize - kHeapObjectTag)); } -void TurboAssembler::CallCodeObject(Register code_object) { +void MacroAssembler::CallCodeObject(Register code_object) { ASM_CODE_COMMENT(this); LoadCodeEntry(code_object, code_object); Call(code_object); } -void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { +void MacroAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { ASM_CODE_COMMENT(this); DCHECK_EQ(JumpMode::kJump, jump_mode); LoadCodeEntry(code_object, code_object); Jump(code_object); } -void TurboAssembler::StoreReturnAddressAndCall(Register target) { +void MacroAssembler::StoreReturnAddressAndCall(Register target) { ASM_CODE_COMMENT(this); // This generates the final instruction sequence for calls to C functions // once an exit frame has been constructed. @@ -384,15 +384,15 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) { Call(target); } -void TurboAssembler::Ret(Condition cond) { bx(lr, cond); } +void MacroAssembler::Ret(Condition cond) { bx(lr, cond); } -void TurboAssembler::Drop(int count, Condition cond) { +void MacroAssembler::Drop(int count, Condition cond) { if (count > 0) { add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond); } } -void TurboAssembler::Drop(Register count, Condition cond) { +void MacroAssembler::Drop(Register count, Condition cond) { add(sp, sp, Operand(count, LSL, kPointerSizeLog2), LeaveCC, cond); } @@ -407,23 +407,23 @@ Operand MacroAssembler::ClearedValue() const { static_cast(HeapObjectReference::ClearedValue(isolate()).ptr())); } -void TurboAssembler::Call(Label* target) { bl(target); } +void MacroAssembler::Call(Label* target) { bl(target); } -void TurboAssembler::Push(Handle handle) { +void MacroAssembler::Push(Handle handle) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); mov(scratch, Operand(handle)); push(scratch); } -void TurboAssembler::Push(Smi smi) { +void MacroAssembler::Push(Smi smi) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); mov(scratch, Operand(smi)); push(scratch); } -void TurboAssembler::PushArray(Register array, Register size, Register scratch, +void MacroAssembler::PushArray(Register array, Register size, Register scratch, PushArrayOrder order) { ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); @@ -453,9 +453,9 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch, } } -void TurboAssembler::Move(Register dst, Smi smi) { mov(dst, Operand(smi)); } +void MacroAssembler::Move(Register dst, Smi smi) { mov(dst, Operand(smi)); } -void TurboAssembler::Move(Register dst, Handle value) { +void MacroAssembler::Move(Register dst, Handle value) { // TODO(jgruber,v8:8887): Also consider a root-relative load when generating // non-isolate-independent code. In many cases it might be cheaper than // embedding the relocatable value. @@ -466,7 +466,7 @@ void TurboAssembler::Move(Register dst, Handle value) { mov(dst, Operand(value)); } -void TurboAssembler::Move(Register dst, ExternalReference reference) { +void MacroAssembler::Move(Register dst, ExternalReference reference) { // TODO(jgruber,v8:8887): Also consider a root-relative load when generating // non-isolate-independent code. In many cases it might be cheaper than // embedding the relocatable value. 
@@ -477,33 +477,33 @@ void TurboAssembler::Move(Register dst, ExternalReference reference) { mov(dst, Operand(reference)); } -void TurboAssembler::Move(Register dst, Register src, Condition cond) { +void MacroAssembler::Move(Register dst, Register src, Condition cond) { if (dst != src) { mov(dst, src, LeaveCC, cond); } } -void TurboAssembler::Move(SwVfpRegister dst, SwVfpRegister src, +void MacroAssembler::Move(SwVfpRegister dst, SwVfpRegister src, Condition cond) { if (dst != src) { vmov(dst, src, cond); } } -void TurboAssembler::Move(DwVfpRegister dst, DwVfpRegister src, +void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src, Condition cond) { if (dst != src) { vmov(dst, src, cond); } } -void TurboAssembler::Move(QwNeonRegister dst, QwNeonRegister src) { +void MacroAssembler::Move(QwNeonRegister dst, QwNeonRegister src) { if (dst != src) { vmov(dst, src); } } -void TurboAssembler::MovePair(Register dst0, Register src0, Register dst1, +void MacroAssembler::MovePair(Register dst0, Register src0, Register dst1, Register src1) { DCHECK_NE(dst0, dst1); if (dst0 != src1) { @@ -519,7 +519,7 @@ void TurboAssembler::MovePair(Register dst0, Register src0, Register dst1, } } -void TurboAssembler::Swap(Register srcdst0, Register srcdst1) { +void MacroAssembler::Swap(Register srcdst0, Register srcdst1) { DCHECK(srcdst0 != srcdst1); UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -528,7 +528,7 @@ void TurboAssembler::Swap(Register srcdst0, Register srcdst1) { mov(srcdst1, scratch); } -void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) { +void MacroAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) { DCHECK(srcdst0 != srcdst1); DCHECK(VfpRegisterIsAvailable(srcdst0)); DCHECK(VfpRegisterIsAvailable(srcdst1)); @@ -544,7 +544,7 @@ void TurboAssembler::Swap(DwVfpRegister srcdst0, DwVfpRegister srcdst1) { } } -void TurboAssembler::Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1) { +void MacroAssembler::Swap(QwNeonRegister srcdst0, QwNeonRegister srcdst1) { DCHECK(srcdst0 != srcdst1); vswp(srcdst0, srcdst1); } @@ -617,7 +617,7 @@ void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width, } } -void TurboAssembler::Bfc(Register dst, Register src, int lsb, int width, +void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width, Condition cond) { DCHECK_LT(lsb, 32); if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) { @@ -630,7 +630,7 @@ void TurboAssembler::Bfc(Register dst, Register src, int lsb, int width, } } -void TurboAssembler::LoadRoot(Register destination, RootIndex index, +void MacroAssembler::LoadRoot(Register destination, RootIndex index, Condition cond) { ldr(destination, MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), cond); @@ -674,19 +674,19 @@ void MacroAssembler::RecordWriteField(Register object, int offset, bind(&done); } -void TurboAssembler::MaybeSaveRegisters(RegList registers) { +void MacroAssembler::MaybeSaveRegisters(RegList registers) { if (registers.is_empty()) return; ASM_CODE_COMMENT(this); stm(db_w, sp, registers); } -void TurboAssembler::MaybeRestoreRegisters(RegList registers) { +void MacroAssembler::MaybeRestoreRegisters(RegList registers) { if (registers.is_empty()) return; ASM_CODE_COMMENT(this); ldm(ia_w, sp, registers); } -void TurboAssembler::CallEphemeronKeyBarrier(Register object, Operand offset, +void MacroAssembler::CallEphemeronKeyBarrier(Register object, Operand offset, SaveFPRegsMode fp_mode) { ASM_CODE_COMMENT(this); RegList registers = 
WriteBarrierDescriptor::ComputeSavedRegisters(object); @@ -703,7 +703,7 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object, Operand offset, MaybeRestoreRegisters(registers); } -void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object, +void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object, Operand offset, SaveFPRegsMode fp_mode, StubCallMode mode) { @@ -721,7 +721,7 @@ void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object, MaybeRestoreRegisters(registers); } -void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address, +void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode) { ASM_CODE_COMMENT(this); @@ -740,7 +740,7 @@ void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address, } } -void TurboAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot, +void MacroAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot, Register object, Operand offset) { DCHECK_NE(dst_object, dst_slot); DCHECK(offset.IsRegister() || offset.IsImmediate()); @@ -828,7 +828,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset, bind(&done); } -void TurboAssembler::PushCommonFrame(Register marker_reg) { +void MacroAssembler::PushCommonFrame(Register marker_reg) { ASM_CODE_COMMENT(this); if (marker_reg.is_valid()) { if (marker_reg.code() > fp.code()) { @@ -845,7 +845,7 @@ void TurboAssembler::PushCommonFrame(Register marker_reg) { } } -void TurboAssembler::PushStandardFrame(Register function_reg) { +void MacroAssembler::PushStandardFrame(Register function_reg) { ASM_CODE_COMMENT(this); DCHECK(!function_reg.is_valid() || function_reg.code() < cp.code()); stm(db_w, sp, {function_reg, cp, fp, lr}); @@ -855,7 +855,7 @@ void TurboAssembler::PushStandardFrame(Register function_reg) { Push(kJavaScriptCallArgCountRegister); } -void TurboAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst, +void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst, const DwVfpRegister src, const Condition cond) { // Subtracting 0.0 preserves all inputs except for signalling NaNs, which @@ -864,35 +864,35 @@ void TurboAssembler::VFPCanonicalizeNaN(const DwVfpRegister dst, vsub(dst, src, kDoubleRegZero, cond); } -void TurboAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1, +void MacroAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1, const SwVfpRegister src2, const Condition cond) { // Compare and move FPSCR flags to the normal condition flags. VFPCompareAndLoadFlags(src1, src2, pc, cond); } -void TurboAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1, +void MacroAssembler::VFPCompareAndSetFlags(const SwVfpRegister src1, const float src2, const Condition cond) { // Compare and move FPSCR flags to the normal condition flags. VFPCompareAndLoadFlags(src1, src2, pc, cond); } -void TurboAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1, +void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1, const DwVfpRegister src2, const Condition cond) { // Compare and move FPSCR flags to the normal condition flags. VFPCompareAndLoadFlags(src1, src2, pc, cond); } -void TurboAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1, +void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1, const double src2, const Condition cond) { // Compare and move FPSCR flags to the normal condition flags. 
VFPCompareAndLoadFlags(src1, src2, pc, cond); } -void TurboAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1, +void MacroAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1, const SwVfpRegister src2, const Register fpscr_flags, const Condition cond) { @@ -901,7 +901,7 @@ void TurboAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1, vmrs(fpscr_flags, cond); } -void TurboAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1, +void MacroAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1, const float src2, const Register fpscr_flags, const Condition cond) { @@ -910,7 +910,7 @@ void TurboAssembler::VFPCompareAndLoadFlags(const SwVfpRegister src1, vmrs(fpscr_flags, cond); } -void TurboAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1, +void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1, const DwVfpRegister src2, const Register fpscr_flags, const Condition cond) { @@ -919,7 +919,7 @@ void TurboAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1, vmrs(fpscr_flags, cond); } -void TurboAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1, +void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1, const double src2, const Register fpscr_flags, const Condition cond) { @@ -928,7 +928,7 @@ void TurboAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1, vmrs(fpscr_flags, cond); } -void TurboAssembler::VmovHigh(Register dst, DwVfpRegister src) { +void MacroAssembler::VmovHigh(Register dst, DwVfpRegister src) { if (src.code() < 16) { const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code()); vmov(dst, loc.high()); @@ -937,7 +937,7 @@ void TurboAssembler::VmovHigh(Register dst, DwVfpRegister src) { } } -void TurboAssembler::VmovHigh(DwVfpRegister dst, Register src) { +void MacroAssembler::VmovHigh(DwVfpRegister dst, Register src) { if (dst.code() < 16) { const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code()); vmov(loc.high(), src); @@ -946,7 +946,7 @@ void TurboAssembler::VmovHigh(DwVfpRegister dst, Register src) { } } -void TurboAssembler::VmovLow(Register dst, DwVfpRegister src) { +void MacroAssembler::VmovLow(Register dst, DwVfpRegister src) { if (src.code() < 16) { const LowDwVfpRegister loc = LowDwVfpRegister::from_code(src.code()); vmov(dst, loc.low()); @@ -955,7 +955,7 @@ void TurboAssembler::VmovLow(Register dst, DwVfpRegister src) { } } -void TurboAssembler::VmovLow(DwVfpRegister dst, Register src) { +void MacroAssembler::VmovLow(DwVfpRegister dst, Register src) { if (dst.code() < 16) { const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code()); vmov(loc.low(), src); @@ -964,7 +964,7 @@ void TurboAssembler::VmovLow(DwVfpRegister dst, Register src) { } } -void TurboAssembler::VmovExtended(Register dst, int src_code) { +void MacroAssembler::VmovExtended(Register dst, int src_code) { DCHECK_LE(SwVfpRegister::kNumRegisters, src_code); DCHECK_GT(SwVfpRegister::kNumRegisters * 2, src_code); if (src_code & 0x1) { @@ -974,7 +974,7 @@ void TurboAssembler::VmovExtended(Register dst, int src_code) { } } -void TurboAssembler::VmovExtended(int dst_code, Register src) { +void MacroAssembler::VmovExtended(int dst_code, Register src) { DCHECK_LE(SwVfpRegister::kNumRegisters, dst_code); DCHECK_GT(SwVfpRegister::kNumRegisters * 2, dst_code); if (dst_code & 0x1) { @@ -984,7 +984,7 @@ void TurboAssembler::VmovExtended(int dst_code, Register src) { } } -void TurboAssembler::VmovExtended(int dst_code, int src_code) { +void MacroAssembler::VmovExtended(int dst_code, int src_code) { if 
(src_code == dst_code) return; if (src_code < SwVfpRegister::kNumRegisters && @@ -1054,7 +1054,7 @@ void TurboAssembler::VmovExtended(int dst_code, int src_code) { } } -void TurboAssembler::VmovExtended(int dst_code, const MemOperand& src) { +void MacroAssembler::VmovExtended(int dst_code, const MemOperand& src) { if (dst_code < SwVfpRegister::kNumRegisters) { vldr(SwVfpRegister::from_code(dst_code), src); } else { @@ -1068,7 +1068,7 @@ void TurboAssembler::VmovExtended(int dst_code, const MemOperand& src) { } } -void TurboAssembler::VmovExtended(const MemOperand& dst, int src_code) { +void MacroAssembler::VmovExtended(const MemOperand& dst, int src_code) { if (src_code < SwVfpRegister::kNumRegisters) { vstr(SwVfpRegister::from_code(src_code), dst); } else { @@ -1081,7 +1081,7 @@ void TurboAssembler::VmovExtended(const MemOperand& dst, int src_code) { } } -void TurboAssembler::ExtractLane(Register dst, QwNeonRegister src, +void MacroAssembler::ExtractLane(Register dst, QwNeonRegister src, NeonDataType dt, int lane) { int size = NeonSz(dt); // 0, 1, 2 int byte = lane << size; @@ -1093,7 +1093,7 @@ void TurboAssembler::ExtractLane(Register dst, QwNeonRegister src, vmov(dt, dst, double_source, double_lane); } -void TurboAssembler::ExtractLane(Register dst, DwVfpRegister src, +void MacroAssembler::ExtractLane(Register dst, DwVfpRegister src, NeonDataType dt, int lane) { int size = NeonSz(dt); // 0, 1, 2 int byte = lane << size; @@ -1102,19 +1102,19 @@ void TurboAssembler::ExtractLane(Register dst, DwVfpRegister src, vmov(dt, dst, src, double_lane); } -void TurboAssembler::ExtractLane(SwVfpRegister dst, QwNeonRegister src, +void MacroAssembler::ExtractLane(SwVfpRegister dst, QwNeonRegister src, int lane) { int s_code = src.code() * 4 + lane; VmovExtended(dst.code(), s_code); } -void TurboAssembler::ExtractLane(DwVfpRegister dst, QwNeonRegister src, +void MacroAssembler::ExtractLane(DwVfpRegister dst, QwNeonRegister src, int lane) { DwVfpRegister double_dst = DwVfpRegister::from_code(src.code() * 2 + lane); vmov(dst, double_dst); } -void TurboAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src, +void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src, Register src_lane, NeonDataType dt, int lane) { Move(dst, src); int size = NeonSz(dt); // 0, 1, 2 @@ -1127,21 +1127,21 @@ void TurboAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src, vmov(dt, double_dst, double_lane, src_lane); } -void TurboAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src, +void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src, SwVfpRegister src_lane, int lane) { Move(dst, src); int s_code = dst.code() * 4 + lane; VmovExtended(s_code, src_lane.code()); } -void TurboAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src, +void MacroAssembler::ReplaceLane(QwNeonRegister dst, QwNeonRegister src, DwVfpRegister src_lane, int lane) { Move(dst, src); DwVfpRegister double_dst = DwVfpRegister::from_code(dst.code() * 2 + lane); vmov(double_dst, src_lane); } -void TurboAssembler::LoadLane(NeonSize sz, NeonListOperand dst_list, +void MacroAssembler::LoadLane(NeonSize sz, NeonListOperand dst_list, uint8_t lane, NeonMemOperand src) { if (sz == Neon64) { // vld1s is not valid for Neon64. 
@@ -1151,7 +1151,7 @@ void TurboAssembler::LoadLane(NeonSize sz, NeonListOperand dst_list, } } -void TurboAssembler::StoreLane(NeonSize sz, NeonListOperand src_list, +void MacroAssembler::StoreLane(NeonSize sz, NeonListOperand src_list, uint8_t lane, NeonMemOperand dst) { if (sz == Neon64) { // vst1s is not valid for Neon64. @@ -1161,7 +1161,7 @@ void TurboAssembler::StoreLane(NeonSize sz, NeonListOperand src_list, } } -void TurboAssembler::LslPair(Register dst_low, Register dst_high, +void MacroAssembler::LslPair(Register dst_low, Register dst_high, Register src_low, Register src_high, Register shift) { DCHECK(!AreAliased(dst_high, src_low)); @@ -1186,7 +1186,7 @@ void TurboAssembler::LslPair(Register dst_low, Register dst_high, bind(&done); } -void TurboAssembler::LslPair(Register dst_low, Register dst_high, +void MacroAssembler::LslPair(Register dst_low, Register dst_high, Register src_low, Register src_high, uint32_t shift) { DCHECK_GE(63, shift); @@ -1209,7 +1209,7 @@ void TurboAssembler::LslPair(Register dst_low, Register dst_high, } } -void TurboAssembler::LsrPair(Register dst_low, Register dst_high, +void MacroAssembler::LsrPair(Register dst_low, Register dst_high, Register src_low, Register src_high, Register shift) { DCHECK(!AreAliased(dst_low, src_high)); @@ -1235,7 +1235,7 @@ void TurboAssembler::LsrPair(Register dst_low, Register dst_high, bind(&done); } -void TurboAssembler::LsrPair(Register dst_low, Register dst_high, +void MacroAssembler::LsrPair(Register dst_low, Register dst_high, Register src_low, Register src_high, uint32_t shift) { DCHECK_GE(63, shift); @@ -1258,7 +1258,7 @@ void TurboAssembler::LsrPair(Register dst_low, Register dst_high, } } -void TurboAssembler::AsrPair(Register dst_low, Register dst_high, +void MacroAssembler::AsrPair(Register dst_low, Register dst_high, Register src_low, Register src_high, Register shift) { DCHECK(!AreAliased(dst_low, src_high)); @@ -1283,7 +1283,7 @@ void TurboAssembler::AsrPair(Register dst_low, Register dst_high, bind(&done); } -void TurboAssembler::AsrPair(Register dst_low, Register dst_high, +void MacroAssembler::AsrPair(Register dst_low, Register dst_high, Register src_low, Register src_high, uint32_t shift) { DCHECK_GE(63, shift); @@ -1306,7 +1306,7 @@ void TurboAssembler::AsrPair(Register dst_low, Register dst_high, } } -void TurboAssembler::StubPrologue(StackFrame::Type type) { +void MacroAssembler::StubPrologue(StackFrame::Type type) { ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -1314,9 +1314,9 @@ void TurboAssembler::StubPrologue(StackFrame::Type type) { PushCommonFrame(scratch); } -void TurboAssembler::Prologue() { PushStandardFrame(r1); } +void MacroAssembler::Prologue() { PushStandardFrame(r1); } -void TurboAssembler::DropArguments(Register count, ArgumentsCountType type, +void MacroAssembler::DropArguments(Register count, ArgumentsCountType type, ArgumentsCountMode mode) { int receiver_bytes = (mode == kCountExcludesReceiver) ? 
kPointerSize : 0; switch (type) { @@ -1339,7 +1339,7 @@ void TurboAssembler::DropArguments(Register count, ArgumentsCountType type, } } -void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, +void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc, Register receiver, ArgumentsCountType type, ArgumentsCountMode mode) { @@ -1354,7 +1354,7 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, } } -void TurboAssembler::EnterFrame(StackFrame::Type type, +void MacroAssembler::EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) { ASM_CODE_COMMENT(this); // r0-r3: preserved @@ -1370,7 +1370,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type, #endif // V8_ENABLE_WEBASSEMBLY } -int TurboAssembler::LeaveFrame(StackFrame::Type type) { +int MacroAssembler::LeaveFrame(StackFrame::Type type) { ASM_CODE_COMMENT(this); // r0: preserved // r1: preserved @@ -1385,7 +1385,7 @@ int TurboAssembler::LeaveFrame(StackFrame::Type type) { } #ifdef V8_OS_WIN -void TurboAssembler::AllocateStackSpace(Register bytes_scratch) { +void MacroAssembler::AllocateStackSpace(Register bytes_scratch) { // "Functions that allocate 4 KB or more on the stack must ensure that each // page prior to the final page is touched in order." Source: // https://docs.microsoft.com/en-us/cpp/build/overview-of-arm-abi-conventions?view=vs-2019#stack @@ -1408,7 +1408,7 @@ void TurboAssembler::AllocateStackSpace(Register bytes_scratch) { sub(sp, sp, bytes_scratch); } -void TurboAssembler::AllocateStackSpace(int bytes) { +void MacroAssembler::AllocateStackSpace(int bytes) { ASM_CODE_COMMENT(this); DCHECK_GE(bytes, 0); UseScratchRegisterScope temps(this); @@ -1470,7 +1470,7 @@ void MacroAssembler::EnterExitFrame(int stack_space, str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset)); } -int TurboAssembler::ActivationFrameAlignment() { +int MacroAssembler::ActivationFrameAlignment() { #if V8_HOST_ARCH_ARM // Running on the real platform. Use the alignment as mandated by the local // environment. @@ -1522,7 +1522,7 @@ void MacroAssembler::LeaveExitFrame(Register argument_count, } } -void TurboAssembler::MovFromFloatResult(const DwVfpRegister dst) { +void MacroAssembler::MovFromFloatResult(const DwVfpRegister dst) { if (use_eabi_hardfloat()) { Move(dst, d0); } else { @@ -1531,7 +1531,7 @@ void TurboAssembler::MovFromFloatResult(const DwVfpRegister dst) { } // On ARM this is just a synonym to make the purpose clear. -void TurboAssembler::MovFromFloatParameter(DwVfpRegister dst) { +void MacroAssembler::MovFromFloatParameter(DwVfpRegister dst) { MovFromFloatResult(dst); } @@ -1543,10 +1543,10 @@ void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) { kind == StackLimitKind::kRealStackLimit ? 
ExternalReference::address_of_real_jslimit(isolate) : ExternalReference::address_of_jslimit(isolate); - DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit)); + DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit)); intptr_t offset = - TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit); + MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit); CHECK(is_int32(offset)); ldr(destination, MemOperand(kRootRegister, offset)); } @@ -1841,7 +1841,7 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit, b(ls, on_in_range); } -void TurboAssembler::TryInlineTruncateDoubleToI(Register result, +void MacroAssembler::TryInlineTruncateDoubleToI(Register result, DwVfpRegister double_input, Label* done) { ASM_CODE_COMMENT(this); @@ -1867,7 +1867,7 @@ void TurboAssembler::TryInlineTruncateDoubleToI(Register result, b(lt, done); } -void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, +void MacroAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result, DwVfpRegister double_input, StubCallMode stub_mode) { @@ -2121,11 +2121,11 @@ void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value, } #ifdef V8_ENABLE_DEBUG_CODE -void TurboAssembler::Assert(Condition cond, AbortReason reason) { +void MacroAssembler::Assert(Condition cond, AbortReason reason) { if (v8_flags.debug_code) Check(cond, reason); } -void TurboAssembler::AssertUnreachable(AbortReason reason) { +void MacroAssembler::AssertUnreachable(AbortReason reason) { if (v8_flags.debug_code) Abort(reason); } @@ -2234,7 +2234,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, } #endif // V8_ENABLE_DEBUG_CODE -void TurboAssembler::Check(Condition cond, AbortReason reason) { +void MacroAssembler::Check(Condition cond, AbortReason reason) { Label L; b(cond, &L); Abort(reason); @@ -2242,7 +2242,7 @@ void TurboAssembler::Check(Condition cond, AbortReason reason) { bind(&L); } -void TurboAssembler::Abort(AbortReason reason) { +void MacroAssembler::Abort(AbortReason reason) { ASM_CODE_COMMENT(this); Label abort_start; bind(&abort_start); @@ -2290,7 +2290,7 @@ void TurboAssembler::Abort(AbortReason reason) { // will not return here } -void TurboAssembler::LoadMap(Register destination, Register object) { +void MacroAssembler::LoadMap(Register destination, Register object) { ldr(destination, FieldMemOperand(object, HeapObject::kMapOffset)); } @@ -2307,7 +2307,7 @@ void MacroAssembler::LoadNativeContextSlot(Register dst, int index) { ldr(dst, MemOperand(dst, Context::SlotOffset(index))); } -void TurboAssembler::InitializeRootRegister() { +void MacroAssembler::InitializeRootRegister() { ASM_CODE_COMMENT(this); ExternalReference isolate_root = ExternalReference::isolate_root(isolate()); mov(kRootRegister, Operand(isolate_root)); @@ -2325,17 +2325,17 @@ void MacroAssembler::SmiTst(Register value) { tst(value, Operand(kSmiTagMask)); } -void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) { +void MacroAssembler::JumpIfSmi(Register value, Label* smi_label) { tst(value, Operand(kSmiTagMask)); b(eq, smi_label); } -void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) { +void MacroAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) { cmp(x, Operand(y)); b(eq, dest); } -void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) { +void MacroAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) { cmp(x, Operand(y)); b(lt, dest); } @@ -2345,14 +2345,14 @@ void 
MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) { b(ne, not_smi_label); } -void TurboAssembler::CheckFor32DRegs(Register scratch) { +void MacroAssembler::CheckFor32DRegs(Register scratch) { ASM_CODE_COMMENT(this); Move(scratch, ExternalReference::cpu_features()); ldr(scratch, MemOperand(scratch)); tst(scratch, Operand(1u << VFP32DREGS)); } -void TurboAssembler::SaveFPRegs(Register location, Register scratch) { +void MacroAssembler::SaveFPRegs(Register location, Register scratch) { ASM_CODE_COMMENT(this); CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported); CheckFor32DRegs(scratch); @@ -2361,7 +2361,7 @@ void TurboAssembler::SaveFPRegs(Register location, Register scratch) { vstm(db_w, location, d0, d15); } -void TurboAssembler::RestoreFPRegs(Register location, Register scratch) { +void MacroAssembler::RestoreFPRegs(Register location, Register scratch) { ASM_CODE_COMMENT(this); CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported); CheckFor32DRegs(scratch); @@ -2370,7 +2370,7 @@ void TurboAssembler::RestoreFPRegs(Register location, Register scratch) { add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq); } -void TurboAssembler::SaveFPRegsToHeap(Register location, Register scratch) { +void MacroAssembler::SaveFPRegsToHeap(Register location, Register scratch) { ASM_CODE_COMMENT(this); CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported); CheckFor32DRegs(scratch); @@ -2379,7 +2379,7 @@ void TurboAssembler::SaveFPRegsToHeap(Register location, Register scratch) { add(location, location, Operand(16 * kDoubleSize), LeaveCC, eq); } -void TurboAssembler::RestoreFPRegsFromHeap(Register location, +void MacroAssembler::RestoreFPRegsFromHeap(Register location, Register scratch) { ASM_CODE_COMMENT(this); CpuFeatureScope scope(this, VFP32DREGS, CpuFeatureScope::kDontCheckSupported); @@ -2390,7 +2390,7 @@ void TurboAssembler::RestoreFPRegsFromHeap(Register location, } template -void TurboAssembler::FloatMaxHelper(T result, T left, T right, +void MacroAssembler::FloatMaxHelper(T result, T left, T right, Label* out_of_line) { // This trivial case is caught sooner, so that the out-of-line code can be // completely avoided. @@ -2421,7 +2421,7 @@ void TurboAssembler::FloatMaxHelper(T result, T left, T right, } template -void TurboAssembler::FloatMaxOutOfLineHelper(T result, T left, T right) { +void MacroAssembler::FloatMaxOutOfLineHelper(T result, T left, T right) { DCHECK(left != right); // ARMv8: At least one of left and right is a NaN. @@ -2434,7 +2434,7 @@ void TurboAssembler::FloatMaxOutOfLineHelper(T result, T left, T right) { } template -void TurboAssembler::FloatMinHelper(T result, T left, T right, +void MacroAssembler::FloatMinHelper(T result, T left, T right, Label* out_of_line) { // This trivial case is caught sooner, so that the out-of-line code can be // completely avoided. @@ -2480,7 +2480,7 @@ void TurboAssembler::FloatMinHelper(T result, T left, T right, } template -void TurboAssembler::FloatMinOutOfLineHelper(T result, T left, T right) { +void MacroAssembler::FloatMinOutOfLineHelper(T result, T left, T right) { DCHECK(left != right); // At least one of left and right is a NaN. 
Use vadd to propagate the NaN @@ -2488,42 +2488,42 @@ void TurboAssembler::FloatMinOutOfLineHelper(T result, T left, T right) { vadd(result, left, right); } -void TurboAssembler::FloatMax(SwVfpRegister result, SwVfpRegister left, +void MacroAssembler::FloatMax(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right, Label* out_of_line) { FloatMaxHelper(result, left, right, out_of_line); } -void TurboAssembler::FloatMin(SwVfpRegister result, SwVfpRegister left, +void MacroAssembler::FloatMin(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right, Label* out_of_line) { FloatMinHelper(result, left, right, out_of_line); } -void TurboAssembler::FloatMax(DwVfpRegister result, DwVfpRegister left, +void MacroAssembler::FloatMax(DwVfpRegister result, DwVfpRegister left, DwVfpRegister right, Label* out_of_line) { FloatMaxHelper(result, left, right, out_of_line); } -void TurboAssembler::FloatMin(DwVfpRegister result, DwVfpRegister left, +void MacroAssembler::FloatMin(DwVfpRegister result, DwVfpRegister left, DwVfpRegister right, Label* out_of_line) { FloatMinHelper(result, left, right, out_of_line); } -void TurboAssembler::FloatMaxOutOfLine(SwVfpRegister result, SwVfpRegister left, +void MacroAssembler::FloatMaxOutOfLine(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right) { FloatMaxOutOfLineHelper(result, left, right); } -void TurboAssembler::FloatMinOutOfLine(SwVfpRegister result, SwVfpRegister left, +void MacroAssembler::FloatMinOutOfLine(SwVfpRegister result, SwVfpRegister left, SwVfpRegister right) { FloatMinOutOfLineHelper(result, left, right); } -void TurboAssembler::FloatMaxOutOfLine(DwVfpRegister result, DwVfpRegister left, +void MacroAssembler::FloatMaxOutOfLine(DwVfpRegister result, DwVfpRegister left, DwVfpRegister right) { FloatMaxOutOfLineHelper(result, left, right); } -void TurboAssembler::FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left, +void MacroAssembler::FloatMinOutOfLine(DwVfpRegister result, DwVfpRegister left, DwVfpRegister right) { FloatMinOutOfLineHelper(result, left, right); } @@ -2532,7 +2532,7 @@ static const int kRegisterPassedArguments = 4; // The hardfloat calling convention passes double arguments in registers d0-d7. static const int kDoubleRegisterPassedArguments = 8; -int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments, +int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments, int num_double_arguments) { int stack_passed_words = 0; if (use_eabi_hardfloat()) { @@ -2554,7 +2554,7 @@ int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments, return stack_passed_words; } -void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, +void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, int num_double_arguments, Register scratch) { ASM_CODE_COMMENT(this); @@ -2576,7 +2576,7 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, } } -void TurboAssembler::MovToFloatParameter(DwVfpRegister src) { +void MacroAssembler::MovToFloatParameter(DwVfpRegister src) { DCHECK(src == d0); if (!use_eabi_hardfloat()) { vmov(r0, r1, src); @@ -2584,11 +2584,11 @@ void TurboAssembler::MovToFloatParameter(DwVfpRegister src) { } // On ARM this is just a synonym to make the purpose clear. 
-void TurboAssembler::MovToFloatResult(DwVfpRegister src) { +void MacroAssembler::MovToFloatResult(DwVfpRegister src) { MovToFloatParameter(src); } -void TurboAssembler::MovToFloatParameters(DwVfpRegister src1, +void MacroAssembler::MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2) { DCHECK(src1 == d0); DCHECK(src2 == d1); @@ -2598,7 +2598,7 @@ void TurboAssembler::MovToFloatParameters(DwVfpRegister src1, } } -void TurboAssembler::CallCFunction(ExternalReference function, +void MacroAssembler::CallCFunction(ExternalReference function, int num_reg_arguments, int num_double_arguments) { UseScratchRegisterScope temps(this); @@ -2607,21 +2607,21 @@ void TurboAssembler::CallCFunction(ExternalReference function, CallCFunctionHelper(scratch, num_reg_arguments, num_double_arguments); } -void TurboAssembler::CallCFunction(Register function, int num_reg_arguments, +void MacroAssembler::CallCFunction(Register function, int num_reg_arguments, int num_double_arguments) { CallCFunctionHelper(function, num_reg_arguments, num_double_arguments); } -void TurboAssembler::CallCFunction(ExternalReference function, +void MacroAssembler::CallCFunction(ExternalReference function, int num_arguments) { CallCFunction(function, num_arguments, 0); } -void TurboAssembler::CallCFunction(Register function, int num_arguments) { +void MacroAssembler::CallCFunction(Register function, int num_arguments) { CallCFunction(function, num_arguments, 0); } -void TurboAssembler::CallCFunctionHelper(Register function, +void MacroAssembler::CallCFunctionHelper(Register function, int num_reg_arguments, int num_double_arguments) { ASM_CODE_COMMENT(this); @@ -2704,7 +2704,7 @@ void TurboAssembler::CallCFunctionHelper(Register function, } } -void TurboAssembler::CheckPageFlag(Register object, int mask, Condition cc, +void MacroAssembler::CheckPageFlag(Register object, int mask, Condition cc, Label* condition_met) { ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); @@ -2732,13 +2732,13 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3, UNREACHABLE(); } -void TurboAssembler::ComputeCodeStartAddress(Register dst) { +void MacroAssembler::ComputeCodeStartAddress(Register dst) { ASM_CODE_COMMENT(this); // We can use the register pc - 8 for the address of the current instruction. 
sub(dst, pc, Operand(pc_offset() + Instruction::kPcLoadDelta)); } -void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, +void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit, DeoptimizeKind kind, Label* ret, Label*) { ASM_CODE_COMMENT(this); @@ -2760,10 +2760,10 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, DCHECK(!has_pending_constants()); } -void TurboAssembler::Trap() { stop(); } -void TurboAssembler::DebugBreak() { stop(); } +void MacroAssembler::Trap() { stop(); } +void MacroAssembler::DebugBreak() { stop(); } -void TurboAssembler::I64x2BitMask(Register dst, QwNeonRegister src) { +void MacroAssembler::I64x2BitMask(Register dst, QwNeonRegister src) { UseScratchRegisterScope temps(this); QwNeonRegister tmp1 = temps.AcquireQ(); Register tmp = temps.Acquire(); @@ -2774,7 +2774,7 @@ void TurboAssembler::I64x2BitMask(Register dst, QwNeonRegister src) { add(dst, dst, Operand(tmp, LSL, 1)); } -void TurboAssembler::I64x2Eq(QwNeonRegister dst, QwNeonRegister src1, +void MacroAssembler::I64x2Eq(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2) { UseScratchRegisterScope temps(this); Simd128Register scratch = temps.AcquireQ(); @@ -2783,7 +2783,7 @@ void TurboAssembler::I64x2Eq(QwNeonRegister dst, QwNeonRegister src1, vand(dst, dst, scratch); } -void TurboAssembler::I64x2Ne(QwNeonRegister dst, QwNeonRegister src1, +void MacroAssembler::I64x2Ne(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2) { UseScratchRegisterScope temps(this); Simd128Register tmp = temps.AcquireQ(); @@ -2793,14 +2793,14 @@ void TurboAssembler::I64x2Ne(QwNeonRegister dst, QwNeonRegister src1, vorn(dst, dst, tmp); } -void TurboAssembler::I64x2GtS(QwNeonRegister dst, QwNeonRegister src1, +void MacroAssembler::I64x2GtS(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2) { ASM_CODE_COMMENT(this); vqsub(NeonS64, dst, src2, src1); vshr(NeonS64, dst, dst, 63); } -void TurboAssembler::I64x2GeS(QwNeonRegister dst, QwNeonRegister src1, +void MacroAssembler::I64x2GeS(QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2) { ASM_CODE_COMMENT(this); vqsub(NeonS64, dst, src1, src2); @@ -2808,7 +2808,7 @@ void TurboAssembler::I64x2GeS(QwNeonRegister dst, QwNeonRegister src1, vmvn(dst, dst); } -void TurboAssembler::I64x2AllTrue(Register dst, QwNeonRegister src) { +void MacroAssembler::I64x2AllTrue(Register dst, QwNeonRegister src) { ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); QwNeonRegister tmp = temps.AcquireQ(); @@ -2832,7 +2832,7 @@ void TurboAssembler::I64x2AllTrue(Register dst, QwNeonRegister src) { // = defintion of i64x2.all_true. 
} -void TurboAssembler::I64x2Abs(QwNeonRegister dst, QwNeonRegister src) { +void MacroAssembler::I64x2Abs(QwNeonRegister dst, QwNeonRegister src) { ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); Simd128Register tmp = temps.AcquireQ(); @@ -2861,17 +2861,17 @@ void F64x2ConvertLowHelper(Assembler* assm, QwNeonRegister dst, } } // namespace -void TurboAssembler::F64x2ConvertLowI32x4S(QwNeonRegister dst, +void MacroAssembler::F64x2ConvertLowI32x4S(QwNeonRegister dst, QwNeonRegister src) { F64x2ConvertLowHelper(this, dst, src, &Assembler::vcvt_f64_s32); } -void TurboAssembler::F64x2ConvertLowI32x4U(QwNeonRegister dst, +void MacroAssembler::F64x2ConvertLowI32x4U(QwNeonRegister dst, QwNeonRegister src) { F64x2ConvertLowHelper(this, dst, src, &Assembler::vcvt_f64_u32); } -void TurboAssembler::F64x2PromoteLowF32x4(QwNeonRegister dst, +void MacroAssembler::F64x2PromoteLowF32x4(QwNeonRegister dst, QwNeonRegister src) { F64x2ConvertLowHelper(this, dst, src, &Assembler::vcvt_f64_f32); } diff --git a/src/codegen/arm/macro-assembler-arm.h b/src/codegen/arm/macro-assembler-arm.h index ea95b03416..fb69d20f4d 100644 --- a/src/codegen/arm/macro-assembler-arm.h +++ b/src/codegen/arm/macro-assembler-arm.h @@ -43,9 +43,9 @@ enum TargetAddressStorageMode { NEVER_INLINE_TARGET_ADDRESS }; -class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { +class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { public: - using TurboAssemblerBase::TurboAssemblerBase; + using MacroAssemblerBase::MacroAssemblerBase; // Activation support. void EnterFrame(StackFrame::Type type, @@ -596,49 +596,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void F64x2ConvertLowI32x4U(QwNeonRegister dst, QwNeonRegister src); void F64x2PromoteLowF32x4(QwNeonRegister dst, QwNeonRegister src); - private: - // Compare single values and then load the fpscr flags to a register. - void VFPCompareAndLoadFlags(const SwVfpRegister src1, - const SwVfpRegister src2, - const Register fpscr_flags, - const Condition cond = al); - void VFPCompareAndLoadFlags(const SwVfpRegister src1, const float src2, - const Register fpscr_flags, - const Condition cond = al); - - // Compare double values and then load the fpscr flags to a register. - void VFPCompareAndLoadFlags(const DwVfpRegister src1, - const DwVfpRegister src2, - const Register fpscr_flags, - const Condition cond = al); - void VFPCompareAndLoadFlags(const DwVfpRegister src1, const double src2, - const Register fpscr_flags, - const Condition cond = al); - - void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al); - - // Implementation helpers for FloatMin and FloatMax. - template - void FloatMaxHelper(T result, T left, T right, Label* out_of_line); - template - void FloatMinHelper(T result, T left, T right, Label* out_of_line); - template - void FloatMaxOutOfLineHelper(T result, T left, T right); - template - void FloatMinOutOfLineHelper(T result, T left, T right); - - int CalculateStackPassedWords(int num_reg_arguments, - int num_double_arguments); - - void CallCFunctionHelper(Register function, int num_reg_arguments, - int num_double_arguments); -}; - -// MacroAssembler implements a collection of frequently used macros. 
-class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { - public: - using TurboAssembler::TurboAssembler; - void Mls(Register dst, Register src1, Register src2, Register srcA, Condition cond = al); void And(Register dst, Register src1, const Operand& src2, @@ -899,6 +856,42 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { Register actual_parameter_count, Label* done, InvokeType type); + // Compare single values and then load the fpscr flags to a register. + void VFPCompareAndLoadFlags(const SwVfpRegister src1, + const SwVfpRegister src2, + const Register fpscr_flags, + const Condition cond = al); + void VFPCompareAndLoadFlags(const SwVfpRegister src1, const float src2, + const Register fpscr_flags, + const Condition cond = al); + + // Compare double values and then load the fpscr flags to a register. + void VFPCompareAndLoadFlags(const DwVfpRegister src1, + const DwVfpRegister src2, + const Register fpscr_flags, + const Condition cond = al); + void VFPCompareAndLoadFlags(const DwVfpRegister src1, const double src2, + const Register fpscr_flags, + const Condition cond = al); + + void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al); + + // Implementation helpers for FloatMin and FloatMax. + template + void FloatMaxHelper(T result, T left, T right, Label* out_of_line); + template + void FloatMinHelper(T result, T left, T right, Label* out_of_line); + template + void FloatMaxOutOfLineHelper(T result, T left, T right); + template + void FloatMinOutOfLineHelper(T result, T left, T right); + + int CalculateStackPassedWords(int num_reg_arguments, + int num_double_arguments); + + void CallCFunctionHelper(Register function, int num_reg_arguments, + int num_double_arguments); + DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler); }; diff --git a/src/codegen/arm64/assembler-arm64-inl.h b/src/codegen/arm64/assembler-arm64-inl.h index bd0c98dc44..525bc53525 100644 --- a/src/codegen/arm64/assembler-arm64-inl.h +++ b/src/codegen/arm64/assembler-arm64-inl.h @@ -659,8 +659,8 @@ HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) { Tagged_t compressed = Assembler::target_compressed_address_at(pc_, constant_pool_); DCHECK(!HAS_SMI_TAG(compressed)); - Object obj(V8HeapCompressionScheme::DecompressTaggedPointer(cage_base, - compressed)); + Object obj( + V8HeapCompressionScheme::DecompressTagged(cage_base, compressed)); // Embedding of compressed InstructionStream objects must not happen when // external code space is enabled, because Codes must be used // instead. 
diff --git a/src/codegen/arm64/macro-assembler-arm64-inl.h b/src/codegen/arm64/macro-assembler-arm64-inl.h index 6a4cc044ee..55e416957a 100644 --- a/src/codegen/arm64/macro-assembler-arm64-inl.h +++ b/src/codegen/arm64/macro-assembler-arm64-inl.h @@ -21,26 +21,26 @@ MemOperand FieldMemOperand(Register object, int offset) { return MemOperand(object, offset - kHeapObjectTag); } -void TurboAssembler::And(const Register& rd, const Register& rn, +void MacroAssembler::And(const Register& rd, const Register& rn, const Operand& operand) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); LogicalMacro(rd, rn, operand, AND); } -void TurboAssembler::Ands(const Register& rd, const Register& rn, +void MacroAssembler::Ands(const Register& rd, const Register& rn, const Operand& operand) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); LogicalMacro(rd, rn, operand, ANDS); } -void TurboAssembler::Tst(const Register& rn, const Operand& operand) { +void MacroAssembler::Tst(const Register& rn, const Operand& operand) { DCHECK(allow_macro_instructions()); LogicalMacro(AppropriateZeroRegFor(rn), rn, operand, ANDS); } -void TurboAssembler::Bic(const Register& rd, const Register& rn, +void MacroAssembler::Bic(const Register& rd, const Register& rn, const Operand& operand) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); @@ -54,35 +54,35 @@ void MacroAssembler::Bics(const Register& rd, const Register& rn, LogicalMacro(rd, rn, operand, BICS); } -void TurboAssembler::Orr(const Register& rd, const Register& rn, +void MacroAssembler::Orr(const Register& rd, const Register& rn, const Operand& operand) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); LogicalMacro(rd, rn, operand, ORR); } -void TurboAssembler::Orn(const Register& rd, const Register& rn, +void MacroAssembler::Orn(const Register& rd, const Register& rn, const Operand& operand) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); LogicalMacro(rd, rn, operand, ORN); } -void TurboAssembler::Eor(const Register& rd, const Register& rn, +void MacroAssembler::Eor(const Register& rd, const Register& rn, const Operand& operand) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); LogicalMacro(rd, rn, operand, EOR); } -void TurboAssembler::Eon(const Register& rd, const Register& rn, +void MacroAssembler::Eon(const Register& rd, const Register& rn, const Operand& operand) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); LogicalMacro(rd, rn, operand, EON); } -void TurboAssembler::Ccmp(const Register& rn, const Operand& operand, +void MacroAssembler::Ccmp(const Register& rn, const Operand& operand, StatusFlags nzcv, Condition cond) { DCHECK(allow_macro_instructions()); if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) { @@ -92,7 +92,7 @@ void TurboAssembler::Ccmp(const Register& rn, const Operand& operand, } } -void TurboAssembler::CcmpTagged(const Register& rn, const Operand& operand, +void MacroAssembler::CcmpTagged(const Register& rn, const Operand& operand, StatusFlags nzcv, Condition cond) { if (COMPRESS_POINTERS_BOOL) { Ccmp(rn.W(), operand.ToW(), nzcv, cond); @@ -101,7 +101,7 @@ void TurboAssembler::CcmpTagged(const Register& rn, const Operand& operand, } } -void TurboAssembler::Ccmn(const Register& rn, const Operand& operand, +void MacroAssembler::Ccmn(const Register& rn, const Operand& operand, StatusFlags nzcv, Condition cond) { DCHECK(allow_macro_instructions()); if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) { @@ -111,7 +111,7 @@ void TurboAssembler::Ccmn(const 
Register& rn, const Operand& operand, } } -void TurboAssembler::Add(const Register& rd, const Register& rn, +void MacroAssembler::Add(const Register& rd, const Register& rn, const Operand& operand) { DCHECK(allow_macro_instructions()); if (operand.IsImmediate() && (operand.ImmediateValue() < 0) && @@ -122,7 +122,7 @@ void TurboAssembler::Add(const Register& rd, const Register& rn, } } -void TurboAssembler::Adds(const Register& rd, const Register& rn, +void MacroAssembler::Adds(const Register& rd, const Register& rn, const Operand& operand) { DCHECK(allow_macro_instructions()); if (operand.IsImmediate() && (operand.ImmediateValue() < 0) && @@ -133,7 +133,7 @@ void TurboAssembler::Adds(const Register& rd, const Register& rn, } } -void TurboAssembler::Sub(const Register& rd, const Register& rn, +void MacroAssembler::Sub(const Register& rd, const Register& rn, const Operand& operand) { DCHECK(allow_macro_instructions()); if (operand.IsImmediate() && (operand.ImmediateValue() < 0) && @@ -144,7 +144,7 @@ void TurboAssembler::Sub(const Register& rd, const Register& rn, } } -void TurboAssembler::Subs(const Register& rd, const Register& rn, +void MacroAssembler::Subs(const Register& rd, const Register& rn, const Operand& operand) { DCHECK(allow_macro_instructions()); if (operand.IsImmediate() && (operand.ImmediateValue() < 0) && @@ -155,17 +155,17 @@ void TurboAssembler::Subs(const Register& rd, const Register& rn, } } -void TurboAssembler::Cmn(const Register& rn, const Operand& operand) { +void MacroAssembler::Cmn(const Register& rn, const Operand& operand) { DCHECK(allow_macro_instructions()); Adds(AppropriateZeroRegFor(rn), rn, operand); } -void TurboAssembler::Cmp(const Register& rn, const Operand& operand) { +void MacroAssembler::Cmp(const Register& rn, const Operand& operand) { DCHECK(allow_macro_instructions()); Subs(AppropriateZeroRegFor(rn), rn, operand); } -void TurboAssembler::CmpTagged(const Register& rn, const Operand& operand) { +void MacroAssembler::CmpTagged(const Register& rn, const Operand& operand) { if (COMPRESS_POINTERS_BOOL) { Cmp(rn.W(), operand.ToW()); } else { @@ -173,7 +173,7 @@ void TurboAssembler::CmpTagged(const Register& rn, const Operand& operand) { } } -void TurboAssembler::Neg(const Register& rd, const Operand& operand) { +void MacroAssembler::Neg(const Register& rd, const Operand& operand) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); if (operand.IsImmediate()) { @@ -183,12 +183,12 @@ void TurboAssembler::Neg(const Register& rd, const Operand& operand) { } } -void TurboAssembler::Negs(const Register& rd, const Operand& operand) { +void MacroAssembler::Negs(const Register& rd, const Operand& operand) { DCHECK(allow_macro_instructions()); Subs(rd, AppropriateZeroRegFor(rd), operand); } -void TurboAssembler::Adc(const Register& rd, const Register& rn, +void MacroAssembler::Adc(const Register& rd, const Register& rn, const Operand& operand) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); @@ -230,14 +230,14 @@ void MacroAssembler::Ngcs(const Register& rd, const Operand& operand) { Sbcs(rd, zr, operand); } -void TurboAssembler::Mvn(const Register& rd, uint64_t imm) { +void MacroAssembler::Mvn(const Register& rd, uint64_t imm) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); Mov(rd, ~imm); } #define DEFINE_FUNCTION(FN, REGTYPE, REG, OP) \ - void TurboAssembler::FN(const REGTYPE REG, const MemOperand& addr) { \ + void MacroAssembler::FN(const REGTYPE REG, const MemOperand& addr) { \ DCHECK(allow_macro_instructions()); \ 
LoadStoreMacro(REG, addr, OP); \ } @@ -245,7 +245,7 @@ LS_MACRO_LIST(DEFINE_FUNCTION) #undef DEFINE_FUNCTION #define DEFINE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \ - void TurboAssembler::FN(const REGTYPE REG, const REGTYPE REG2, \ + void MacroAssembler::FN(const REGTYPE REG, const REGTYPE REG2, \ const MemOperand& addr) { \ DCHECK(allow_macro_instructions()); \ LoadStorePairMacro(REG, REG2, addr, OP); \ @@ -254,7 +254,7 @@ LSPAIR_MACRO_LIST(DEFINE_FUNCTION) #undef DEFINE_FUNCTION #define DECLARE_FUNCTION(FN, OP) \ - void TurboAssembler::FN(const Register& rt, const Register& rn) { \ + void MacroAssembler::FN(const Register& rt, const Register& rn) { \ DCHECK(allow_macro_instructions()); \ OP(rt, rn); \ } @@ -270,32 +270,32 @@ LDA_STL_MACRO_LIST(DECLARE_FUNCTION) STLX_MACRO_LIST(DECLARE_FUNCTION) #undef DECLARE_FUNCTION -void TurboAssembler::Asr(const Register& rd, const Register& rn, +void MacroAssembler::Asr(const Register& rd, const Register& rn, unsigned shift) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); asr(rd, rn, shift); } -void TurboAssembler::Asr(const Register& rd, const Register& rn, +void MacroAssembler::Asr(const Register& rd, const Register& rn, const Register& rm) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); asrv(rd, rn, rm); } -void TurboAssembler::B(Label* label) { +void MacroAssembler::B(Label* label) { DCHECK(allow_macro_instructions()); b(label); CheckVeneerPool(false, false); } -void TurboAssembler::B(Condition cond, Label* label) { +void MacroAssembler::B(Condition cond, Label* label) { DCHECK(allow_macro_instructions()); B(label, cond); } -void TurboAssembler::Bfi(const Register& rd, const Register& rn, unsigned lsb, +void MacroAssembler::Bfi(const Register& rd, const Register& rn, unsigned lsb, unsigned width) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); @@ -309,7 +309,7 @@ void MacroAssembler::Bfxil(const Register& rd, const Register& rn, unsigned lsb, bfxil(rd, rn, lsb, width); } -void TurboAssembler::Bind(Label* label, BranchTargetIdentifier id) { +void MacroAssembler::Bind(Label* label, BranchTargetIdentifier id) { DCHECK(allow_macro_instructions()); if (id == BranchTargetIdentifier::kNone) { bind(label); @@ -326,21 +326,21 @@ void TurboAssembler::Bind(Label* label, BranchTargetIdentifier id) { } } -void TurboAssembler::CodeEntry() { CallTarget(); } +void MacroAssembler::CodeEntry() { CallTarget(); } -void TurboAssembler::ExceptionHandler() { JumpTarget(); } +void MacroAssembler::ExceptionHandler() { JumpTarget(); } -void TurboAssembler::BindExceptionHandler(Label* label) { +void MacroAssembler::BindExceptionHandler(Label* label) { BindJumpTarget(label); } -void TurboAssembler::JumpTarget() { +void MacroAssembler::JumpTarget() { #ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY bti(BranchTargetIdentifier::kBtiJump); #endif } -void TurboAssembler::BindJumpTarget(Label* label) { +void MacroAssembler::BindJumpTarget(Label* label) { #ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY Bind(label, BranchTargetIdentifier::kBtiJump); #else @@ -348,19 +348,19 @@ void TurboAssembler::BindJumpTarget(Label* label) { #endif } -void TurboAssembler::CallTarget() { +void MacroAssembler::CallTarget() { #ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY bti(BranchTargetIdentifier::kBtiCall); #endif } -void TurboAssembler::JumpOrCallTarget() { +void MacroAssembler::JumpOrCallTarget() { #ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY bti(BranchTargetIdentifier::kBtiJumpCall); #endif } -void TurboAssembler::BindJumpOrCallTarget(Label* label) { +void 
MacroAssembler::BindJumpOrCallTarget(Label* label) { #ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY Bind(label, BranchTargetIdentifier::kBtiJumpCall); #else @@ -368,24 +368,24 @@ void TurboAssembler::BindJumpOrCallTarget(Label* label) { #endif } -void TurboAssembler::Bl(Label* label) { +void MacroAssembler::Bl(Label* label) { DCHECK(allow_macro_instructions()); bl(label); } -void TurboAssembler::Blr(const Register& xn) { +void MacroAssembler::Blr(const Register& xn) { DCHECK(allow_macro_instructions()); DCHECK(!xn.IsZero()); blr(xn); } -void TurboAssembler::Br(const Register& xn) { +void MacroAssembler::Br(const Register& xn) { DCHECK(allow_macro_instructions()); DCHECK(!xn.IsZero()); br(xn); } -void TurboAssembler::Brk(int code) { +void MacroAssembler::Brk(int code) { DCHECK(allow_macro_instructions()); brk(code); } @@ -406,19 +406,19 @@ void MacroAssembler::Cinv(const Register& rd, const Register& rn, cinv(rd, rn, cond); } -void TurboAssembler::Cls(const Register& rd, const Register& rn) { +void MacroAssembler::Cls(const Register& rd, const Register& rn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); cls(rd, rn); } -void TurboAssembler::Clz(const Register& rd, const Register& rn) { +void MacroAssembler::Clz(const Register& rd, const Register& rn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); clz(rd, rn); } -void TurboAssembler::Cneg(const Register& rd, const Register& rn, +void MacroAssembler::Cneg(const Register& rd, const Register& rn, Condition cond) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); @@ -437,7 +437,7 @@ void MacroAssembler::CzeroX(const Register& rd, Condition cond) { // Conditionally move a value into the destination register. Only X registers // are supported due to the truncation side-effect when used on W registers. 
-void TurboAssembler::CmovX(const Register& rd, const Register& rn, +void MacroAssembler::CmovX(const Register& rd, const Register& rn, Condition cond) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsSP()); @@ -448,26 +448,26 @@ void TurboAssembler::CmovX(const Register& rd, const Register& rn, } } -void TurboAssembler::Csdb() { +void MacroAssembler::Csdb() { DCHECK(allow_macro_instructions()); csdb(); } -void TurboAssembler::Cset(const Register& rd, Condition cond) { +void MacroAssembler::Cset(const Register& rd, Condition cond) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); DCHECK((cond != al) && (cond != nv)); cset(rd, cond); } -void TurboAssembler::Csetm(const Register& rd, Condition cond) { +void MacroAssembler::Csetm(const Register& rd, Condition cond) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); DCHECK((cond != al) && (cond != nv)); csetm(rd, cond); } -void TurboAssembler::Csinc(const Register& rd, const Register& rn, +void MacroAssembler::Csinc(const Register& rd, const Register& rn, const Register& rm, Condition cond) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); @@ -491,17 +491,17 @@ void MacroAssembler::Csneg(const Register& rd, const Register& rn, csneg(rd, rn, rm, cond); } -void TurboAssembler::Dmb(BarrierDomain domain, BarrierType type) { +void MacroAssembler::Dmb(BarrierDomain domain, BarrierType type) { DCHECK(allow_macro_instructions()); dmb(domain, type); } -void TurboAssembler::Dsb(BarrierDomain domain, BarrierType type) { +void MacroAssembler::Dsb(BarrierDomain domain, BarrierType type) { DCHECK(allow_macro_instructions()); dsb(domain, type); } -void TurboAssembler::Debug(const char* message, uint32_t code, Instr params) { +void MacroAssembler::Debug(const char* message, uint32_t code, Instr params) { DCHECK(allow_macro_instructions()); debug(message, code, params); } @@ -513,25 +513,25 @@ void MacroAssembler::Extr(const Register& rd, const Register& rn, extr(rd, rn, rm, lsb); } -void TurboAssembler::Fabs(const VRegister& fd, const VRegister& fn) { +void MacroAssembler::Fabs(const VRegister& fd, const VRegister& fn) { DCHECK(allow_macro_instructions()); fabs(fd, fn); } -void TurboAssembler::Fadd(const VRegister& fd, const VRegister& fn, +void MacroAssembler::Fadd(const VRegister& fd, const VRegister& fn, const VRegister& fm) { DCHECK(allow_macro_instructions()); fadd(fd, fn, fm); } -void TurboAssembler::Fccmp(const VRegister& fn, const VRegister& fm, +void MacroAssembler::Fccmp(const VRegister& fn, const VRegister& fm, StatusFlags nzcv, Condition cond) { DCHECK(allow_macro_instructions()); DCHECK((cond != al) && (cond != nv)); fccmp(fn, fm, nzcv, cond); } -void TurboAssembler::Fccmp(const VRegister& fn, const double value, +void MacroAssembler::Fccmp(const VRegister& fn, const double value, StatusFlags nzcv, Condition cond) { DCHECK(allow_macro_instructions()); UseScratchRegisterScope temps(this); @@ -540,12 +540,12 @@ void TurboAssembler::Fccmp(const VRegister& fn, const double value, Fccmp(fn, tmp, nzcv, cond); } -void TurboAssembler::Fcmp(const VRegister& fn, const VRegister& fm) { +void MacroAssembler::Fcmp(const VRegister& fn, const VRegister& fm) { DCHECK(allow_macro_instructions()); fcmp(fn, fm); } -void TurboAssembler::Fcmp(const VRegister& fn, double value) { +void MacroAssembler::Fcmp(const VRegister& fn, double value) { DCHECK(allow_macro_instructions()); if (value != 0.0) { UseScratchRegisterScope temps(this); @@ -557,66 +557,66 @@ void TurboAssembler::Fcmp(const VRegister& fn, double value) { } } -void 
TurboAssembler::Fcsel(const VRegister& fd, const VRegister& fn, +void MacroAssembler::Fcsel(const VRegister& fd, const VRegister& fn, const VRegister& fm, Condition cond) { DCHECK(allow_macro_instructions()); DCHECK((cond != al) && (cond != nv)); fcsel(fd, fn, fm, cond); } -void TurboAssembler::Fcvt(const VRegister& fd, const VRegister& fn) { +void MacroAssembler::Fcvt(const VRegister& fd, const VRegister& fn) { DCHECK(allow_macro_instructions()); fcvt(fd, fn); } -void TurboAssembler::Fcvtas(const Register& rd, const VRegister& fn) { +void MacroAssembler::Fcvtas(const Register& rd, const VRegister& fn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); fcvtas(rd, fn); } -void TurboAssembler::Fcvtau(const Register& rd, const VRegister& fn) { +void MacroAssembler::Fcvtau(const Register& rd, const VRegister& fn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); fcvtau(rd, fn); } -void TurboAssembler::Fcvtms(const Register& rd, const VRegister& fn) { +void MacroAssembler::Fcvtms(const Register& rd, const VRegister& fn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); fcvtms(rd, fn); } -void TurboAssembler::Fcvtmu(const Register& rd, const VRegister& fn) { +void MacroAssembler::Fcvtmu(const Register& rd, const VRegister& fn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); fcvtmu(rd, fn); } -void TurboAssembler::Fcvtns(const Register& rd, const VRegister& fn) { +void MacroAssembler::Fcvtns(const Register& rd, const VRegister& fn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); fcvtns(rd, fn); } -void TurboAssembler::Fcvtnu(const Register& rd, const VRegister& fn) { +void MacroAssembler::Fcvtnu(const Register& rd, const VRegister& fn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); fcvtnu(rd, fn); } -void TurboAssembler::Fcvtzs(const Register& rd, const VRegister& fn) { +void MacroAssembler::Fcvtzs(const Register& rd, const VRegister& fn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); fcvtzs(rd, fn); } -void TurboAssembler::Fcvtzu(const Register& rd, const VRegister& fn) { +void MacroAssembler::Fcvtzu(const Register& rd, const VRegister& fn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); fcvtzu(rd, fn); } -void TurboAssembler::Fdiv(const VRegister& fd, const VRegister& fn, +void MacroAssembler::Fdiv(const VRegister& fd, const VRegister& fn, const VRegister& fm) { DCHECK(allow_macro_instructions()); fdiv(fd, fn, fm); @@ -628,7 +628,7 @@ void MacroAssembler::Fmadd(const VRegister& fd, const VRegister& fn, fmadd(fd, fn, fm, fa); } -void TurboAssembler::Fmax(const VRegister& fd, const VRegister& fn, +void MacroAssembler::Fmax(const VRegister& fd, const VRegister& fn, const VRegister& fm) { DCHECK(allow_macro_instructions()); fmax(fd, fn, fm); @@ -640,7 +640,7 @@ void MacroAssembler::Fmaxnm(const VRegister& fd, const VRegister& fn, fmaxnm(fd, fn, fm); } -void TurboAssembler::Fmin(const VRegister& fd, const VRegister& fn, +void MacroAssembler::Fmin(const VRegister& fd, const VRegister& fn, const VRegister& fm) { DCHECK(allow_macro_instructions()); fmin(fd, fn, fm); @@ -652,7 +652,7 @@ void MacroAssembler::Fminnm(const VRegister& fd, const VRegister& fn, fminnm(fd, fn, fm); } -void TurboAssembler::Fmov(VRegister fd, VRegister fn) { +void MacroAssembler::Fmov(VRegister fd, VRegister fn) { DCHECK(allow_macro_instructions()); // Only emit an instruction if fd and fn are different, and they are both D // registers. 
fmov(s0, s0) is not a no-op because it clears the top word of @@ -663,12 +663,12 @@ void TurboAssembler::Fmov(VRegister fd, VRegister fn) { } } -void TurboAssembler::Fmov(VRegister fd, Register rn) { +void MacroAssembler::Fmov(VRegister fd, Register rn) { DCHECK(allow_macro_instructions()); fmov(fd, rn); } -void TurboAssembler::Fmov(VRegister vd, double imm) { +void MacroAssembler::Fmov(VRegister vd, double imm) { DCHECK(allow_macro_instructions()); if (vd.Is1S() || vd.Is2S() || vd.Is4S()) { @@ -696,7 +696,7 @@ void TurboAssembler::Fmov(VRegister vd, double imm) { } } -void TurboAssembler::Fmov(VRegister vd, float imm) { +void MacroAssembler::Fmov(VRegister vd, float imm) { DCHECK(allow_macro_instructions()); if (vd.Is1D() || vd.Is2D()) { Fmov(vd, static_cast(imm)); @@ -723,7 +723,7 @@ void TurboAssembler::Fmov(VRegister vd, float imm) { } } -void TurboAssembler::Fmov(Register rd, VRegister fn) { +void MacroAssembler::Fmov(Register rd, VRegister fn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); fmov(rd, fn); @@ -735,7 +735,7 @@ void MacroAssembler::Fmsub(const VRegister& fd, const VRegister& fn, fmsub(fd, fn, fm, fa); } -void TurboAssembler::Fmul(const VRegister& fd, const VRegister& fn, +void MacroAssembler::Fmul(const VRegister& fd, const VRegister& fn, const VRegister& fm) { DCHECK(allow_macro_instructions()); fmul(fd, fn, fm); @@ -753,7 +753,7 @@ void MacroAssembler::Fnmsub(const VRegister& fd, const VRegister& fn, fnmsub(fd, fn, fm, fa); } -void TurboAssembler::Fsub(const VRegister& fd, const VRegister& fn, +void MacroAssembler::Fsub(const VRegister& fd, const VRegister& fn, const VRegister& fm) { DCHECK(allow_macro_instructions()); fsub(fd, fn, fm); @@ -769,52 +769,52 @@ void MacroAssembler::Hlt(int code) { hlt(code); } -void TurboAssembler::Isb() { +void MacroAssembler::Isb() { DCHECK(allow_macro_instructions()); isb(); } -void TurboAssembler::Ldr(const CPURegister& rt, const Operand& operand) { +void MacroAssembler::Ldr(const CPURegister& rt, const Operand& operand) { DCHECK(allow_macro_instructions()); ldr(rt, operand); } -void TurboAssembler::Lsl(const Register& rd, const Register& rn, +void MacroAssembler::Lsl(const Register& rd, const Register& rn, unsigned shift) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); lsl(rd, rn, shift); } -void TurboAssembler::Lsl(const Register& rd, const Register& rn, +void MacroAssembler::Lsl(const Register& rd, const Register& rn, const Register& rm) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); lslv(rd, rn, rm); } -void TurboAssembler::Lsr(const Register& rd, const Register& rn, +void MacroAssembler::Lsr(const Register& rd, const Register& rn, unsigned shift) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); lsr(rd, rn, shift); } -void TurboAssembler::Lsr(const Register& rd, const Register& rn, +void MacroAssembler::Lsr(const Register& rd, const Register& rn, const Register& rm) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); lsrv(rd, rn, rm); } -void TurboAssembler::Madd(const Register& rd, const Register& rn, +void MacroAssembler::Madd(const Register& rd, const Register& rn, const Register& rm, const Register& ra) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); madd(rd, rn, rm, ra); } -void TurboAssembler::Mneg(const Register& rd, const Register& rn, +void MacroAssembler::Mneg(const Register& rd, const Register& rn, const Register& rm) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); @@ -827,44 +827,38 @@ void MacroAssembler::Movk(const Register& rd, uint64_t 
imm, int shift) { movk(rd, imm, shift); } -void TurboAssembler::Mrs(const Register& rt, SystemRegister sysreg) { +void MacroAssembler::Mrs(const Register& rt, SystemRegister sysreg) { DCHECK(allow_macro_instructions()); DCHECK(!rt.IsZero()); mrs(rt, sysreg); } -void TurboAssembler::Msr(SystemRegister sysreg, const Register& rt) { +void MacroAssembler::Msr(SystemRegister sysreg, const Register& rt) { DCHECK(allow_macro_instructions()); msr(sysreg, rt); } -void TurboAssembler::Msub(const Register& rd, const Register& rn, +void MacroAssembler::Msub(const Register& rd, const Register& rn, const Register& rm, const Register& ra) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); msub(rd, rn, rm, ra); } -void TurboAssembler::Mul(const Register& rd, const Register& rn, +void MacroAssembler::Mul(const Register& rd, const Register& rn, const Register& rm) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); mul(rd, rn, rm); } -void TurboAssembler::Rbit(const Register& rd, const Register& rn) { +void MacroAssembler::Rbit(const Register& rd, const Register& rn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); rbit(rd, rn); } -void TurboAssembler::Rev(const Register& rd, const Register& rn) { - DCHECK(allow_macro_instructions()); - DCHECK(!rd.IsZero()); - rev(rd, rn); -} - -void TurboAssembler::Ret(const Register& xn) { +void MacroAssembler::Ret(const Register& xn) { DCHECK(allow_macro_instructions()); DCHECK(!xn.IsZero()); ret(xn); @@ -877,46 +871,46 @@ void MacroAssembler::Rev(const Register& rd, const Register& rn) { rev(rd, rn); } -void TurboAssembler::Rev16(const Register& rd, const Register& rn) { +void MacroAssembler::Rev16(const Register& rd, const Register& rn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); rev16(rd, rn); } -void TurboAssembler::Rev32(const Register& rd, const Register& rn) { +void MacroAssembler::Rev32(const Register& rd, const Register& rn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); rev32(rd, rn); } -void TurboAssembler::Ror(const Register& rd, const Register& rs, +void MacroAssembler::Ror(const Register& rd, const Register& rs, unsigned shift) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); ror(rd, rs, shift); } -void TurboAssembler::Ror(const Register& rd, const Register& rn, +void MacroAssembler::Ror(const Register& rd, const Register& rn, const Register& rm) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); rorv(rd, rn, rm); } -void TurboAssembler::Sbfx(const Register& rd, const Register& rn, unsigned lsb, +void MacroAssembler::Sbfx(const Register& rd, const Register& rn, unsigned lsb, unsigned width) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); sbfx(rd, rn, lsb, width); } -void TurboAssembler::Scvtf(const VRegister& fd, const Register& rn, +void MacroAssembler::Scvtf(const VRegister& fd, const Register& rn, unsigned fbits) { DCHECK(allow_macro_instructions()); scvtf(fd, rn, fbits); } -void TurboAssembler::Sdiv(const Register& rd, const Register& rn, +void MacroAssembler::Sdiv(const Register& rd, const Register& rn, const Register& rm) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); @@ -937,80 +931,80 @@ void MacroAssembler::Smsubl(const Register& rd, const Register& rn, smsubl(rd, rn, rm, ra); } -void TurboAssembler::Smull(const Register& rd, const Register& rn, +void MacroAssembler::Smull(const Register& rd, const Register& rn, const Register& rm) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); smull(rd, rn, rm); } -void TurboAssembler::Smulh(const 
Register& rd, const Register& rn, +void MacroAssembler::Smulh(const Register& rd, const Register& rn, const Register& rm) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); smulh(rd, rn, rm); } -void TurboAssembler::Umull(const Register& rd, const Register& rn, +void MacroAssembler::Umull(const Register& rd, const Register& rn, const Register& rm) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); umaddl(rd, rn, rm, xzr); } -void TurboAssembler::Umulh(const Register& rd, const Register& rn, +void MacroAssembler::Umulh(const Register& rd, const Register& rn, const Register& rm) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); umulh(rd, rn, rm); } -void TurboAssembler::Sxtb(const Register& rd, const Register& rn) { +void MacroAssembler::Sxtb(const Register& rd, const Register& rn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); sxtb(rd, rn); } -void TurboAssembler::Sxth(const Register& rd, const Register& rn) { +void MacroAssembler::Sxth(const Register& rd, const Register& rn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); sxth(rd, rn); } -void TurboAssembler::Sxtw(const Register& rd, const Register& rn) { +void MacroAssembler::Sxtw(const Register& rd, const Register& rn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); sxtw(rd, rn); } -void TurboAssembler::Ubfiz(const Register& rd, const Register& rn, unsigned lsb, +void MacroAssembler::Ubfiz(const Register& rd, const Register& rn, unsigned lsb, unsigned width) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); ubfiz(rd, rn, lsb, width); } -void TurboAssembler::Sbfiz(const Register& rd, const Register& rn, unsigned lsb, +void MacroAssembler::Sbfiz(const Register& rd, const Register& rn, unsigned lsb, unsigned width) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); sbfiz(rd, rn, lsb, width); } -void TurboAssembler::Ubfx(const Register& rd, const Register& rn, unsigned lsb, +void MacroAssembler::Ubfx(const Register& rd, const Register& rn, unsigned lsb, unsigned width) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); ubfx(rd, rn, lsb, width); } -void TurboAssembler::Ucvtf(const VRegister& fd, const Register& rn, +void MacroAssembler::Ucvtf(const VRegister& fd, const Register& rn, unsigned fbits) { DCHECK(allow_macro_instructions()); ucvtf(fd, rn, fbits); } -void TurboAssembler::Udiv(const Register& rd, const Register& rn, +void MacroAssembler::Udiv(const Register& rd, const Register& rn, const Register& rm) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); @@ -1031,25 +1025,25 @@ void MacroAssembler::Umsubl(const Register& rd, const Register& rn, umsubl(rd, rn, rm, ra); } -void TurboAssembler::Uxtb(const Register& rd, const Register& rn) { +void MacroAssembler::Uxtb(const Register& rd, const Register& rn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); uxtb(rd, rn); } -void TurboAssembler::Uxth(const Register& rd, const Register& rn) { +void MacroAssembler::Uxth(const Register& rd, const Register& rn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); uxth(rd, rn); } -void TurboAssembler::Uxtw(const Register& rd, const Register& rn) { +void MacroAssembler::Uxtw(const Register& rd, const Register& rn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); uxtw(rd, rn); } -void TurboAssembler::InitializeRootRegister() { +void MacroAssembler::InitializeRootRegister() { ExternalReference isolate_root = ExternalReference::isolate_root(isolate()); Mov(kRootRegister, Operand(isolate_root)); #ifdef 
V8_COMPRESS_POINTERS_IN_SHARED_CAGE @@ -1057,15 +1051,15 @@ void TurboAssembler::InitializeRootRegister() { #endif } -void TurboAssembler::SmiTag(Register dst, Register src) { +void MacroAssembler::SmiTag(Register dst, Register src) { DCHECK(dst.Is64Bits() && src.Is64Bits()); DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits()); Lsl(dst, src, kSmiShift); } -void TurboAssembler::SmiTag(Register smi) { SmiTag(smi, smi); } +void MacroAssembler::SmiTag(Register smi) { SmiTag(smi, smi); } -void TurboAssembler::SmiUntag(Register dst, Register src) { +void MacroAssembler::SmiUntag(Register dst, Register src) { DCHECK(dst.Is64Bits() && src.Is64Bits()); if (v8_flags.enable_slow_asserts) { AssertSmi(src); @@ -1078,7 +1072,7 @@ void TurboAssembler::SmiUntag(Register dst, Register src) { } } -void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) { +void MacroAssembler::SmiUntag(Register dst, const MemOperand& src) { DCHECK(dst.Is64Bits()); if (SmiValuesAre32Bits()) { if (src.IsImmediateOffset() && src.shift_amount() == 0) { @@ -1104,11 +1098,11 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) { } } -void TurboAssembler::SmiUntag(Register smi) { SmiUntag(smi, smi); } +void MacroAssembler::SmiUntag(Register smi) { SmiUntag(smi, smi); } -void TurboAssembler::SmiToInt32(Register smi) { SmiToInt32(smi, smi); } +void MacroAssembler::SmiToInt32(Register smi) { SmiToInt32(smi, smi); } -void TurboAssembler::SmiToInt32(Register dst, Register smi) { +void MacroAssembler::SmiToInt32(Register dst, Register smi) { DCHECK(dst.Is64Bits()); if (v8_flags.enable_slow_asserts) { AssertSmi(smi); @@ -1121,7 +1115,7 @@ void TurboAssembler::SmiToInt32(Register dst, Register smi) { } } -void TurboAssembler::JumpIfSmi(Register value, Label* smi_label, +void MacroAssembler::JumpIfSmi(Register value, Label* smi_label, Label* not_smi_label) { static_assert((kSmiTagSize == 1) && (kSmiTag == 0)); // Check if the tag bit is set. 
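Editor's note: the SmiTag/SmiUntag hunks above implement Smi tagging on arm64 as plain shifts by kSmiShift, and JumpIfSmi tests bit 0 (the static_assert pins kSmiTagSize == 1 and kSmiTag == 0). The sketch below spells out that arithmetic, assuming the 32-bit-Smi-values-in-a-64-bit-word layout (kSmiShift == 32); the 31-bit layout used with pointer compression shifts by 1 instead.

// Hedged sketch of the tag/untag arithmetic done with Lsl/Asr above.
#include <cassert>
#include <cstdint>

constexpr int kSmiShift = 32;  // assumption: SmiValuesAre32Bits()

inline uint64_t SmiTag(int32_t value) {
  // Lsl(dst, src, kSmiShift): the signed 32-bit payload ends up in the upper half.
  return static_cast<uint64_t>(static_cast<int64_t>(value)) << kSmiShift;
}

inline int32_t SmiUntag(uint64_t smi) {
  // Asr(dst, src, kSmiShift): an arithmetic shift restores the signed payload.
  return static_cast<int32_t>(static_cast<int64_t>(smi) >> kSmiShift);
}

int main() {
  assert(SmiUntag(SmiTag(-42)) == -42);
  assert((SmiTag(7) & 1) == 0);  // tag bit (bit 0) is clear, which is what JumpIfSmi checks
  return 0;
}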
@@ -1136,11 +1130,11 @@ void TurboAssembler::JumpIfSmi(Register value, Label* smi_label, } } -void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) { +void MacroAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) { CompareAndBranch(x, y, eq, dest); } -void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) { +void MacroAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) { CompareAndBranch(x, y, lt, dest); } @@ -1154,10 +1148,10 @@ inline void MacroAssembler::AssertFeedbackVector(Register object) { AssertFeedbackVector(object, scratch); } -void TurboAssembler::jmp(Label* L) { B(L); } +void MacroAssembler::jmp(Label* L) { B(L); } -template -void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1, +template +void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1, const CPURegister& src2, const CPURegister& src3) { DCHECK(AreSameSizeAndType(src0, src1, src2, src3)); DCHECK_IMPLIES((lr_mode == kSignLR), ((src0 == lr) || (src1 == lr) || @@ -1178,8 +1172,8 @@ void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1, PushHelper(count, size, src0, src1, src2, src3); } -template -void TurboAssembler::Push(const Register& src0, const VRegister& src1) { +template +void MacroAssembler::Push(const Register& src0, const VRegister& src1) { DCHECK_IMPLIES((lr_mode == kSignLR), ((src0 == lr) || (src1 == lr))); DCHECK_IMPLIES((lr_mode == kDontStoreLR), ((src0 != lr) && (src1 != lr))); #ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY @@ -1197,8 +1191,8 @@ void TurboAssembler::Push(const Register& src0, const VRegister& src1) { str(src0, MemOperand(sp, src1.SizeInBytes())); } -template -void TurboAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1, +template +void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1, const CPURegister& dst2, const CPURegister& dst3) { // It is not valid to pop into the same register more than once in one // instruction, not even into the zero register. 
@@ -1224,8 +1218,8 @@ void TurboAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1, #endif } -template -void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) { +template +void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) { DCHECK_IMPLIES((lr_mode == kSignLR), (src == lr)); DCHECK_IMPLIES((lr_mode == kDontStoreLR), (src != lr)); #ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY @@ -1244,8 +1238,8 @@ void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) { Str(src, MemOperand(sp, offset)); } -template -void TurboAssembler::Peek(const CPURegister& dst, const Operand& offset) { +template +void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) { if (offset.IsImmediate()) { DCHECK_GE(offset.ImmediateValue(), 0); } else if (v8_flags.debug_code) { @@ -1264,7 +1258,7 @@ void TurboAssembler::Peek(const CPURegister& dst, const Operand& offset) { #endif } -void TurboAssembler::Claim(int64_t count, uint64_t unit_size) { +void MacroAssembler::Claim(int64_t count, uint64_t unit_size) { DCHECK_GE(count, 0); uint64_t size = count * unit_size; @@ -1282,7 +1276,7 @@ void TurboAssembler::Claim(int64_t count, uint64_t unit_size) { Sub(sp, sp, size); } -void TurboAssembler::Claim(const Register& count, uint64_t unit_size) { +void MacroAssembler::Claim(const Register& count, uint64_t unit_size) { if (unit_size == 0) return; DCHECK(base::bits::IsPowerOfTwo(unit_size)); @@ -1323,7 +1317,7 @@ void TurboAssembler::Claim(const Register& count, uint64_t unit_size) { #endif } -void TurboAssembler::Drop(int64_t count, uint64_t unit_size) { +void MacroAssembler::Drop(int64_t count, uint64_t unit_size) { DCHECK_GE(count, 0); uint64_t size = count * unit_size; @@ -1335,7 +1329,7 @@ void TurboAssembler::Drop(int64_t count, uint64_t unit_size) { DCHECK_EQ(size % 16, 0); } -void TurboAssembler::Drop(const Register& count, uint64_t unit_size) { +void MacroAssembler::Drop(const Register& count, uint64_t unit_size) { if (unit_size == 0) return; DCHECK(base::bits::IsPowerOfTwo(unit_size)); @@ -1350,7 +1344,7 @@ void TurboAssembler::Drop(const Register& count, uint64_t unit_size) { Add(sp, sp, size); } -void TurboAssembler::DropArguments(const Register& count, +void MacroAssembler::DropArguments(const Register& count, ArgumentsCountMode mode) { int extra_slots = 1; // Padding slot. if (mode == kCountExcludesReceiver) { @@ -1364,7 +1358,7 @@ void TurboAssembler::DropArguments(const Register& count, Drop(tmp, kXRegSize); } -void TurboAssembler::DropArguments(int64_t count, ArgumentsCountMode mode) { +void MacroAssembler::DropArguments(int64_t count, ArgumentsCountMode mode) { if (mode == kCountExcludesReceiver) { // Add a slot for the receiver. 
++count; @@ -1372,13 +1366,13 @@ void TurboAssembler::DropArguments(int64_t count, ArgumentsCountMode mode) { Drop(RoundUp(count, 2), kXRegSize); } -void TurboAssembler::DropSlots(int64_t count) { +void MacroAssembler::DropSlots(int64_t count) { Drop(RoundUp(count, 2), kXRegSize); } -void TurboAssembler::PushArgument(const Register& arg) { Push(padreg, arg); } +void MacroAssembler::PushArgument(const Register& arg) { Push(padreg, arg); } -void TurboAssembler::CompareAndBranch(const Register& lhs, const Operand& rhs, +void MacroAssembler::CompareAndBranch(const Register& lhs, const Operand& rhs, Condition cond, Label* label) { if (rhs.IsImmediate() && (rhs.ImmediateValue() == 0) && ((cond == eq) || (cond == ne) || (cond == hi) || (cond == ls))) { @@ -1393,7 +1387,7 @@ void TurboAssembler::CompareAndBranch(const Register& lhs, const Operand& rhs, } } -void TurboAssembler::CompareTaggedAndBranch(const Register& lhs, +void MacroAssembler::CompareTaggedAndBranch(const Register& lhs, const Operand& rhs, Condition cond, Label* label) { if (COMPRESS_POINTERS_BOOL) { @@ -1403,7 +1397,7 @@ void TurboAssembler::CompareTaggedAndBranch(const Register& lhs, } } -void TurboAssembler::TestAndBranchIfAnySet(const Register& reg, +void MacroAssembler::TestAndBranchIfAnySet(const Register& reg, const uint64_t bit_pattern, Label* label) { int bits = reg.SizeInBits(); @@ -1416,7 +1410,7 @@ void TurboAssembler::TestAndBranchIfAnySet(const Register& reg, } } -void TurboAssembler::TestAndBranchIfAllClear(const Register& reg, +void MacroAssembler::TestAndBranchIfAllClear(const Register& reg, const uint64_t bit_pattern, Label* label) { int bits = reg.SizeInBits(); @@ -1429,7 +1423,7 @@ void TurboAssembler::TestAndBranchIfAllClear(const Register& reg, } } -void TurboAssembler::MoveHeapNumber(Register dst, double value) { +void MacroAssembler::MoveHeapNumber(Register dst, double value) { Mov(dst, Operand::EmbeddedHeapNumber(value)); } diff --git a/src/codegen/arm64/macro-assembler-arm64.cc b/src/codegen/arm64/macro-assembler-arm64.cc index c7f875d266..f79b1f9046 100644 --- a/src/codegen/arm64/macro-assembler-arm64.cc +++ b/src/codegen/arm64/macro-assembler-arm64.cc @@ -39,9 +39,9 @@ namespace v8 { namespace internal { -CPURegList TurboAssembler::DefaultTmpList() { return CPURegList(ip0, ip1); } +CPURegList MacroAssembler::DefaultTmpList() { return CPURegList(ip0, ip1); } -CPURegList TurboAssembler::DefaultFPTmpList() { +CPURegList MacroAssembler::DefaultFPTmpList() { return CPURegList(fp_scratch1, fp_scratch2); } @@ -57,7 +57,7 @@ constexpr int kStackSavedSavedFPSizeInBits = kDRegSizeInBits; } // namespace -void TurboAssembler::PushCPURegList(CPURegList registers) { +void MacroAssembler::PushCPURegList(CPURegList registers) { // If LR was stored here, we would need to sign it if // V8_ENABLE_CONTROL_FLOW_INTEGRITY is on. 
DCHECK(!registers.IncludesAliasOf(lr)); @@ -77,7 +77,7 @@ void TurboAssembler::PushCPURegList(CPURegList registers) { } } -void TurboAssembler::PopCPURegList(CPURegList registers) { +void MacroAssembler::PopCPURegList(CPURegList registers) { int size = registers.RegisterSizeInBytes(); DCHECK_EQ(0, (size * registers.Count()) % 16); @@ -139,7 +139,7 @@ void MacroAssembler::PopAll(RegList reglist) { } } -int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, +int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) const { auto list = kCallerSaved; list.Remove(exclusion); @@ -155,7 +155,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, return bytes; } -int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, +int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) { ASM_CODE_COMMENT(this); auto list = kCallerSaved; @@ -175,7 +175,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, return bytes; } -int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) { +int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) { ASM_CODE_COMMENT(this); int bytes = 0; if (fp_mode == SaveFPRegsMode::kSave) { @@ -195,7 +195,7 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) { return bytes; } -void TurboAssembler::LogicalMacro(const Register& rd, const Register& rn, +void MacroAssembler::LogicalMacro(const Register& rd, const Register& rn, const Operand& operand, LogicalOp op) { ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); @@ -303,7 +303,7 @@ void TurboAssembler::LogicalMacro(const Register& rd, const Register& rn, } } -void TurboAssembler::Mov(const Register& rd, uint64_t imm) { +void MacroAssembler::Mov(const Register& rd, uint64_t imm) { DCHECK(allow_macro_instructions()); DCHECK(is_uint32(imm) || is_int32(imm) || rd.Is64Bits()); DCHECK(!rd.IsZero()); @@ -379,7 +379,7 @@ void TurboAssembler::Mov(const Register& rd, uint64_t imm) { } } -void TurboAssembler::Mov(const Register& rd, const Operand& operand, +void MacroAssembler::Mov(const Register& rd, const Operand& operand, DiscardMoveMode discard_mode) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); @@ -447,11 +447,11 @@ void TurboAssembler::Mov(const Register& rd, const Operand& operand, } } -void TurboAssembler::Mov(const Register& rd, Smi smi) { +void MacroAssembler::Mov(const Register& rd, Smi smi) { return Mov(rd, Operand(smi)); } -void TurboAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) { +void MacroAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) { DCHECK(is_uint16(imm)); int byte1 = (imm & 0xFF); int byte2 = ((imm >> 8) & 0xFF); @@ -473,7 +473,7 @@ void TurboAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) { } } -void TurboAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) { +void MacroAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) { DCHECK(is_uint32(imm)); uint8_t bytes[sizeof(imm)]; @@ -550,7 +550,7 @@ void TurboAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) { } } -void TurboAssembler::Movi64bitHelper(const VRegister& vd, uint64_t imm) { +void MacroAssembler::Movi64bitHelper(const VRegister& vd, uint64_t imm) { // All bytes are either 0x00 or 0xFF. 
{ bool all0orff = true; @@ -586,7 +586,7 @@ void TurboAssembler::Movi64bitHelper(const VRegister& vd, uint64_t imm) { } } -void TurboAssembler::Movi(const VRegister& vd, uint64_t imm, Shift shift, +void MacroAssembler::Movi(const VRegister& vd, uint64_t imm, Shift shift, int shift_amount) { DCHECK(allow_macro_instructions()); if (shift_amount != 0 || shift != LSL) { @@ -607,7 +607,7 @@ void TurboAssembler::Movi(const VRegister& vd, uint64_t imm, Shift shift, } } -void TurboAssembler::Movi(const VRegister& vd, uint64_t hi, uint64_t lo) { +void MacroAssembler::Movi(const VRegister& vd, uint64_t hi, uint64_t lo) { // TODO(v8:11033): Move 128-bit values in a more efficient way. DCHECK(vd.Is128Bits()); Movi(vd.V2D(), lo); @@ -619,7 +619,7 @@ void TurboAssembler::Movi(const VRegister& vd, uint64_t hi, uint64_t lo) { } } -void TurboAssembler::Mvn(const Register& rd, const Operand& operand) { +void MacroAssembler::Mvn(const Register& rd, const Operand& operand) { DCHECK(allow_macro_instructions()); if (operand.NeedsRelocation(this)) { @@ -642,7 +642,7 @@ void TurboAssembler::Mvn(const Register& rd, const Operand& operand) { } } -unsigned TurboAssembler::CountSetHalfWords(uint64_t imm, unsigned reg_size) { +unsigned MacroAssembler::CountSetHalfWords(uint64_t imm, unsigned reg_size) { DCHECK_EQ(reg_size % 16, 0); #define HALFWORD(idx) (((imm >> ((idx)*16)) & 0xFFFF) ? 1u : 0u) @@ -660,18 +660,18 @@ unsigned TurboAssembler::CountSetHalfWords(uint64_t imm, unsigned reg_size) { // The movz instruction can generate immediates containing an arbitrary 16-bit // half-word, with remaining bits clear, eg. 0x00001234, 0x0000123400000000. -bool TurboAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) { +bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) { DCHECK((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits)); return CountSetHalfWords(imm, reg_size) <= 1; } // The movn instruction can generate immediates containing an arbitrary 16-bit // half-word, with remaining bits set, eg. 0xFFFF1234, 0xFFFF1234FFFFFFFF. 
-bool TurboAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) { +bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) { return IsImmMovz(~imm, reg_size); } -void TurboAssembler::ConditionalCompareMacro(const Register& rn, +void MacroAssembler::ConditionalCompareMacro(const Register& rn, const Operand& operand, StatusFlags nzcv, Condition cond, ConditionalCompareOp op) { @@ -699,7 +699,7 @@ void TurboAssembler::ConditionalCompareMacro(const Register& rn, } } -void TurboAssembler::Csel(const Register& rd, const Register& rn, +void MacroAssembler::Csel(const Register& rd, const Register& rn, const Operand& operand, Condition cond) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); @@ -733,7 +733,7 @@ void TurboAssembler::Csel(const Register& rd, const Register& rn, } } -bool TurboAssembler::TryOneInstrMoveImmediate(const Register& dst, +bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst, int64_t imm) { unsigned n, imm_s, imm_r; int reg_size = dst.SizeInBits(); @@ -755,7 +755,7 @@ bool TurboAssembler::TryOneInstrMoveImmediate(const Register& dst, return false; } -Operand TurboAssembler::MoveImmediateForShiftedOp(const Register& dst, +Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst, int64_t imm, PreShiftImmMode mode) { int reg_size = dst.SizeInBits(); @@ -805,7 +805,7 @@ Operand TurboAssembler::MoveImmediateForShiftedOp(const Register& dst, return Operand(dst); } -void TurboAssembler::AddSubMacro(const Register& rd, const Register& rn, +void MacroAssembler::AddSubMacro(const Register& rd, const Register& rn, const Operand& operand, FlagsUpdate S, AddSubOp op) { if (operand.IsZero() && rd == rn && rd.Is64Bits() && rn.Is64Bits() && @@ -851,7 +851,7 @@ void TurboAssembler::AddSubMacro(const Register& rd, const Register& rn, } } -void TurboAssembler::AddSubWithCarryMacro(const Register& rd, +void MacroAssembler::AddSubWithCarryMacro(const Register& rd, const Register& rn, const Operand& operand, FlagsUpdate S, AddSubWithCarryOp op) { @@ -900,7 +900,7 @@ void TurboAssembler::AddSubWithCarryMacro(const Register& rd, } } -void TurboAssembler::LoadStoreMacro(const CPURegister& rt, +void MacroAssembler::LoadStoreMacro(const CPURegister& rt, const MemOperand& addr, LoadStoreOp op) { int64_t offset = addr.offset(); unsigned size = CalcLSDataSize(op); @@ -930,7 +930,7 @@ void TurboAssembler::LoadStoreMacro(const CPURegister& rt, } } -void TurboAssembler::LoadStorePairMacro(const CPURegister& rt, +void MacroAssembler::LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2, const MemOperand& addr, LoadStorePairOp op) { @@ -963,7 +963,7 @@ void TurboAssembler::LoadStorePairMacro(const CPURegister& rt, } } -bool TurboAssembler::NeedExtraInstructionsOrRegisterBranch( +bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch( Label* label, ImmBranchType b_type) { bool need_longer_range = false; // There are two situations in which we care about the offset being out of @@ -986,7 +986,7 @@ bool TurboAssembler::NeedExtraInstructionsOrRegisterBranch( return need_longer_range; } -void TurboAssembler::Adr(const Register& rd, Label* label, AdrHint hint) { +void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); @@ -1020,7 +1020,7 @@ void TurboAssembler::Adr(const Register& rd, Label* label, AdrHint hint) { } } -void TurboAssembler::B(Label* label, BranchType type, Register reg, int bit) { +void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) { 
DCHECK((reg == NoReg || type >= kBranchTypeFirstUsingReg) && (bit == -1 || type >= kBranchTypeFirstUsingBit)); if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) { @@ -1050,7 +1050,7 @@ void TurboAssembler::B(Label* label, BranchType type, Register reg, int bit) { } } -void TurboAssembler::B(Label* label, Condition cond) { +void MacroAssembler::B(Label* label, Condition cond) { DCHECK(allow_macro_instructions()); DCHECK((cond != al) && (cond != nv)); @@ -1067,7 +1067,7 @@ void TurboAssembler::B(Label* label, Condition cond) { bind(&done); } -void TurboAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) { +void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) { DCHECK(allow_macro_instructions()); Label done; @@ -1083,7 +1083,7 @@ void TurboAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) { bind(&done); } -void TurboAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) { +void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) { DCHECK(allow_macro_instructions()); Label done; @@ -1099,7 +1099,7 @@ void TurboAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) { bind(&done); } -void TurboAssembler::Cbnz(const Register& rt, Label* label) { +void MacroAssembler::Cbnz(const Register& rt, Label* label) { DCHECK(allow_macro_instructions()); Label done; @@ -1115,7 +1115,7 @@ void TurboAssembler::Cbnz(const Register& rt, Label* label) { bind(&done); } -void TurboAssembler::Cbz(const Register& rt, Label* label) { +void MacroAssembler::Cbz(const Register& rt, Label* label) { DCHECK(allow_macro_instructions()); Label done; @@ -1133,7 +1133,7 @@ void TurboAssembler::Cbz(const Register& rt, Label* label) { // Pseudo-instructions. -void TurboAssembler::Abs(const Register& rd, const Register& rm, +void MacroAssembler::Abs(const Register& rd, const Register& rm, Label* is_not_representable, Label* is_representable) { DCHECK(allow_macro_instructions()); DCHECK(AreSameSizeAndType(rd, rm)); @@ -1154,7 +1154,7 @@ void TurboAssembler::Abs(const Register& rd, const Register& rm, } } -void TurboAssembler::Switch(Register scratch, Register value, +void MacroAssembler::Switch(Register scratch, Register value, int case_value_base, Label** labels, int num_labels) { Register table = scratch; @@ -1178,7 +1178,7 @@ void TurboAssembler::Switch(Register scratch, Register value, // Abstracted stack operations. 
-void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1, +void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1, const CPURegister& src2, const CPURegister& src3, const CPURegister& src4, const CPURegister& src5, const CPURegister& src6, const CPURegister& src7) { @@ -1192,7 +1192,7 @@ void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1, PushHelper(count - 4, size, src4, src5, src6, src7); } -void TurboAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1, +void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1, const CPURegister& dst2, const CPURegister& dst3, const CPURegister& dst4, const CPURegister& dst5, const CPURegister& dst6, const CPURegister& dst7) { @@ -1238,7 +1238,7 @@ void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) { Bind(&done); } -void TurboAssembler::PushHelper(int count, int size, const CPURegister& src0, +void MacroAssembler::PushHelper(int count, int size, const CPURegister& src0, const CPURegister& src1, const CPURegister& src2, const CPURegister& src3) { @@ -1276,7 +1276,7 @@ void TurboAssembler::PushHelper(int count, int size, const CPURegister& src0, } } -void TurboAssembler::PopHelper(int count, int size, const CPURegister& dst0, +void MacroAssembler::PopHelper(int count, int size, const CPURegister& dst0, const CPURegister& dst1, const CPURegister& dst2, const CPURegister& dst3) { // Ensure that we don't unintentially modify scratch or debug registers. @@ -1314,7 +1314,7 @@ void TurboAssembler::PopHelper(int count, int size, const CPURegister& dst0, } } -void TurboAssembler::PokePair(const CPURegister& src1, const CPURegister& src2, +void MacroAssembler::PokePair(const CPURegister& src1, const CPURegister& src2, int offset) { DCHECK(AreSameSizeAndType(src1, src2)); DCHECK((offset >= 0) && ((offset % src1.SizeInBytes()) == 0)); @@ -1522,21 +1522,20 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot( bind(&maybe_has_optimized_code); Register optimized_code_entry = x7; - LoadAnyTaggedField( - optimized_code_entry, - FieldMemOperand(feedback_vector, - FeedbackVector::kMaybeOptimizedCodeOffset)); + LoadTaggedField(optimized_code_entry, + FieldMemOperand(feedback_vector, + FeedbackVector::kMaybeOptimizedCodeOffset)); TailCallOptimizedCodeSlot(this, optimized_code_entry, x4); } -Condition TurboAssembler::CheckSmi(Register object) { +Condition MacroAssembler::CheckSmi(Register object) { static_assert(kSmiTag == 0); Tst(object, kSmiTagMask); return eq; } #ifdef V8_ENABLE_DEBUG_CODE -void TurboAssembler::AssertSpAligned() { +void MacroAssembler::AssertSpAligned() { if (!v8_flags.debug_code) return; ASM_CODE_COMMENT(this); HardAbortScope hard_abort(this); // Avoid calls to Abort. 
@@ -1549,7 +1548,7 @@ void TurboAssembler::AssertSpAligned() { Check(eq, AbortReason::kUnexpectedStackPointer); } -void TurboAssembler::AssertFPCRState(Register fpcr) { +void MacroAssembler::AssertFPCRState(Register fpcr) { if (!v8_flags.debug_code) return; ASM_CODE_COMMENT(this); Label unexpected_mode, done; @@ -1573,7 +1572,7 @@ void TurboAssembler::AssertFPCRState(Register fpcr) { Bind(&done); } -void TurboAssembler::AssertSmi(Register object, AbortReason reason) { +void MacroAssembler::AssertSmi(Register object, AbortReason reason) { if (!v8_flags.debug_code) return; ASM_CODE_COMMENT(this); static_assert(kSmiTag == 0); @@ -1581,7 +1580,7 @@ void TurboAssembler::AssertSmi(Register object, AbortReason reason) { Check(eq, reason); } -void TurboAssembler::AssertNotSmi(Register object, AbortReason reason) { +void MacroAssembler::AssertNotSmi(Register object, AbortReason reason) { if (!v8_flags.debug_code) return; ASM_CODE_COMMENT(this); static_assert(kSmiTag == 0); @@ -1589,7 +1588,7 @@ void TurboAssembler::AssertNotSmi(Register object, AbortReason reason) { Check(ne, reason); } -void TurboAssembler::AssertZeroExtended(Register int32_register) { +void MacroAssembler::AssertZeroExtended(Register int32_register) { if (!v8_flags.debug_code) return; ASM_CODE_COMMENT(this); Tst(int32_register.X(), kMaxUInt32); @@ -1704,7 +1703,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) { Bind(&done_checking); } -void TurboAssembler::AssertPositiveOrZero(Register value) { +void MacroAssembler::AssertPositiveOrZero(Register value) { if (!v8_flags.debug_code) return; ASM_CODE_COMMENT(this); Label done; @@ -1714,18 +1713,18 @@ void TurboAssembler::AssertPositiveOrZero(Register value) { Bind(&done); } -void TurboAssembler::Assert(Condition cond, AbortReason reason) { +void MacroAssembler::Assert(Condition cond, AbortReason reason) { if (v8_flags.debug_code) { Check(cond, reason); } } -void TurboAssembler::AssertUnreachable(AbortReason reason) { +void MacroAssembler::AssertUnreachable(AbortReason reason) { if (v8_flags.debug_code) Abort(reason); } #endif // V8_ENABLE_DEBUG_CODE -void TurboAssembler::CopySlots(int dst, Register src, Register slot_count) { +void MacroAssembler::CopySlots(int dst, Register src, Register slot_count) { DCHECK(!src.IsZero()); UseScratchRegisterScope scope(this); Register dst_reg = scope.AcquireX(); @@ -1734,7 +1733,7 @@ void TurboAssembler::CopySlots(int dst, Register src, Register slot_count) { CopyDoubleWords(dst_reg, src, slot_count); } -void TurboAssembler::CopySlots(Register dst, Register src, +void MacroAssembler::CopySlots(Register dst, Register src, Register slot_count) { DCHECK(!dst.IsZero() && !src.IsZero()); SlotAddress(dst, dst); @@ -1742,7 +1741,7 @@ void TurboAssembler::CopySlots(Register dst, Register src, CopyDoubleWords(dst, src, slot_count); } -void TurboAssembler::CopyDoubleWords(Register dst, Register src, Register count, +void MacroAssembler::CopyDoubleWords(Register dst, Register src, Register count, CopyDoubleWordsMode mode) { ASM_CODE_COMMENT(this); DCHECK(!AreAliased(dst, src, count)); @@ -1813,15 +1812,15 @@ void TurboAssembler::CopyDoubleWords(Register dst, Register src, Register count, Bind(&done); } -void TurboAssembler::SlotAddress(Register dst, int slot_offset) { +void MacroAssembler::SlotAddress(Register dst, int slot_offset) { Add(dst, sp, slot_offset << kSystemPointerSizeLog2); } -void TurboAssembler::SlotAddress(Register dst, Register slot_offset) { +void MacroAssembler::SlotAddress(Register dst, Register slot_offset) { 
Add(dst, sp, Operand(slot_offset, LSL, kSystemPointerSizeLog2)); } -void TurboAssembler::CanonicalizeNaN(const VRegister& dst, +void MacroAssembler::CanonicalizeNaN(const VRegister& dst, const VRegister& src) { AssertFPCRState(); @@ -1831,7 +1830,7 @@ void TurboAssembler::CanonicalizeNaN(const VRegister& dst, Fsub(dst, src, fp_zero); } -void TurboAssembler::LoadTaggedRoot(Register destination, RootIndex index) { +void MacroAssembler::LoadTaggedRoot(Register destination, RootIndex index) { ASM_CODE_COMMENT(this); if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index)) { Mov(destination, @@ -1841,10 +1840,10 @@ void TurboAssembler::LoadTaggedRoot(Register destination, RootIndex index) { LoadRoot(destination, index); } -void TurboAssembler::LoadRoot(Register destination, RootIndex index) { +void MacroAssembler::LoadRoot(Register destination, RootIndex index) { ASM_CODE_COMMENT(this); // TODO(v8:13466, olivf): With static roots we could use - // DecompressTaggedPointer here. However, currently all roots have addresses + // DecompressTagged here. However, currently all roots have addresses // that are too large to fit into addition immediate operands. Evidence // suggests that the extra instruction for decompression costs us more than // the load. @@ -1852,7 +1851,7 @@ void TurboAssembler::LoadRoot(Register destination, RootIndex index) { MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index))); } -void TurboAssembler::PushRoot(RootIndex index) { +void MacroAssembler::PushRoot(RootIndex index) { ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); Register tmp = temps.AcquireX(); @@ -1860,14 +1859,14 @@ void TurboAssembler::PushRoot(RootIndex index) { Push(tmp); } -void TurboAssembler::Move(Register dst, Smi src) { Mov(dst, src); } -void TurboAssembler::Move(Register dst, MemOperand src) { Ldr(dst, src); } -void TurboAssembler::Move(Register dst, Register src) { +void MacroAssembler::Move(Register dst, Smi src) { Mov(dst, src); } +void MacroAssembler::Move(Register dst, MemOperand src) { Ldr(dst, src); } +void MacroAssembler::Move(Register dst, Register src) { if (dst == src) return; Mov(dst, src); } -void TurboAssembler::MovePair(Register dst0, Register src0, Register dst1, +void MacroAssembler::MovePair(Register dst0, Register src0, Register dst1, Register src1) { DCHECK_NE(dst0, dst1); if (dst0 != src1) { @@ -1883,7 +1882,7 @@ void TurboAssembler::MovePair(Register dst0, Register src0, Register dst1, } } -void TurboAssembler::Swap(Register lhs, Register rhs) { +void MacroAssembler::Swap(Register lhs, Register rhs) { DCHECK(lhs.IsSameSizeAndType(rhs)); DCHECK_NE(lhs, rhs); UseScratchRegisterScope temps(this); @@ -1893,7 +1892,7 @@ void TurboAssembler::Swap(Register lhs, Register rhs) { Mov(lhs, temp); } -void TurboAssembler::Swap(VRegister lhs, VRegister rhs) { +void MacroAssembler::Swap(VRegister lhs, VRegister rhs) { DCHECK(lhs.IsSameSizeAndType(rhs)); DCHECK_NE(lhs, rhs); UseScratchRegisterScope temps(this); @@ -1957,7 +1956,7 @@ void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) { JumpToExternalReference(ExternalReference::Create(fid)); } -int TurboAssembler::ActivationFrameAlignment() { +int MacroAssembler::ActivationFrameAlignment() { #if V8_HOST_ARCH_ARM64 // Running on the real platform. Use the alignment as mandated by the local // environment. 
@@ -1973,12 +1972,12 @@ int TurboAssembler::ActivationFrameAlignment() { #endif // V8_HOST_ARCH_ARM64 } -void TurboAssembler::CallCFunction(ExternalReference function, +void MacroAssembler::CallCFunction(ExternalReference function, int num_of_reg_args) { CallCFunction(function, num_of_reg_args, 0); } -void TurboAssembler::CallCFunction(ExternalReference function, +void MacroAssembler::CallCFunction(ExternalReference function, int num_of_reg_args, int num_of_double_args) { ASM_CODE_COMMENT(this); @@ -1991,7 +1990,7 @@ void TurboAssembler::CallCFunction(ExternalReference function, static const int kRegisterPassedArguments = 8; static const int kFPRegisterPassedArguments = 8; -void TurboAssembler::CallCFunction(Register function, int num_of_reg_args, +void MacroAssembler::CallCFunction(Register function, int num_of_reg_args, int num_of_double_args) { ASM_CODE_COMMENT(this); DCHECK_LE(num_of_reg_args + num_of_double_args, kMaxCParameters); @@ -2056,21 +2055,21 @@ void TurboAssembler::CallCFunction(Register function, int num_of_reg_args, } } -void TurboAssembler::LoadFromConstantsTable(Register destination, +void MacroAssembler::LoadFromConstantsTable(Register destination, int constant_index) { ASM_CODE_COMMENT(this); DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable)); LoadRoot(destination, RootIndex::kBuiltinsConstantsTable); - LoadTaggedPointerField( - destination, FieldMemOperand(destination, FixedArray::OffsetOfElementAt( - constant_index))); + LoadTaggedField(destination, + FieldMemOperand(destination, FixedArray::OffsetOfElementAt( + constant_index))); } -void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) { +void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) { Ldr(destination, MemOperand(kRootRegister, offset)); } -void TurboAssembler::LoadRootRegisterOffset(Register destination, +void MacroAssembler::LoadRootRegisterOffset(Register destination, intptr_t offset) { if (offset == 0) { Mov(destination, kRootRegister); @@ -2079,7 +2078,7 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination, } } -MemOperand TurboAssembler::ExternalReferenceAsOperand( +MemOperand MacroAssembler::ExternalReferenceAsOperand( ExternalReference reference, Register scratch) { if (root_array_available_ && options().enable_root_relative_access) { int64_t offset = @@ -2108,7 +2107,7 @@ MemOperand TurboAssembler::ExternalReferenceAsOperand( return MemOperand(scratch, 0); } -void TurboAssembler::Jump(Register target, Condition cond) { +void MacroAssembler::Jump(Register target, Condition cond) { if (cond == nv) return; Label done; if (cond != al) B(NegateCondition(cond), &done); @@ -2116,7 +2115,7 @@ void TurboAssembler::Jump(Register target, Condition cond) { Bind(&done); } -void TurboAssembler::JumpHelper(int64_t offset, RelocInfo::Mode rmode, +void MacroAssembler::JumpHelper(int64_t offset, RelocInfo::Mode rmode, Condition cond) { if (cond == nv) return; Label done; @@ -2138,7 +2137,7 @@ void TurboAssembler::JumpHelper(int64_t offset, RelocInfo::Mode rmode, // * the 'target' input unmodified if this is a Wasm call, or // * the offset of the target from the current PC, in instructions, for any // other type of call. 
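// (The offset is expressed in instructions rather than bytes because arm64
// near branches, B and BL, encode a signed 26-bit instruction offset; this is
// the same range that IsNearCallOffset() further down checks with
// is_int26(offset).)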
-int64_t TurboAssembler::CalculateTargetOffset(Address target, +int64_t MacroAssembler::CalculateTargetOffset(Address target, RelocInfo::Mode rmode, byte* pc) { int64_t offset = static_cast(target); if (rmode == RelocInfo::WASM_CALL || rmode == RelocInfo::WASM_STUB_CALL) { @@ -2152,13 +2151,13 @@ int64_t TurboAssembler::CalculateTargetOffset(Address target, return offset; } -void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, +void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond) { int64_t offset = CalculateTargetOffset(target, rmode, pc_); JumpHelper(offset, rmode, cond); } -void TurboAssembler::Jump(Handle code, RelocInfo::Mode rmode, +void MacroAssembler::Jump(Handle code, RelocInfo::Mode rmode, Condition cond) { DCHECK(RelocInfo::IsCodeTarget(rmode)); DCHECK_IMPLIES(options().isolate_independent_code, @@ -2179,19 +2178,19 @@ void TurboAssembler::Jump(Handle code, RelocInfo::Mode rmode, } } -void TurboAssembler::Jump(const ExternalReference& reference) { +void MacroAssembler::Jump(const ExternalReference& reference) { UseScratchRegisterScope temps(this); Register scratch = temps.AcquireX(); Mov(scratch, reference); Jump(scratch); } -void TurboAssembler::Call(Register target) { +void MacroAssembler::Call(Register target) { BlockPoolsScope scope(this); Blr(target); } -void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) { +void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) { BlockPoolsScope scope(this); if (CanUseNearCallOrJump(rmode)) { int64_t offset = CalculateTargetOffset(target, rmode, pc_); @@ -2202,7 +2201,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) { } } -void TurboAssembler::Call(Handle code, RelocInfo::Mode rmode) { +void MacroAssembler::Call(Handle code, RelocInfo::Mode rmode) { DCHECK_IMPLIES(options().isolate_independent_code, Builtins::IsIsolateIndependentBuiltin(*code)); BlockPoolsScope scope(this); @@ -2224,14 +2223,14 @@ void TurboAssembler::Call(Handle code, RelocInfo::Mode rmode) { } } -void TurboAssembler::Call(ExternalReference target) { +void MacroAssembler::Call(ExternalReference target) { UseScratchRegisterScope temps(this); Register temp = temps.AcquireX(); Mov(temp, target); Call(temp); } -void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { +void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { ASM_CODE_COMMENT(this); // The builtin_index register contains the builtin index as a Smi. // Untagging is folded into the indexing operand below. 
@@ -2254,25 +2253,25 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { } } -void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin, +void MacroAssembler::LoadEntryFromBuiltin(Builtin builtin, Register destination) { Ldr(destination, EntryFromBuiltinAsOperand(builtin)); } -MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { +MemOperand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { ASM_CODE_COMMENT(this); DCHECK(root_array_available()); return MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(builtin)); } -void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { +void MacroAssembler::CallBuiltinByIndex(Register builtin_index) { ASM_CODE_COMMENT(this); LoadEntryFromBuiltinIndex(builtin_index); Call(builtin_index); } -void TurboAssembler::CallBuiltin(Builtin builtin) { +void MacroAssembler::CallBuiltin(Builtin builtin) { ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin)); switch (options().builtin_call_jump_mode) { case BuiltinCallJumpMode::kAbsolute: { @@ -2310,15 +2309,15 @@ void TurboAssembler::CallBuiltin(Builtin builtin) { } // TODO(ishell): remove cond parameter from here to simplify things. -void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond) { +void MacroAssembler::TailCallBuiltin(Builtin builtin, Condition cond) { ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("tail call", builtin)); // The control flow integrity (CFI) feature allows us to "sign" code entry // points as a target for calls, jumps or both. Arm64 has special // instructions for this purpose, so-called "landing pads" (see - // TurboAssembler::CallTarget(), TurboAssembler::JumpTarget() and - // TurboAssembler::JumpOrCallTarget()). Currently, we generate "Call" + // MacroAssembler::CallTarget(), MacroAssembler::JumpTarget() and + // MacroAssembler::JumpOrCallTarget()). Currently, we generate "Call" // landing pads for CPP builtins. In order to allow tail calling to those // builtins we have to use a workaround. // x17 is used to allow using "Call" (i.e. `bti c`) rather than "Jump" @@ -2360,12 +2359,12 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond) { } } -void TurboAssembler::LoadCodeEntry(Register destination, Register code_object) { +void MacroAssembler::LoadCodeEntry(Register destination, Register code_object) { ASM_CODE_COMMENT(this); Ldr(destination, FieldMemOperand(code_object, Code::kCodeEntryPointOffset)); } -void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, +void MacroAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, Register code_object) { ASM_CODE_COMMENT(this); // Compute the InstructionStream object pointer from the code entry point. 
@@ -2374,13 +2373,13 @@ void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, Immediate(InstructionStream::kHeaderSize - kHeapObjectTag)); } -void TurboAssembler::CallCodeObject(Register code_object) { +void MacroAssembler::CallCodeObject(Register code_object) { ASM_CODE_COMMENT(this); LoadCodeEntry(code_object, code_object); Call(code_object); } -void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { +void MacroAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { ASM_CODE_COMMENT(this); DCHECK_EQ(JumpMode::kJump, jump_mode); LoadCodeEntry(code_object, code_object); @@ -2392,7 +2391,7 @@ void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { Jump(x17); } -void TurboAssembler::StoreReturnAddressAndCall(Register target) { +void MacroAssembler::StoreReturnAddressAndCall(Register target) { ASM_CODE_COMMENT(this); // This generates the final instruction sequence for calls to C functions // once an exit frame has been constructed. @@ -2426,7 +2425,7 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) { Bind(&return_location); } -void TurboAssembler::IndirectCall(Address target, RelocInfo::Mode rmode) { +void MacroAssembler::IndirectCall(Address target, RelocInfo::Mode rmode) { ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); Register temp = temps.AcquireX(); @@ -2434,7 +2433,7 @@ void TurboAssembler::IndirectCall(Address target, RelocInfo::Mode rmode) { Blr(temp); } -bool TurboAssembler::IsNearCallOffset(int64_t offset) { +bool MacroAssembler::IsNearCallOffset(int64_t offset) { return is_int26(offset); } @@ -2445,12 +2444,12 @@ bool TurboAssembler::IsNearCallOffset(int64_t offset) { // the flags in the referenced {Code} object; // 2. test kMarkedForDeoptimizationBit in those flags; and // 3. if it is not zero then it jumps to the builtin. -void TurboAssembler::BailoutIfDeoptimized() { +void MacroAssembler::BailoutIfDeoptimized() { UseScratchRegisterScope temps(this); Register scratch = temps.AcquireX(); int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize; - LoadTaggedPointerField(scratch, - MemOperand(kJavaScriptCallCodeStartRegister, offset)); + LoadTaggedField(scratch, + MemOperand(kJavaScriptCallCodeStartRegister, offset)); Ldr(scratch.W(), FieldMemOperand(scratch, Code::kKindSpecificFlagsOffset)); Label not_deoptimized; Tbz(scratch.W(), InstructionStream::kMarkedForDeoptimizationBit, @@ -2460,7 +2459,7 @@ void TurboAssembler::BailoutIfDeoptimized() { Bind(¬_deoptimized); } -void TurboAssembler::CallForDeoptimization( +void MacroAssembler::CallForDeoptimization( Builtin target, int deopt_id, Label* exit, DeoptimizeKind kind, Label* ret, Label* jump_deoptimization_entry_label) { ASM_CODE_COMMENT(this); @@ -2479,10 +2478,10 @@ void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) { kind == StackLimitKind::kRealStackLimit ? 
ExternalReference::address_of_real_jslimit(isolate) : ExternalReference::address_of_jslimit(isolate); - DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit)); + DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit)); intptr_t offset = - TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit); + MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit); Ldr(destination, MemOperand(kRootRegister, offset)); } @@ -2663,8 +2662,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, // allow recompilation to take effect without changing any of the // call sites. Register code = kJavaScriptCallCodeStartRegister; - LoadTaggedPointerField(code, - FieldMemOperand(function, JSFunction::kCodeOffset)); + LoadTaggedField(code, FieldMemOperand(function, JSFunction::kCodeOffset)); switch (type) { case InvokeType::kCall: CallCodeObject(code); @@ -2715,12 +2713,11 @@ void MacroAssembler::InvokeFunctionWithNewTarget( Register expected_parameter_count = x2; - LoadTaggedPointerField(cp, - FieldMemOperand(function, JSFunction::kContextOffset)); + LoadTaggedField(cp, FieldMemOperand(function, JSFunction::kContextOffset)); // The number of arguments is stored as an int32_t, and -1 is a marker // (kDontAdaptArgumentsSentinel), so we need sign // extension to correctly handle it. - LoadTaggedPointerField( + LoadTaggedField( expected_parameter_count, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); Ldrh(expected_parameter_count, @@ -2744,14 +2741,13 @@ void MacroAssembler::InvokeFunction(Register function, DCHECK_EQ(function, x1); // Set up the context. - LoadTaggedPointerField(cp, - FieldMemOperand(function, JSFunction::kContextOffset)); + LoadTaggedField(cp, FieldMemOperand(function, JSFunction::kContextOffset)); InvokeFunctionCode(function, no_reg, expected_parameter_count, actual_parameter_count, type); } -void TurboAssembler::TryConvertDoubleToInt64(Register result, +void MacroAssembler::TryConvertDoubleToInt64(Register result, DoubleRegister double_input, Label* done) { ASM_CODE_COMMENT(this); @@ -2776,7 +2772,7 @@ void TurboAssembler::TryConvertDoubleToInt64(Register result, B(vc, done); } -void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, +void MacroAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result, DoubleRegister double_input, StubCallMode stub_mode, @@ -2795,9 +2791,9 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, // If we fell through then inline version didn't succeed - call stub instead. if (lr_status == kLRHasNotBeenSaved) { - Push(lr, double_input); + Push(lr, double_input); } else { - Push(xzr, double_input); + Push(xzr, double_input); } // DoubleToI preserves any registers it needs to clobber. 
@@ -2817,7 +2813,7 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, if (lr_status == kLRHasNotBeenSaved) { // Pop into xzr here to drop the double input on the stack: - Pop(xzr, lr); + Pop(xzr, lr); } else { Drop(2); } @@ -2827,21 +2823,21 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, Uxtw(result.W(), result.W()); } -void TurboAssembler::Prologue() { +void MacroAssembler::Prologue() { ASM_CODE_COMMENT(this); - Push(lr, fp); + Push(lr, fp); mov(fp, sp); static_assert(kExtraSlotClaimedByPrologue == 1); Push(cp, kJSFunctionRegister, kJavaScriptCallArgCountRegister, padreg); } -void TurboAssembler::EnterFrame(StackFrame::Type type) { +void MacroAssembler::EnterFrame(StackFrame::Type type) { UseScratchRegisterScope temps(this); if (StackFrame::IsJavaScript(type)) { // Just push a minimal "machine frame", saving the frame pointer and return // address, without any markers. - Push(lr, fp); + Push(lr, fp); Mov(fp, sp); // sp[1] : lr // sp[0] : fp @@ -2860,7 +2856,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) { } else { fourth_reg = padreg; } - Push(lr, fp, type_reg, fourth_reg); + Push(lr, fp, type_reg, fourth_reg); static constexpr int kSPToFPDelta = 2 * kSystemPointerSize; Add(fp, sp, kSPToFPDelta); // sp[3] : lr @@ -2870,12 +2866,12 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) { } } -void TurboAssembler::LeaveFrame(StackFrame::Type type) { +void MacroAssembler::LeaveFrame(StackFrame::Type type) { ASM_CODE_COMMENT(this); // Drop the execution stack down to the frame pointer and restore // the caller frame pointer and return address. Mov(sp, fp); - Pop(fp, lr); + Pop(fp, lr); } void MacroAssembler::EnterExitFrame(const Register& scratch, int extra_space, @@ -2885,7 +2881,7 @@ void MacroAssembler::EnterExitFrame(const Register& scratch, int extra_space, frame_type == StackFrame::BUILTIN_EXIT); // Set up the new stack frame. - Push(lr, fp); + Push(lr, fp); Mov(fp, sp); Mov(scratch, StackFrame::TypeToMarker(frame_type)); Push(scratch, xzr); @@ -2961,7 +2957,7 @@ void MacroAssembler::LeaveExitFrame(const Register& scratch, // fp -> fp[0]: CallerFP (old fp) // fp[...]: The rest of the frame. Mov(sp, fp); - Pop(fp, lr); + Pop(fp, lr); } void MacroAssembler::LoadGlobalProxy(Register dst) { @@ -3010,9 +3006,9 @@ void MacroAssembler::CompareObjectType(Register object, Register map, CompareInstanceType(map, type_reg, type); } -void TurboAssembler::LoadMap(Register dst, Register object) { +void MacroAssembler::LoadMap(Register dst, Register object) { ASM_CODE_COMMENT(this); - LoadTaggedPointerField(dst, FieldMemOperand(object, HeapObject::kMapOffset)); + LoadTaggedField(dst, FieldMemOperand(object, HeapObject::kMapOffset)); } // Sets condition flags based on comparison, and returns type in type_reg. 
@@ -3086,25 +3082,16 @@ void MacroAssembler::JumpIfIsInRange(const Register& value, } } -void TurboAssembler::LoadTaggedPointerField(const Register& destination, - const MemOperand& field_operand) { +void MacroAssembler::LoadTaggedField(const Register& destination, + const MemOperand& field_operand) { if (COMPRESS_POINTERS_BOOL) { - DecompressTaggedPointer(destination, field_operand); + DecompressTagged(destination, field_operand); } else { Ldr(destination, field_operand); } } -void TurboAssembler::LoadAnyTaggedField(const Register& destination, - const MemOperand& field_operand) { - if (COMPRESS_POINTERS_BOOL) { - DecompressAnyTagged(destination, field_operand); - } else { - Ldr(destination, field_operand); - } -} - -void TurboAssembler::LoadTaggedSignedField(const Register& destination, +void MacroAssembler::LoadTaggedSignedField(const Register& destination, const MemOperand& field_operand) { if (COMPRESS_POINTERS_BOOL) { DecompressTaggedSigned(destination, field_operand); @@ -3113,11 +3100,11 @@ void TurboAssembler::LoadTaggedSignedField(const Register& destination, } } -void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src) { +void MacroAssembler::SmiUntagField(Register dst, const MemOperand& src) { SmiUntag(dst, src); } -void TurboAssembler::StoreTaggedField(const Register& value, +void MacroAssembler::StoreTaggedField(const Register& value, const MemOperand& dst_field_operand) { if (COMPRESS_POINTERS_BOOL) { Str(value.W(), dst_field_operand); @@ -3126,7 +3113,7 @@ void TurboAssembler::StoreTaggedField(const Register& value, } } -void TurboAssembler::AtomicStoreTaggedField(const Register& value, +void MacroAssembler::AtomicStoreTaggedField(const Register& value, const Register& dst_base, const Register& dst_index, const Register& temp) { @@ -3138,7 +3125,7 @@ void TurboAssembler::AtomicStoreTaggedField(const Register& value, } } -void TurboAssembler::DecompressTaggedSigned(const Register& destination, +void MacroAssembler::DecompressTaggedSigned(const Register& destination, const MemOperand& field_operand) { ASM_CODE_COMMENT(this); Ldr(destination.W(), field_operand); @@ -3149,21 +3136,21 @@ void TurboAssembler::DecompressTaggedSigned(const Register& destination, } } -void TurboAssembler::DecompressTaggedPointer(const Register& destination, - const MemOperand& field_operand) { +void MacroAssembler::DecompressTagged(const Register& destination, + const MemOperand& field_operand) { ASM_CODE_COMMENT(this); Ldr(destination.W(), field_operand); Add(destination, kPtrComprCageBaseRegister, destination); } -void TurboAssembler::DecompressTaggedPointer(const Register& destination, - const Register& source) { +void MacroAssembler::DecompressTagged(const Register& destination, + const Register& source) { ASM_CODE_COMMENT(this); Add(destination, kPtrComprCageBaseRegister, Operand(source, UXTW)); } -void TurboAssembler::DecompressTaggedPointer(const Register& destination, - Tagged_t immediate) { +void MacroAssembler::DecompressTagged(const Register& destination, + Tagged_t immediate) { ASM_CODE_COMMENT(this); if (IsImmAddSub(immediate)) { Add(destination, kPtrComprCageBaseRegister, @@ -3178,14 +3165,7 @@ void TurboAssembler::DecompressTaggedPointer(const Register& destination, } } -void TurboAssembler::DecompressAnyTagged(const Register& destination, - const MemOperand& field_operand) { - ASM_CODE_COMMENT(this); - Ldr(destination.W(), field_operand); - Add(destination, kPtrComprCageBaseRegister, destination); -} - -void TurboAssembler::AtomicDecompressTaggedSigned(const Register& 
destination, +void MacroAssembler::AtomicDecompressTaggedSigned(const Register& destination, const Register& base, const Register& index, const Register& temp) { @@ -3199,27 +3179,17 @@ void TurboAssembler::AtomicDecompressTaggedSigned(const Register& destination, } } -void TurboAssembler::AtomicDecompressTaggedPointer(const Register& destination, - const Register& base, - const Register& index, - const Register& temp) { +void MacroAssembler::AtomicDecompressTagged(const Register& destination, + const Register& base, + const Register& index, + const Register& temp) { ASM_CODE_COMMENT(this); Add(temp, base, index); Ldar(destination.W(), temp); Add(destination, kPtrComprCageBaseRegister, destination); } -void TurboAssembler::AtomicDecompressAnyTagged(const Register& destination, - const Register& base, - const Register& index, - const Register& temp) { - ASM_CODE_COMMENT(this); - Add(temp, base, index); - Ldar(destination.W(), temp); - Add(destination, kPtrComprCageBaseRegister, destination); -} - -void TurboAssembler::CheckPageFlag(const Register& object, int mask, +void MacroAssembler::CheckPageFlag(const Register& object, int mask, Condition cc, Label* condition_met) { ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); @@ -3273,7 +3243,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset, Bind(&done); } -void TurboAssembler::DecodeSandboxedPointer(const Register& value) { +void MacroAssembler::DecodeSandboxedPointer(const Register& value) { ASM_CODE_COMMENT(this); #ifdef V8_ENABLE_SANDBOX Add(value, kPtrComprCageBaseRegister, @@ -3283,7 +3253,7 @@ void TurboAssembler::DecodeSandboxedPointer(const Register& value) { #endif } -void TurboAssembler::LoadSandboxedPointerField( +void MacroAssembler::LoadSandboxedPointerField( const Register& destination, const MemOperand& field_operand) { #ifdef V8_ENABLE_SANDBOX ASM_CODE_COMMENT(this); @@ -3294,7 +3264,7 @@ void TurboAssembler::LoadSandboxedPointerField( #endif } -void TurboAssembler::StoreSandboxedPointerField( +void MacroAssembler::StoreSandboxedPointerField( const Register& value, const MemOperand& dst_field_operand) { #ifdef V8_ENABLE_SANDBOX ASM_CODE_COMMENT(this); @@ -3308,7 +3278,7 @@ void TurboAssembler::StoreSandboxedPointerField( #endif } -void TurboAssembler::LoadExternalPointerField(Register destination, +void MacroAssembler::LoadExternalPointerField(Register destination, MemOperand field_operand, ExternalPointerTag tag, Register isolate_root) { @@ -3340,66 +3310,67 @@ void TurboAssembler::LoadExternalPointerField(Register destination, #endif // V8_ENABLE_SANDBOX } -void TurboAssembler::MaybeSaveRegisters(RegList registers) { - if (registers.is_empty()) return; - ASM_CODE_COMMENT(this); - CPURegList regs(kXRegSizeInBits, registers); - // If we were saving LR, we might need to sign it. - DCHECK(!regs.IncludesAliasOf(lr)); - regs.Align(); - PushCPURegList(regs); +void MacroAssembler::MaybeSaveRegisters(RegList registers) { + if (registers.is_empty()) return; + ASM_CODE_COMMENT(this); + CPURegList regs(kXRegSizeInBits, registers); + // If we were saving LR, we might need to sign it. + DCHECK(!regs.IncludesAliasOf(lr)); + regs.Align(); + PushCPURegList(regs); } -void TurboAssembler::MaybeRestoreRegisters(RegList registers) { - if (registers.is_empty()) return; - ASM_CODE_COMMENT(this); - CPURegList regs(kXRegSizeInBits, registers); - // If we were saving LR, we might need to sign it. 
- DCHECK(!regs.IncludesAliasOf(lr)); - regs.Align(); - PopCPURegList(regs); +void MacroAssembler::MaybeRestoreRegisters(RegList registers) { + if (registers.is_empty()) return; + ASM_CODE_COMMENT(this); + CPURegList regs(kXRegSizeInBits, registers); + // If we were saving LR, we might need to sign it. + DCHECK(!regs.IncludesAliasOf(lr)); + regs.Align(); + PopCPURegList(regs); } -void TurboAssembler::CallEphemeronKeyBarrier(Register object, Operand offset, +void MacroAssembler::CallEphemeronKeyBarrier(Register object, Operand offset, SaveFPRegsMode fp_mode) { - ASM_CODE_COMMENT(this); - RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object); - MaybeSaveRegisters(registers); + ASM_CODE_COMMENT(this); + RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object); + MaybeSaveRegisters(registers); - MoveObjectAndSlot(WriteBarrierDescriptor::ObjectRegister(), - WriteBarrierDescriptor::SlotAddressRegister(), object, - offset); + MoveObjectAndSlot(WriteBarrierDescriptor::ObjectRegister(), + WriteBarrierDescriptor::SlotAddressRegister(), object, + offset); - Call(isolate()->builtins()->code_handle( - Builtins::GetEphemeronKeyBarrierStub(fp_mode)), - RelocInfo::CODE_TARGET); - MaybeRestoreRegisters(registers); + Call(isolate()->builtins()->code_handle( + Builtins::GetEphemeronKeyBarrierStub(fp_mode)), + RelocInfo::CODE_TARGET); + MaybeRestoreRegisters(registers); } -void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object, +void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object, Operand offset, SaveFPRegsMode fp_mode, StubCallMode mode) { - ASM_CODE_COMMENT(this); - RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object); - MaybeSaveRegisters(registers); + ASM_CODE_COMMENT(this); + RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object); + MaybeSaveRegisters(registers); - Register object_parameter = WriteBarrierDescriptor::ObjectRegister(); - Register slot_address_parameter = - WriteBarrierDescriptor::SlotAddressRegister(); - MoveObjectAndSlot(object_parameter, slot_address_parameter, object, offset); + Register object_parameter = WriteBarrierDescriptor::ObjectRegister(); + Register slot_address_parameter = + WriteBarrierDescriptor::SlotAddressRegister(); + MoveObjectAndSlot(object_parameter, slot_address_parameter, object, offset); - CallRecordWriteStub(object_parameter, slot_address_parameter, fp_mode, mode); + CallRecordWriteStub(object_parameter, slot_address_parameter, fp_mode, + mode); - MaybeRestoreRegisters(registers); + MaybeRestoreRegisters(registers); } -void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address, +void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode) { - ASM_CODE_COMMENT(this); - DCHECK_EQ(WriteBarrierDescriptor::ObjectRegister(), object); - DCHECK_EQ(WriteBarrierDescriptor::SlotAddressRegister(), slot_address); + ASM_CODE_COMMENT(this); + DCHECK_EQ(WriteBarrierDescriptor::ObjectRegister(), object); + DCHECK_EQ(WriteBarrierDescriptor::SlotAddressRegister(), slot_address); #if V8_ENABLE_WEBASSEMBLY if (mode == StubCallMode::kCallWasmRuntimeStub) { auto wasm_target = wasm::WasmCode::GetRecordWriteStub(fp_mode); @@ -3413,7 +3384,7 @@ void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address, } } -void TurboAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot, +void MacroAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot, Register object, 
Operand offset) { ASM_CODE_COMMENT(this); DCHECK_NE(dst_object, dst_slot); @@ -3464,7 +3435,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset, Register temp = temps.AcquireX(); DCHECK(!AreAliased(object, value, temp)); Add(temp, object, offset); - LoadTaggedPointerField(temp, MemOperand(temp)); + LoadTaggedField(temp, MemOperand(temp)); Cmp(temp, value); Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite); } @@ -3490,7 +3461,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset, // Record the actual write. if (lr_status == kLRHasNotBeenSaved) { - Push(padreg, lr); + Push(padreg, lr); } Register slot_address = WriteBarrierDescriptor::SlotAddressRegister(); DCHECK(!AreAliased(object, slot_address, value)); @@ -3499,14 +3470,14 @@ void MacroAssembler::RecordWrite(Register object, Operand offset, Add(slot_address, object, offset); CallRecordWriteStub(object, slot_address, fp_mode); if (lr_status == kLRHasNotBeenSaved) { - Pop(lr, padreg); + Pop(lr, padreg); } if (v8_flags.debug_code) Mov(slot_address, Operand(kZapValue)); Bind(&done); } -void TurboAssembler::Check(Condition cond, AbortReason reason) { +void MacroAssembler::Check(Condition cond, AbortReason reason) { Label ok; B(cond, &ok); Abort(reason); @@ -3514,10 +3485,10 @@ void TurboAssembler::Check(Condition cond, AbortReason reason) { Bind(&ok); } -void TurboAssembler::Trap() { Brk(0); } -void TurboAssembler::DebugBreak() { Debug("DebugBreak", 0, BREAK); } +void MacroAssembler::Trap() { Brk(0); } +void MacroAssembler::DebugBreak() { Debug("DebugBreak", 0, BREAK); } -void TurboAssembler::Abort(AbortReason reason) { +void MacroAssembler::Abort(AbortReason reason) { ASM_CODE_COMMENT(this); if (v8_flags.code_comments) { RecordComment("Abort message: "); @@ -3571,10 +3542,10 @@ void TurboAssembler::Abort(AbortReason reason) { void MacroAssembler::LoadNativeContextSlot(Register dst, int index) { LoadMap(dst, cp); - LoadTaggedPointerField( + LoadTaggedField( dst, FieldMemOperand( dst, Map::kConstructorOrBackPointerOrNativeContextOffset)); - LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index))); + LoadTaggedField(dst, MemOperand(dst, Context::SlotOffset(index))); } void MacroAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result, @@ -3583,7 +3554,7 @@ void MacroAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result, Label* on_result, Label::Distance) { Label fallthrough, clear_slot; - LoadTaggedPointerField( + LoadTaggedField( scratch_and_result, FieldMemOperand(feedback_vector, FeedbackVector::OffsetOfElementAt(slot.ToInt()))); @@ -3610,7 +3581,7 @@ void MacroAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result, // This is the main Printf implementation. All other Printf variants call // PrintfNoPreserve after setting up one or more PreserveRegisterScopes. -void TurboAssembler::PrintfNoPreserve(const char* format, +void MacroAssembler::PrintfNoPreserve(const char* format, const CPURegister& arg0, const CPURegister& arg1, const CPURegister& arg2, @@ -3644,7 +3615,7 @@ void TurboAssembler::PrintfNoPreserve(const char* format, fp_tmp_list.Remove(kPCSVarargsFP); fp_tmp_list.Remove(arg0, arg1, arg2, arg3); - // Override the TurboAssembler's scratch register list. The lists will be + // Override the MacroAssembler's scratch register list. The lists will be // reset automatically at the end of the UseScratchRegisterScope. 
UseScratchRegisterScope temps(this); TmpList()->set_bits(tmp_list.bits()); @@ -3760,7 +3731,7 @@ void TurboAssembler::PrintfNoPreserve(const char* format, CallPrintf(arg_count, pcs); } -void TurboAssembler::CallPrintf(int arg_count, const CPURegister* args) { +void MacroAssembler::CallPrintf(int arg_count, const CPURegister* args) { ASM_CODE_COMMENT(this); // A call to printf needs special handling for the simulator, since the system // printf function will use a different instruction set and the procedure-call @@ -3790,7 +3761,7 @@ void TurboAssembler::CallPrintf(int arg_count, const CPURegister* args) { Call(ExternalReference::printf_function()); } -void TurboAssembler::Printf(const char* format, CPURegister arg0, +void MacroAssembler::Printf(const char* format, CPURegister arg0, CPURegister arg1, CPURegister arg2, CPURegister arg3) { ASM_CODE_COMMENT(this); @@ -3889,12 +3860,12 @@ CPURegister UseScratchRegisterScope::AcquireNextAvailable( return result; } -void TurboAssembler::ComputeCodeStartAddress(const Register& rd) { +void MacroAssembler::ComputeCodeStartAddress(const Register& rd) { // We can use adr to load a pc relative location. adr(rd, -pc_offset()); } -void TurboAssembler::RestoreFPAndLR() { +void MacroAssembler::RestoreFPAndLR() { static_assert(StandardFrameConstants::kCallerFPOffset + kSystemPointerSize == StandardFrameConstants::kCallerPCOffset, "Offsets must be consecutive for ldp!"); @@ -3913,7 +3884,7 @@ void TurboAssembler::RestoreFPAndLR() { } #if V8_ENABLE_WEBASSEMBLY -void TurboAssembler::StoreReturnAddressInWasmExitFrame(Label* return_location) { +void MacroAssembler::StoreReturnAddressInWasmExitFrame(Label* return_location) { UseScratchRegisterScope temps(this); temps.Exclude(x16, x17); Adr(x17, return_location); @@ -3925,7 +3896,7 @@ void TurboAssembler::StoreReturnAddressInWasmExitFrame(Label* return_location) { } #endif // V8_ENABLE_WEBASSEMBLY -void TurboAssembler::PopcntHelper(Register dst, Register src) { +void MacroAssembler::PopcntHelper(Register dst, Register src) { UseScratchRegisterScope temps(this); VRegister scratch = temps.AcquireV(kFormat8B); VRegister tmp = src.Is32Bits() ? scratch.S() : scratch.D(); @@ -3935,7 +3906,7 @@ void TurboAssembler::PopcntHelper(Register dst, Register src) { Fmov(dst, tmp); } -void TurboAssembler::I64x2BitMask(Register dst, VRegister src) { +void MacroAssembler::I64x2BitMask(Register dst, VRegister src) { ASM_CODE_COMMENT(this); UseScratchRegisterScope scope(this); VRegister tmp1 = scope.AcquireV(kFormat2D); @@ -3946,7 +3917,7 @@ void TurboAssembler::I64x2BitMask(Register dst, VRegister src) { Add(dst.W(), dst.W(), Operand(tmp2.W(), LSL, 1)); } -void TurboAssembler::I64x2AllTrue(Register dst, VRegister src) { +void MacroAssembler::I64x2AllTrue(Register dst, VRegister src) { ASM_CODE_COMMENT(this); UseScratchRegisterScope scope(this); VRegister tmp = scope.AcquireV(kFormat2D); diff --git a/src/codegen/arm64/macro-assembler-arm64.h b/src/codegen/arm64/macro-assembler-arm64.h index b4c9060845..48add85037 100644 --- a/src/codegen/arm64/macro-assembler-arm64.h +++ b/src/codegen/arm64/macro-assembler-arm64.h @@ -146,9 +146,9 @@ enum PreShiftImmMode { // platforms are updated. 
enum class StackLimitKind { kInterruptStackLimit, kRealStackLimit }; -class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { +class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { public: - using TurboAssemblerBase::TurboAssemblerBase; + using MacroAssemblerBase::MacroAssemblerBase; #if DEBUG void set_allow_macro_instructions(bool value) { @@ -1400,14 +1400,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { // --------------------------------------------------------------------------- // Pointer compression Support - // Loads a field containing a HeapObject and decompresses it if pointer - // compression is enabled. - void LoadTaggedPointerField(const Register& destination, - const MemOperand& field_operand); - // Loads a field containing any tagged value and decompresses it if necessary. - void LoadAnyTaggedField(const Register& destination, - const MemOperand& field_operand); + void LoadTaggedField(const Register& destination, + const MemOperand& field_operand); // Loads a field containing a tagged signed value and decompresses it if // necessary. @@ -1432,24 +1427,16 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void DecompressTaggedSigned(const Register& destination, const MemOperand& field_operand); - void DecompressTaggedPointer(const Register& destination, - const MemOperand& field_operand); - void DecompressTaggedPointer(const Register& destination, - const Register& source); - void DecompressTaggedPointer(const Register& destination, Tagged_t immediate); - void DecompressAnyTagged(const Register& destination, - const MemOperand& field_operand); + void DecompressTagged(const Register& destination, + const MemOperand& field_operand); + void DecompressTagged(const Register& destination, const Register& source); + void DecompressTagged(const Register& destination, Tagged_t immediate); void AtomicDecompressTaggedSigned(const Register& destination, const Register& base, const Register& index, const Register& temp); - void AtomicDecompressTaggedPointer(const Register& destination, - const Register& base, - const Register& index, - const Register& temp); - void AtomicDecompressAnyTagged(const Register& destination, - const Register& base, const Register& index, - const Register& temp); + void AtomicDecompressTagged(const Register& destination, const Register& base, + const Register& index, const Register& temp); // Restore FP and LR from the values stored in the current frame. This will // authenticate the LR when pointer authentication is enabled. @@ -1484,81 +1471,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { ExternalPointerTag tag, Register isolate_root = Register::no_reg()); - protected: - // The actual Push and Pop implementations. These don't generate any code - // other than that required for the push or pop. This allows - // (Push|Pop)CPURegList to bundle together run-time assertions for a large - // block of registers. - // - // Note that size is per register, and is specified in bytes. 
- void PushHelper(int count, int size, const CPURegister& src0, - const CPURegister& src1, const CPURegister& src2, - const CPURegister& src3); - void PopHelper(int count, int size, const CPURegister& dst0, - const CPURegister& dst1, const CPURegister& dst2, - const CPURegister& dst3); - - void ConditionalCompareMacro(const Register& rn, const Operand& operand, - StatusFlags nzcv, Condition cond, - ConditionalCompareOp op); - - void AddSubWithCarryMacro(const Register& rd, const Register& rn, - const Operand& operand, FlagsUpdate S, - AddSubWithCarryOp op); - - // Call Printf. On a native build, a simple call will be generated, but if the - // simulator is being used then a suitable pseudo-instruction is used. The - // arguments and stack must be prepared by the caller as for a normal AAPCS64 - // call to 'printf'. - // - // The 'args' argument should point to an array of variable arguments in their - // proper PCS registers (and in calling order). The argument registers can - // have mixed types. The format string (x0) should not be included. - void CallPrintf(int arg_count = 0, const CPURegister* args = nullptr); - - private: -#if DEBUG - // Tell whether any of the macro instruction can be used. When false the - // MacroAssembler will assert if a method which can emit a variable number - // of instructions is called. - bool allow_macro_instructions_ = true; -#endif - - // Scratch registers available for use by the MacroAssembler. - CPURegList tmp_list_ = DefaultTmpList(); - CPURegList fptmp_list_ = DefaultFPTmpList(); - - // Helps resolve branching to labels potentially out of range. - // If the label is not bound, it registers the information necessary to later - // be able to emit a veneer for this branch if necessary. - // If the label is bound, it returns true if the label (or the previous link - // in the label chain) is out of range. In that case the caller is responsible - // for generating appropriate code. - // Otherwise it returns false. - // This function also checks wether veneers need to be emitted. - bool NeedExtraInstructionsOrRegisterBranch(Label* label, - ImmBranchType branch_type); - - void Movi16bitHelper(const VRegister& vd, uint64_t imm); - void Movi32bitHelper(const VRegister& vd, uint64_t imm); - void Movi64bitHelper(const VRegister& vd, uint64_t imm); - - void LoadStoreMacro(const CPURegister& rt, const MemOperand& addr, - LoadStoreOp op); - - void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2, - const MemOperand& addr, LoadStorePairOp op); - - int64_t CalculateTargetOffset(Address target, RelocInfo::Mode rmode, - byte* pc); - - void JumpHelper(int64_t offset, RelocInfo::Mode rmode, Condition cond = al); -}; - -class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { - public: - using TurboAssembler::TurboAssembler; - // Instruction set functions ------------------------------------------------ // Logical macros. 
inline void Bics(const Register& rd, const Register& rn, @@ -1594,18 +1506,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { Condition cond); inline void Extr(const Register& rd, const Register& rn, const Register& rm, unsigned lsb); - void Fcvtl(const VRegister& vd, const VRegister& vn) { - DCHECK(allow_macro_instructions()); - fcvtl(vd, vn); - } void Fcvtl2(const VRegister& vd, const VRegister& vn) { DCHECK(allow_macro_instructions()); fcvtl2(vd, vn); } - void Fcvtn(const VRegister& vd, const VRegister& vn) { - DCHECK(allow_macro_instructions()); - fcvtn(vd, vn); - } void Fcvtn2(const VRegister& vd, const VRegister& vn) { DCHECK(allow_macro_instructions()); fcvtn2(vd, vn); @@ -1641,7 +1545,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { DCHECK(allow_macro_instructions()); mvni(vd, imm8, shift, shift_amount); } - inline void Rev(const Register& rd, const Register& rn); inline void Smaddl(const Register& rd, const Register& rn, const Register& rm, const Register& ra); inline void Smsubl(const Register& rd, const Register& rn, const Register& rm, @@ -2139,6 +2042,76 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { Register feedback_vector, FeedbackSlot slot, Label* on_result, Label::Distance distance); + protected: + // The actual Push and Pop implementations. These don't generate any code + // other than that required for the push or pop. This allows + // (Push|Pop)CPURegList to bundle together run-time assertions for a large + // block of registers. + // + // Note that size is per register, and is specified in bytes. + void PushHelper(int count, int size, const CPURegister& src0, + const CPURegister& src1, const CPURegister& src2, + const CPURegister& src3); + void PopHelper(int count, int size, const CPURegister& dst0, + const CPURegister& dst1, const CPURegister& dst2, + const CPURegister& dst3); + + void ConditionalCompareMacro(const Register& rn, const Operand& operand, + StatusFlags nzcv, Condition cond, + ConditionalCompareOp op); + + void AddSubWithCarryMacro(const Register& rd, const Register& rn, + const Operand& operand, FlagsUpdate S, + AddSubWithCarryOp op); + + // Call Printf. On a native build, a simple call will be generated, but if the + // simulator is being used then a suitable pseudo-instruction is used. The + // arguments and stack must be prepared by the caller as for a normal AAPCS64 + // call to 'printf'. + // + // The 'args' argument should point to an array of variable arguments in their + // proper PCS registers (and in calling order). The argument registers can + // have mixed types. The format string (x0) should not be included. + void CallPrintf(int arg_count = 0, const CPURegister* args = nullptr); + + private: +#if DEBUG + // Tell whether any of the macro instruction can be used. When false the + // MacroAssembler will assert if a method which can emit a variable number + // of instructions is called. + bool allow_macro_instructions_ = true; +#endif + + // Scratch registers available for use by the MacroAssembler. + CPURegList tmp_list_ = DefaultTmpList(); + CPURegList fptmp_list_ = DefaultFPTmpList(); + + // Helps resolve branching to labels potentially out of range. + // If the label is not bound, it registers the information necessary to later + // be able to emit a veneer for this branch if necessary. + // If the label is bound, it returns true if the label (or the previous link + // in the label chain) is out of range. 
In that case the caller is responsible + // for generating appropriate code. + // Otherwise it returns false. + // This function also checks wether veneers need to be emitted. + bool NeedExtraInstructionsOrRegisterBranch(Label* label, + ImmBranchType branch_type); + + void Movi16bitHelper(const VRegister& vd, uint64_t imm); + void Movi32bitHelper(const VRegister& vd, uint64_t imm); + void Movi64bitHelper(const VRegister& vd, uint64_t imm); + + void LoadStoreMacro(const CPURegister& rt, const MemOperand& addr, + LoadStoreOp op); + + void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2, + const MemOperand& addr, LoadStorePairOp op); + + int64_t CalculateTargetOffset(Address target, RelocInfo::Mode rmode, + byte* pc); + + void JumpHelper(int64_t offset, RelocInfo::Mode rmode, Condition cond = al); + DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler); }; @@ -2148,38 +2121,38 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // emitted is what you specified when creating the scope. class V8_NODISCARD InstructionAccurateScope { public: - explicit InstructionAccurateScope(TurboAssembler* tasm, size_t count = 0) - : tasm_(tasm), - block_pool_(tasm, count * kInstrSize) + explicit InstructionAccurateScope(MacroAssembler* masm, size_t count = 0) + : masm_(masm), + block_pool_(masm, count * kInstrSize) #ifdef DEBUG , size_(count * kInstrSize) #endif { - tasm_->CheckVeneerPool(false, true, count * kInstrSize); - tasm_->StartBlockVeneerPool(); + masm_->CheckVeneerPool(false, true, count * kInstrSize); + masm_->StartBlockVeneerPool(); #ifdef DEBUG if (count != 0) { - tasm_->bind(&start_); + masm_->bind(&start_); } - previous_allow_macro_instructions_ = tasm_->allow_macro_instructions(); - tasm_->set_allow_macro_instructions(false); + previous_allow_macro_instructions_ = masm_->allow_macro_instructions(); + masm_->set_allow_macro_instructions(false); #endif } ~InstructionAccurateScope() { - tasm_->EndBlockVeneerPool(); + masm_->EndBlockVeneerPool(); #ifdef DEBUG if (start_.is_bound()) { - DCHECK(tasm_->SizeOfCodeGeneratedSince(&start_) == size_); + DCHECK(masm_->SizeOfCodeGeneratedSince(&start_) == size_); } - tasm_->set_allow_macro_instructions(previous_allow_macro_instructions_); + masm_->set_allow_macro_instructions(previous_allow_macro_instructions_); #endif } private: - TurboAssembler* tasm_; - TurboAssembler::BlockConstPoolScope block_pool_; + MacroAssembler* masm_; + MacroAssembler::BlockConstPoolScope block_pool_; #ifdef DEBUG size_t size_; Label start_; @@ -2188,7 +2161,7 @@ class V8_NODISCARD InstructionAccurateScope { }; // This scope utility allows scratch registers to be managed safely. The -// TurboAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch +// MacroAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch // registers. These registers can be allocated on demand, and will be returned // at the end of the scope. // @@ -2198,9 +2171,9 @@ class V8_NODISCARD InstructionAccurateScope { // order as the constructors. We do not have assertions for this. 
class V8_NODISCARD UseScratchRegisterScope { public: - explicit UseScratchRegisterScope(TurboAssembler* tasm) - : available_(tasm->TmpList()), - availablefp_(tasm->FPTmpList()), + explicit UseScratchRegisterScope(MacroAssembler* masm) + : available_(masm->TmpList()), + availablefp_(masm->FPTmpList()), old_available_(available_->bits()), old_availablefp_(availablefp_->bits()) { DCHECK_EQ(available_->type(), CPURegister::kRegister); diff --git a/src/codegen/code-stub-assembler.h b/src/codegen/code-stub-assembler.h index fdd6da6017..1890d37719 100644 --- a/src/codegen/code-stub-assembler.h +++ b/src/codegen/code-stub-assembler.h @@ -166,7 +166,6 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol }; V(FixedCOWArrayMap, fixed_cow_array_map, FixedCOWArrayMap) \ V(Function_string, function_string, FunctionString) \ V(function_to_string, function_to_string, FunctionToString) \ - V(GlobalPropertyCellMap, global_property_cell_map, PropertyCellMap) \ V(has_instance_symbol, has_instance_symbol, HasInstanceSymbol) \ V(Infinity_string, Infinity_string, InfinityString) \ V(is_concat_spreadable_symbol, is_concat_spreadable_symbol, \ diff --git a/src/codegen/ia32/macro-assembler-ia32.cc b/src/codegen/ia32/macro-assembler-ia32.cc index f23df0884e..994e01288c 100644 --- a/src/codegen/ia32/macro-assembler-ia32.cc +++ b/src/codegen/ia32/macro-assembler-ia32.cc @@ -21,11 +21,11 @@ #include "src/codegen/ia32/register-ia32.h" #include "src/codegen/interface-descriptors-inl.h" #include "src/codegen/label.h" +#include "src/codegen/macro-assembler-base.h" #include "src/codegen/macro-assembler.h" #include "src/codegen/register.h" #include "src/codegen/reglist.h" #include "src/codegen/reloc-info.h" -#include "src/codegen/turbo-assembler.h" #include "src/common/globals.h" #include "src/deoptimizer/deoptimizer.h" #include "src/execution/frame-constants.h" @@ -77,18 +77,18 @@ Operand StackArgumentsAccessor::GetArgumentOperand(int index) const { // ------------------------------------------------------------------------- // MacroAssembler implementation. 
-void TurboAssembler::InitializeRootRegister() { +void MacroAssembler::InitializeRootRegister() { ASM_CODE_COMMENT(this); ExternalReference isolate_root = ExternalReference::isolate_root(isolate()); Move(kRootRegister, Immediate(isolate_root)); } -Operand TurboAssembler::RootAsOperand(RootIndex index) { +Operand MacroAssembler::RootAsOperand(RootIndex index) { DCHECK(root_array_available()); return Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)); } -void TurboAssembler::LoadRoot(Register destination, RootIndex index) { +void MacroAssembler::LoadRoot(Register destination, RootIndex index) { ASM_CODE_COMMENT(this); if (root_array_available()) { mov(destination, RootAsOperand(index)); @@ -113,7 +113,7 @@ void TurboAssembler::LoadRoot(Register destination, RootIndex index) { mov(destination, Operand(destination, RootRegisterOffsetForRootIndex(index))); } -void TurboAssembler::CompareRoot(Register with, Register scratch, +void MacroAssembler::CompareRoot(Register with, Register scratch, RootIndex index) { ASM_CODE_COMMENT(this); if (root_array_available()) { @@ -126,7 +126,7 @@ void TurboAssembler::CompareRoot(Register with, Register scratch, } } -void TurboAssembler::CompareRoot(Register with, RootIndex index) { +void MacroAssembler::CompareRoot(Register with, RootIndex index) { ASM_CODE_COMMENT(this); if (root_array_available()) { cmp(with, RootAsOperand(index)); @@ -180,7 +180,7 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit, j(below_equal, on_in_range, near_jump); } -void TurboAssembler::PushArray(Register array, Register size, Register scratch, +void MacroAssembler::PushArray(Register array, Register size, Register scratch, PushArrayOrder order) { ASM_CODE_COMMENT(this); DCHECK(!AreAliased(array, size, scratch)); @@ -206,7 +206,7 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch, } } -Operand TurboAssembler::ExternalReferenceAsOperand(ExternalReference reference, +Operand MacroAssembler::ExternalReferenceAsOperand(ExternalReference reference, Register scratch) { if (root_array_available() && options().enable_root_relative_access) { intptr_t delta = @@ -233,8 +233,8 @@ Operand TurboAssembler::ExternalReferenceAsOperand(ExternalReference reference, } // TODO(v8:6666): If possible, refactor into a platform-independent function in -// TurboAssembler. -Operand TurboAssembler::ExternalReferenceAddressAsOperand( +// MacroAssembler. +Operand MacroAssembler::ExternalReferenceAddressAsOperand( ExternalReference reference) { DCHECK(root_array_available()); DCHECK(options().isolate_independent_code); @@ -244,8 +244,8 @@ Operand TurboAssembler::ExternalReferenceAddressAsOperand( } // TODO(v8:6666): If possible, refactor into a platform-independent function in -// TurboAssembler. -Operand TurboAssembler::HeapObjectAsOperand(Handle object) { +// MacroAssembler. 
+Operand MacroAssembler::HeapObjectAsOperand(Handle object) { DCHECK(root_array_available()); Builtin builtin; @@ -264,7 +264,7 @@ Operand TurboAssembler::HeapObjectAsOperand(Handle object) { } } -void TurboAssembler::LoadFromConstantsTable(Register destination, +void MacroAssembler::LoadFromConstantsTable(Register destination, int constant_index) { ASM_CODE_COMMENT(this); DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable)); @@ -273,7 +273,7 @@ void TurboAssembler::LoadFromConstantsTable(Register destination, FieldOperand(destination, FixedArray::OffsetOfElementAt(constant_index))); } -void TurboAssembler::LoadRootRegisterOffset(Register destination, +void MacroAssembler::LoadRootRegisterOffset(Register destination, intptr_t offset) { ASM_CODE_COMMENT(this); DCHECK(is_int32(offset)); @@ -285,13 +285,13 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination, } } -void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) { +void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) { ASM_CODE_COMMENT(this); DCHECK(root_array_available()); mov(destination, Operand(kRootRegister, offset)); } -void TurboAssembler::LoadAddress(Register destination, +void MacroAssembler::LoadAddress(Register destination, ExternalReference source) { // TODO(jgruber): Add support for enable_root_relative_access. if (root_array_available() && options().isolate_independent_code) { @@ -301,7 +301,7 @@ void TurboAssembler::LoadAddress(Register destination, mov(destination, Immediate(source)); } -int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, +int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) const { int bytes = 0; RegList saved_regs = kCallerSaved - exclusion; @@ -315,7 +315,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, return bytes; } -int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, +int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) { ASM_CODE_COMMENT(this); // We don't allow a GC in a write barrier slow path so there is no need to @@ -346,7 +346,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, return bytes; } -int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) { +int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) { ASM_CODE_COMMENT(this); int bytes = 0; if (fp_mode == SaveFPRegsMode::kSave) { @@ -412,19 +412,19 @@ void MacroAssembler::RecordWriteField(Register object, int offset, } } -void TurboAssembler::MaybeSaveRegisters(RegList registers) { +void MacroAssembler::MaybeSaveRegisters(RegList registers) { for (Register reg : registers) { push(reg); } } -void TurboAssembler::MaybeRestoreRegisters(RegList registers) { +void MacroAssembler::MaybeRestoreRegisters(RegList registers) { for (Register reg : base::Reversed(registers)) { pop(reg); } } -void TurboAssembler::CallEphemeronKeyBarrier(Register object, +void MacroAssembler::CallEphemeronKeyBarrier(Register object, Register slot_address, SaveFPRegsMode fp_mode) { ASM_CODE_COMMENT(this); @@ -449,7 +449,7 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object, MaybeRestoreRegisters(registers); } -void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object, +void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode) { @@ -473,7 +473,7 @@ void 
TurboAssembler::CallRecordWriteStubSaveRegisters(Register object, MaybeRestoreRegisters(registers); } -void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address, +void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode) { ASM_CODE_COMMENT(this); @@ -547,17 +547,17 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address, } } -void TurboAssembler::Cvtsi2ss(XMMRegister dst, Operand src) { +void MacroAssembler::Cvtsi2ss(XMMRegister dst, Operand src) { xorps(dst, dst); cvtsi2ss(dst, src); } -void TurboAssembler::Cvtsi2sd(XMMRegister dst, Operand src) { +void MacroAssembler::Cvtsi2sd(XMMRegister dst, Operand src) { xorpd(dst, dst); cvtsi2sd(dst, src); } -void TurboAssembler::Cvtui2ss(XMMRegister dst, Operand src, Register tmp) { +void MacroAssembler::Cvtui2ss(XMMRegister dst, Operand src, Register tmp) { Label done; Register src_reg = src.is_reg_only() ? src.reg() : tmp; if (src_reg == tmp) mov(tmp, src); @@ -578,7 +578,7 @@ void TurboAssembler::Cvtui2ss(XMMRegister dst, Operand src, Register tmp) { bind(&done); } -void TurboAssembler::Cvttss2ui(Register dst, Operand src, XMMRegister tmp) { +void MacroAssembler::Cvttss2ui(Register dst, Operand src, XMMRegister tmp) { Label done; cvttss2si(dst, src); test(dst, dst); @@ -590,7 +590,7 @@ void TurboAssembler::Cvttss2ui(Register dst, Operand src, XMMRegister tmp) { bind(&done); } -void TurboAssembler::Cvtui2sd(XMMRegister dst, Operand src, Register scratch) { +void MacroAssembler::Cvtui2sd(XMMRegister dst, Operand src, Register scratch) { Label done; cmp(src, Immediate(0)); ExternalReference uint32_bias = ExternalReference::address_of_uint32_bias(); @@ -600,14 +600,14 @@ void TurboAssembler::Cvtui2sd(XMMRegister dst, Operand src, Register scratch) { bind(&done); } -void TurboAssembler::Cvttsd2ui(Register dst, Operand src, XMMRegister tmp) { +void MacroAssembler::Cvttsd2ui(Register dst, Operand src, XMMRegister tmp) { Move(tmp, -2147483648.0); addsd(tmp, src); cvttsd2si(dst, tmp); add(dst, Immediate(0x80000000)); } -void TurboAssembler::ShlPair(Register high, Register low, uint8_t shift) { +void MacroAssembler::ShlPair(Register high, Register low, uint8_t shift) { DCHECK_GE(63, shift); if (shift >= 32) { mov(high, low); @@ -619,7 +619,7 @@ void TurboAssembler::ShlPair(Register high, Register low, uint8_t shift) { } } -void TurboAssembler::ShlPair_cl(Register high, Register low) { +void MacroAssembler::ShlPair_cl(Register high, Register low) { ASM_CODE_COMMENT(this); shld_cl(high, low); shl_cl(low); @@ -631,7 +631,7 @@ void TurboAssembler::ShlPair_cl(Register high, Register low) { bind(&done); } -void TurboAssembler::ShrPair(Register high, Register low, uint8_t shift) { +void MacroAssembler::ShrPair(Register high, Register low, uint8_t shift) { DCHECK_GE(63, shift); if (shift >= 32) { mov(low, high); @@ -643,7 +643,7 @@ void TurboAssembler::ShrPair(Register high, Register low, uint8_t shift) { } } -void TurboAssembler::ShrPair_cl(Register high, Register low) { +void MacroAssembler::ShrPair_cl(Register high, Register low) { ASM_CODE_COMMENT(this); shrd_cl(low, high); shr_cl(high); @@ -655,7 +655,7 @@ void TurboAssembler::ShrPair_cl(Register high, Register low) { bind(&done); } -void TurboAssembler::SarPair(Register high, Register low, uint8_t shift) { +void MacroAssembler::SarPair(Register high, Register low, uint8_t shift) { ASM_CODE_COMMENT(this); DCHECK_GE(63, shift); if (shift >= 32) { @@ -668,7 +668,7 @@ void TurboAssembler::SarPair(Register 
high, Register low, uint8_t shift) { } } -void TurboAssembler::SarPair_cl(Register high, Register low) { +void MacroAssembler::SarPair_cl(Register high, Register low) { ASM_CODE_COMMENT(this); shrd_cl(low, high); sar_cl(high); @@ -680,7 +680,7 @@ void TurboAssembler::SarPair_cl(Register high, Register low) { bind(&done); } -void TurboAssembler::LoadMap(Register destination, Register object) { +void MacroAssembler::LoadMap(Register destination, Register object) { mov(destination, FieldOperand(object, HeapObject::kMapOffset)); } @@ -979,23 +979,23 @@ void MacroAssembler::AssertNotSmi(Register object) { } } -void TurboAssembler::Assert(Condition cc, AbortReason reason) { +void MacroAssembler::Assert(Condition cc, AbortReason reason) { if (v8_flags.debug_code) Check(cc, reason); } -void TurboAssembler::AssertUnreachable(AbortReason reason) { +void MacroAssembler::AssertUnreachable(AbortReason reason) { if (v8_flags.debug_code) Abort(reason); } #endif // V8_ENABLE_DEBUG_CODE -void TurboAssembler::StubPrologue(StackFrame::Type type) { +void MacroAssembler::StubPrologue(StackFrame::Type type) { ASM_CODE_COMMENT(this); push(ebp); // Caller's frame pointer. mov(ebp, esp); push(Immediate(StackFrame::TypeToMarker(type))); } -void TurboAssembler::Prologue() { +void MacroAssembler::Prologue() { ASM_CODE_COMMENT(this); push(ebp); // Caller's frame pointer. mov(ebp, esp); @@ -1004,7 +1004,7 @@ void TurboAssembler::Prologue() { push(kJavaScriptCallArgCountRegister); // Actual argument count. } -void TurboAssembler::DropArguments(Register count, ArgumentsCountType type, +void MacroAssembler::DropArguments(Register count, ArgumentsCountType type, ArgumentsCountMode mode) { int receiver_bytes = (mode == kCountExcludesReceiver) ? kSystemPointerSize : 0; @@ -1034,7 +1034,7 @@ void TurboAssembler::DropArguments(Register count, ArgumentsCountType type, } } -void TurboAssembler::DropArguments(Register count, Register scratch, +void MacroAssembler::DropArguments(Register count, Register scratch, ArgumentsCountType type, ArgumentsCountMode mode) { DCHECK(!AreAliased(count, scratch)); @@ -1043,7 +1043,7 @@ void TurboAssembler::DropArguments(Register count, Register scratch, PushReturnAddressFrom(scratch); } -void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, +void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc, Register receiver, Register scratch, ArgumentsCountType type, @@ -1055,7 +1055,7 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, PushReturnAddressFrom(scratch); } -void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, +void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc, Operand receiver, Register scratch, ArgumentsCountType type, @@ -1068,7 +1068,7 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, PushReturnAddressFrom(scratch); } -void TurboAssembler::EnterFrame(StackFrame::Type type) { +void MacroAssembler::EnterFrame(StackFrame::Type type) { ASM_CODE_COMMENT(this); push(ebp); mov(ebp, esp); @@ -1080,7 +1080,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) { #endif // V8_ENABLE_WEBASSEMBLY } -void TurboAssembler::LeaveFrame(StackFrame::Type type) { +void MacroAssembler::LeaveFrame(StackFrame::Type type) { ASM_CODE_COMMENT(this); if (v8_flags.debug_code && !StackFrame::IsJavaScript(type)) { cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset), @@ -1091,7 +1091,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) { } #ifdef V8_OS_WIN -void 
TurboAssembler::AllocateStackSpace(Register bytes_scratch) { +void MacroAssembler::AllocateStackSpace(Register bytes_scratch) { ASM_CODE_COMMENT(this); // In windows, we cannot increment the stack size by more than one page // (minimum page size is 4KB) without accessing at least one byte on the @@ -1113,7 +1113,7 @@ void TurboAssembler::AllocateStackSpace(Register bytes_scratch) { sub(esp, bytes_scratch); } -void TurboAssembler::AllocateStackSpace(int bytes) { +void MacroAssembler::AllocateStackSpace(int bytes) { ASM_CODE_COMMENT(this); DCHECK_GE(bytes, 0); while (bytes >= kStackPageSize) { @@ -1332,10 +1332,10 @@ void MacroAssembler::CompareStackLimit(Register with, StackLimitKind kind) { kind == StackLimitKind::kRealStackLimit ? ExternalReference::address_of_real_jslimit(isolate) : ExternalReference::address_of_jslimit(isolate); - DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit)); + DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit)); intptr_t offset = - TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit); + MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit); cmp(with, Operand(kRootRegister, offset)); } @@ -1565,9 +1565,9 @@ void MacroAssembler::LoadNativeContextSlot(Register destination, int index) { mov(destination, Operand(destination, Context::SlotOffset(index))); } -void TurboAssembler::Ret() { ret(0); } +void MacroAssembler::Ret() { ret(0); } -void TurboAssembler::Ret(int bytes_dropped, Register scratch) { +void MacroAssembler::Ret(int bytes_dropped, Register scratch) { if (is_uint16(bytes_dropped)) { ret(bytes_dropped); } else { @@ -1578,7 +1578,7 @@ void TurboAssembler::Ret(int bytes_dropped, Register scratch) { } } -void TurboAssembler::Push(Immediate value) { +void MacroAssembler::Push(Immediate value) { if (root_array_available() && options().isolate_independent_code) { if (value.is_embedded_object()) { Push(HeapObjectAsOperand(value.embedded_object())); @@ -1597,13 +1597,13 @@ void MacroAssembler::Drop(int stack_elements) { } } -void TurboAssembler::Move(Register dst, Register src) { +void MacroAssembler::Move(Register dst, Register src) { if (dst != src) { mov(dst, src); } } -void TurboAssembler::Move(Register dst, const Immediate& src) { +void MacroAssembler::Move(Register dst, const Immediate& src) { if (!src.is_heap_number_request() && src.is_zero()) { xor_(dst, dst); // Shorter than mov of 32-bit immediate 0. } else if (src.is_external_reference()) { @@ -1613,7 +1613,7 @@ void TurboAssembler::Move(Register dst, const Immediate& src) { } } -void TurboAssembler::Move(Operand dst, const Immediate& src) { +void MacroAssembler::Move(Operand dst, const Immediate& src) { // Since there's no scratch register available, take a detour through the // stack. 
if (root_array_available() && options().isolate_independent_code) { @@ -1632,9 +1632,9 @@ void TurboAssembler::Move(Operand dst, const Immediate& src) { } } -void TurboAssembler::Move(Register dst, Operand src) { mov(dst, src); } +void MacroAssembler::Move(Register dst, Operand src) { mov(dst, src); } -void TurboAssembler::Move(Register dst, Handle<HeapObject> src) { +void MacroAssembler::Move(Register dst, Handle<HeapObject> src) { if (root_array_available() && options().isolate_independent_code) { IndirectLoadConstant(dst, src); return; @@ -1642,7 +1642,7 @@ void TurboAssembler::Move(Register dst, Handle<HeapObject> src) { mov(dst, src); } -void TurboAssembler::Move(XMMRegister dst, uint32_t src) { +void MacroAssembler::Move(XMMRegister dst, uint32_t src) { if (src == 0) { pxor(dst, dst); } else { @@ -1666,7 +1666,7 @@ void TurboAssembler::Move(XMMRegister dst, uint32_t src) { } } -void TurboAssembler::Move(XMMRegister dst, uint64_t src) { +void MacroAssembler::Move(XMMRegister dst, uint64_t src) { if (src == 0) { pxor(dst, dst); } else { @@ -1705,7 +1705,7 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t src) { } } -void TurboAssembler::PextrdPreSse41(Register dst, XMMRegister src, +void MacroAssembler::PextrdPreSse41(Register dst, XMMRegister src, uint8_t imm8) { if (imm8 == 0) { Movd(dst, src); @@ -1721,7 +1721,7 @@ void TurboAssembler::PextrdPreSse41(Register dst, XMMRegister src, add(esp, Immediate(kDoubleSize)); } -void TurboAssembler::PinsrdPreSse41(XMMRegister dst, Operand src, uint8_t imm8, +void MacroAssembler::PinsrdPreSse41(XMMRegister dst, Operand src, uint8_t imm8, uint32_t* load_pc_offset) { // Without AVX or SSE, we can only have 64-bit values in xmm registers. // We don't have an xmm scratch register, so move the data via the stack. This @@ -1742,7 +1742,7 @@ void TurboAssembler::PinsrdPreSse41(XMMRegister dst, Operand src, uint8_t imm8, add(esp, Immediate(kDoubleSize)); } -void TurboAssembler::Lzcnt(Register dst, Operand src) { +void MacroAssembler::Lzcnt(Register dst, Operand src) { if (CpuFeatures::IsSupported(LZCNT)) { CpuFeatureScope scope(this, LZCNT); lzcnt(dst, src); @@ -1756,7 +1756,7 @@ void TurboAssembler::Lzcnt(Register dst, Operand src) { xor_(dst, Immediate(31)); // for x in [0..31], 31^x == 31-x.
} -void TurboAssembler::Tzcnt(Register dst, Operand src) { +void MacroAssembler::Tzcnt(Register dst, Operand src) { if (CpuFeatures::IsSupported(BMI1)) { CpuFeatureScope scope(this, BMI1); tzcnt(dst, src); @@ -1769,7 +1769,7 @@ void TurboAssembler::Tzcnt(Register dst, Operand src) { bind(&not_zero_src); } -void TurboAssembler::Popcnt(Register dst, Operand src) { +void MacroAssembler::Popcnt(Register dst, Operand src) { if (CpuFeatures::IsSupported(POPCNT)) { CpuFeatureScope scope(this, POPCNT); popcnt(dst, src); @@ -1816,7 +1816,7 @@ void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value, } } -void TurboAssembler::Check(Condition cc, AbortReason reason) { +void MacroAssembler::Check(Condition cc, AbortReason reason) { Label L; j(cc, &L); Abort(reason); @@ -1824,7 +1824,7 @@ void TurboAssembler::Check(Condition cc, AbortReason reason) { bind(&L); } -void TurboAssembler::CheckStackAlignment() { +void MacroAssembler::CheckStackAlignment() { ASM_CODE_COMMENT(this); int frame_alignment = base::OS::ActivationFrameAlignment(); int frame_alignment_mask = frame_alignment - 1; @@ -1839,7 +1839,7 @@ void TurboAssembler::CheckStackAlignment() { } } -void TurboAssembler::Abort(AbortReason reason) { +void MacroAssembler::Abort(AbortReason reason) { if (v8_flags.code_comments) { const char* msg = GetAbortReason(reason); RecordComment("Abort message: "); @@ -1882,7 +1882,7 @@ void TurboAssembler::Abort(AbortReason reason) { int3(); } -void TurboAssembler::PrepareCallCFunction(int num_arguments, Register scratch) { +void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) { ASM_CODE_COMMENT(this); int frame_alignment = base::OS::ActivationFrameAlignment(); if (frame_alignment != 0) { @@ -1898,14 +1898,14 @@ void TurboAssembler::PrepareCallCFunction(int num_arguments, Register scratch) { } } -void TurboAssembler::CallCFunction(ExternalReference function, +void MacroAssembler::CallCFunction(ExternalReference function, int num_arguments) { // Trashing eax is ok as it will be the return value. Move(eax, Immediate(function)); CallCFunction(eax, num_arguments); } -void TurboAssembler::CallCFunction(Register function, int num_arguments) { +void MacroAssembler::CallCFunction(Register function, int num_arguments) { ASM_CODE_COMMENT(this); DCHECK_LE(num_arguments, kMaxCParameters); DCHECK(has_frame()); @@ -1956,7 +1956,7 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) { } } -void TurboAssembler::PushPC() { +void MacroAssembler::PushPC() { // Push the current PC onto the stack as "return address" via calling // the next instruction.
Label get_pc; @@ -1964,7 +1964,7 @@ void TurboAssembler::PushPC() { bind(&get_pc); } -void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) { +void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) { ASM_CODE_COMMENT(this); DCHECK_IMPLIES(options().isolate_independent_code, Builtins::IsIsolateIndependentBuiltin(*code_object)); @@ -1977,7 +1977,7 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) { call(code_object, rmode); } -void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { +void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { ASM_CODE_COMMENT(this); static_assert(kSystemPointerSize == 4); static_assert(kSmiShiftSize == 0); @@ -1993,13 +1993,13 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { IsolateData::builtin_entry_table_offset())); } -void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { +void MacroAssembler::CallBuiltinByIndex(Register builtin_index) { ASM_CODE_COMMENT(this); LoadEntryFromBuiltinIndex(builtin_index); call(builtin_index); } -void TurboAssembler::CallBuiltin(Builtin builtin) { +void MacroAssembler::CallBuiltin(Builtin builtin) { ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin)); switch (options().builtin_call_jump_mode) { case BuiltinCallJumpMode::kAbsolute: { @@ -2019,7 +2019,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin) { } } -void TurboAssembler::TailCallBuiltin(Builtin builtin) { +void MacroAssembler::TailCallBuiltin(Builtin builtin) { ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("tail call", builtin)); switch (options().builtin_call_jump_mode) { @@ -2040,17 +2040,17 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) { } } -Operand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { +Operand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { ASM_CODE_COMMENT(this); return Operand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(builtin)); } -void TurboAssembler::LoadCodeEntry(Register destination, Register code_object) { +void MacroAssembler::LoadCodeEntry(Register destination, Register code_object) { ASM_CODE_COMMENT(this); mov(destination, FieldOperand(code_object, Code::kCodeEntryPointOffset)); } -void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, +void MacroAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, Register code_object) { ASM_CODE_COMMENT(this); // Compute the InstructionStream object pointer from the code entry point.
@@ -2058,12 +2058,12 @@ void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, sub(destination, Immediate(InstructionStream::kHeaderSize - kHeapObjectTag)); } -void TurboAssembler::CallCodeObject(Register code_object) { +void MacroAssembler::CallCodeObject(Register code_object) { LoadCodeEntry(code_object, code_object); call(code_object); } -void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { +void MacroAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { LoadCodeEntry(code_object, code_object); switch (jump_mode) { case JumpMode::kJump: @@ -2076,13 +2076,13 @@ void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { } } -void TurboAssembler::Jump(const ExternalReference& reference) { +void MacroAssembler::Jump(const ExternalReference& reference) { DCHECK(root_array_available()); jmp(Operand(kRootRegister, RootRegisterOffsetForExternalReferenceTableEntry( isolate(), reference))); } -void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) { +void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) { DCHECK_IMPLIES(options().isolate_independent_code, Builtins::IsIsolateIndependentBuiltin(*code_object)); Builtin builtin = Builtin::kNoBuiltinId; @@ -2094,7 +2094,7 @@ void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) { jmp(code_object, rmode); } -void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask, +void MacroAssembler::CheckPageFlag(Register object, Register scratch, int mask, Condition cc, Label* condition_met, Label::Distance condition_met_distance) { ASM_CODE_COMMENT(this); @@ -2113,7 +2113,7 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask, j(cc, condition_met, condition_met_distance); } -void TurboAssembler::ComputeCodeStartAddress(Register dst) { +void MacroAssembler::ComputeCodeStartAddress(Register dst) { ASM_CODE_COMMENT(this); // In order to get the address of the current instruction, we first need // to use a call and then use a pop, thus pushing the return address to @@ -2128,7 +2128,7 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) { } } -void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, +void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit, DeoptimizeKind kind, Label* ret, Label*) { ASM_CODE_COMMENT(this); @@ -2138,8 +2138,8 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, : Deoptimizer::kEagerDeoptExitSize); } -void TurboAssembler::Trap() { int3(); } -void TurboAssembler::DebugBreak() { int3(); } +void MacroAssembler::Trap() { int3(); } +void MacroAssembler::DebugBreak() { int3(); } } // namespace internal } // namespace v8 diff --git a/src/codegen/ia32/macro-assembler-ia32.h b/src/codegen/ia32/macro-assembler-ia32.h index 485ed210fa..273eea23ff 100644 --- a/src/codegen/ia32/macro-assembler-ia32.h +++ b/src/codegen/ia32/macro-assembler-ia32.h @@ -21,10 +21,10 @@ #include "src/codegen/ia32/assembler-ia32.h" #include "src/codegen/ia32/register-ia32.h" #include "src/codegen/label.h" +#include "src/codegen/macro-assembler-base.h" #include "src/codegen/reglist.h" #include "src/codegen/reloc-info.h" #include "src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h" -#include "src/codegen/turbo-assembler.h" #include "src/common/globals.h" #include "src/execution/frames.h" #include "src/handles/handles.h" @@ -68,10 +68,10 @@ class StackArgumentsAccessor {
DISALLOW_IMPLICIT_CONSTRUCTORS(StackArgumentsAccessor); }; -class V8_EXPORT_PRIVATE TurboAssembler - : public SharedTurboAssemblerBase<TurboAssembler> { +class V8_EXPORT_PRIVATE MacroAssembler + : public SharedMacroAssembler<MacroAssembler> { public: - using SharedTurboAssemblerBase<TurboAssembler>::SharedTurboAssemblerBase; + using SharedMacroAssembler<MacroAssembler>::SharedMacroAssembler; void CheckPageFlag(Register object, Register scratch, int mask, Condition cc, Label* condition_met, @@ -411,17 +411,6 @@ class V8_EXPORT_PRIVATE TurboAssembler // Define an exception handler and bind a label. void BindExceptionHandler(Label* label) { bind(label); } - protected: - // Drops arguments assuming that the return address was already popped. - void DropArguments(Register count, ArgumentsCountType type = kCountIsInteger, - ArgumentsCountMode mode = kCountExcludesReceiver); -}; - -// MacroAssembler implements a collection of frequently used macros. -class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { - public: - using TurboAssembler::TurboAssembler; - void PushRoot(RootIndex index); // Compare the object in a register to a value and jump if they are equal. @@ -671,6 +660,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { void StackOverflowCheck(Register num_args, Register scratch, Label* stack_overflow, bool include_receiver = false); + protected: + // Drops arguments assuming that the return address was already popped. + void DropArguments(Register count, ArgumentsCountType type = kCountIsInteger, + ArgumentsCountMode mode = kCountExcludesReceiver); + private: // Helper functions for generating invokes. void InvokePrologue(Register expected_parameter_count, diff --git a/src/codegen/loong64/macro-assembler-loong64.cc b/src/codegen/loong64/macro-assembler-loong64.cc index f4625dd5a8..3795e9ccac 100644 --- a/src/codegen/loong64/macro-assembler-loong64.cc +++ b/src/codegen/loong64/macro-assembler-loong64.cc @@ -48,7 +48,7 @@ static inline bool IsZero(const Operand& rk) { } } -int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, +int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, Register exclusion2, Register exclusion3) const { @@ -65,7 +65,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, return bytes; } -int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, +int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, Register exclusion2, Register exclusion3) { ASM_CODE_COMMENT(this); int bytes = 0; @@ -83,7 +83,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, return bytes; } -int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, +int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, Register exclusion2, Register exclusion3) { ASM_CODE_COMMENT(this); int bytes = 0; @@ -100,11 +100,11 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, return bytes; } -void TurboAssembler::LoadRoot(Register destination, RootIndex index) { +void MacroAssembler::LoadRoot(Register destination, RootIndex index) { Ld_d(destination, MemOperand(s6, RootRegisterOffsetForRootIndex(index))); } -void TurboAssembler::PushCommonFrame(Register marker_reg) { +void MacroAssembler::PushCommonFrame(Register marker_reg) { if (marker_reg.is_valid()) { Push(ra, fp, marker_reg); Add_d(fp, sp, Operand(kPointerSize)); @@ -114,7 +114,7 @@ void TurboAssembler::PushCommonFrame(Register marker_reg) { } } -void
TurboAssembler::PushStandardFrame(Register function_reg) { +void MacroAssembler::PushStandardFrame(Register function_reg) { int offset = -StandardFrameConstants::kContextOffset; if (function_reg.is_valid()) { Push(ra, fp, cp, function_reg, kJavaScriptCallArgCountRegister); @@ -165,17 +165,17 @@ void MacroAssembler::RecordWriteField(Register object, int offset, bind(&done); } -void TurboAssembler::MaybeSaveRegisters(RegList registers) { +void MacroAssembler::MaybeSaveRegisters(RegList registers) { if (registers.is_empty()) return; MultiPush(registers); } -void TurboAssembler::MaybeRestoreRegisters(RegList registers) { +void MacroAssembler::MaybeRestoreRegisters(RegList registers) { if (registers.is_empty()) return; MultiPop(registers); } -void TurboAssembler::CallEphemeronKeyBarrier(Register object, Operand offset, +void MacroAssembler::CallEphemeronKeyBarrier(Register object, Operand offset, SaveFPRegsMode fp_mode) { ASM_CODE_COMMENT(this); RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object); @@ -193,7 +193,7 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object, Operand offset, MaybeRestoreRegisters(registers); } -void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object, +void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object, Operand offset, SaveFPRegsMode fp_mode, StubCallMode mode) { @@ -212,7 +212,7 @@ void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object, MaybeRestoreRegisters(registers); } -void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address, +void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode) { // Use CallRecordWriteStubSaveRegisters if the object and slot registers @@ -232,7 +232,7 @@ void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address, } } -void TurboAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot, +void MacroAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot, Register object, Operand offset) { ASM_CODE_COMMENT(this); DCHECK_NE(dst_object, dst_slot); @@ -325,7 +325,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset, // --------------------------------------------------------------------------- // Instruction macros. 
-void TurboAssembler::Add_w(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Add_w(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { add_w(rd, rj, rk.rm()); } else { @@ -342,7 +342,7 @@ void TurboAssembler::Add_w(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Add_d(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Add_d(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { add_d(rd, rj, rk.rm()); } else { @@ -359,7 +359,7 @@ void TurboAssembler::Add_d(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Sub_w(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Sub_w(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { sub_w(rd, rj, rk.rm()); } else { @@ -384,7 +384,7 @@ void TurboAssembler::Sub_w(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Sub_d(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Sub_d(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { sub_d(rd, rj, rk.rm()); } else if (is_int12(-rk.immediate()) && !MustUseReg(rk.rmode())) { @@ -411,7 +411,7 @@ void TurboAssembler::Sub_d(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Mul_w(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Mul_w(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { mul_w(rd, rj, rk.rm()); } else { @@ -424,7 +424,7 @@ void TurboAssembler::Mul_w(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Mulh_w(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Mulh_w(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { mulh_w(rd, rj, rk.rm()); } else { @@ -437,7 +437,7 @@ void TurboAssembler::Mulh_w(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Mulh_wu(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Mulh_wu(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { mulh_wu(rd, rj, rk.rm()); } else { @@ -450,7 +450,7 @@ void TurboAssembler::Mulh_wu(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Mul_d(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Mul_d(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { mul_d(rd, rj, rk.rm()); } else { @@ -463,7 +463,7 @@ void TurboAssembler::Mul_d(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Mulh_d(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Mulh_d(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { mulh_d(rd, rj, rk.rm()); } else { @@ -476,7 +476,7 @@ void TurboAssembler::Mulh_d(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Mulh_du(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Mulh_du(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { mulh_du(rd, rj, rk.rm()); } else { @@ -489,7 +489,7 @@ void TurboAssembler::Mulh_du(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Div_w(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Div_w(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { div_w(rd, rj, rk.rm()); } else { @@ -502,7 +502,7 @@ void TurboAssembler::Div_w(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Mod_w(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Mod_w(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { 
mod_w(rd, rj, rk.rm()); } else { @@ -515,7 +515,7 @@ void TurboAssembler::Mod_w(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Mod_wu(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Mod_wu(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { mod_wu(rd, rj, rk.rm()); } else { @@ -528,7 +528,7 @@ void TurboAssembler::Mod_wu(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Div_d(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Div_d(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { div_d(rd, rj, rk.rm()); } else { @@ -541,7 +541,7 @@ void TurboAssembler::Div_d(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Div_wu(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Div_wu(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { div_wu(rd, rj, rk.rm()); } else { @@ -554,7 +554,7 @@ void TurboAssembler::Div_wu(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Div_du(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Div_du(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { div_du(rd, rj, rk.rm()); } else { @@ -567,7 +567,7 @@ void TurboAssembler::Div_du(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Mod_d(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Mod_d(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { mod_d(rd, rj, rk.rm()); } else { @@ -580,7 +580,7 @@ void TurboAssembler::Mod_d(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Mod_du(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Mod_du(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { mod_du(rd, rj, rk.rm()); } else { @@ -593,7 +593,7 @@ void TurboAssembler::Mod_du(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::And(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::And(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { and_(rd, rj, rk.rm()); } else { @@ -610,7 +610,7 @@ void TurboAssembler::And(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Or(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Or(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { or_(rd, rj, rk.rm()); } else { @@ -627,7 +627,7 @@ void TurboAssembler::Or(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Xor(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Xor(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { xor_(rd, rj, rk.rm()); } else { @@ -644,7 +644,7 @@ void TurboAssembler::Xor(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Nor(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Nor(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { nor(rd, rj, rk.rm()); } else { @@ -657,7 +657,7 @@ void TurboAssembler::Nor(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Andn(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Andn(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { andn(rd, rj, rk.rm()); } else { @@ -670,7 +670,7 @@ void TurboAssembler::Andn(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Orn(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Orn(Register rd, Register rj, const Operand& rk) 
{ if (rk.is_reg()) { orn(rd, rj, rk.rm()); } else { @@ -683,12 +683,12 @@ void TurboAssembler::Orn(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Neg(Register rj, const Operand& rk) { +void MacroAssembler::Neg(Register rj, const Operand& rk) { DCHECK(rk.is_reg()); sub_d(rj, zero_reg, rk.rm()); } -void TurboAssembler::Slt(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Slt(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { slt(rd, rj, rk.rm()); } else { @@ -706,7 +706,7 @@ void TurboAssembler::Slt(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Sltu(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Sltu(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { sltu(rd, rj, rk.rm()); } else { @@ -724,7 +724,7 @@ void TurboAssembler::Sltu(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Sle(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Sle(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { slt(rd, rk.rm(), rj); } else { @@ -739,7 +739,7 @@ void TurboAssembler::Sle(Register rd, Register rj, const Operand& rk) { xori(rd, rd, 1); } -void TurboAssembler::Sleu(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Sleu(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { sltu(rd, rk.rm(), rj); } else { @@ -754,17 +754,17 @@ void TurboAssembler::Sleu(Register rd, Register rj, const Operand& rk) { xori(rd, rd, 1); } -void TurboAssembler::Sge(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Sge(Register rd, Register rj, const Operand& rk) { Slt(rd, rj, rk); xori(rd, rd, 1); } -void TurboAssembler::Sgeu(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Sgeu(Register rd, Register rj, const Operand& rk) { Sltu(rd, rj, rk); xori(rd, rd, 1); } -void TurboAssembler::Sgt(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Sgt(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { slt(rd, rk.rm(), rj); } else { @@ -778,7 +778,7 @@ void TurboAssembler::Sgt(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Sgtu(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Sgtu(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { sltu(rd, rk.rm(), rj); } else { @@ -792,7 +792,7 @@ void TurboAssembler::Sgtu(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Rotr_w(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Rotr_w(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { rotr_w(rd, rj, rk.rm()); } else { @@ -804,7 +804,7 @@ void TurboAssembler::Rotr_w(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Rotr_d(Register rd, Register rj, const Operand& rk) { +void MacroAssembler::Rotr_d(Register rd, Register rj, const Operand& rk) { if (rk.is_reg()) { rotr_d(rd, rj, rk.rm()); } else { @@ -814,7 +814,7 @@ void TurboAssembler::Rotr_d(Register rd, Register rj, const Operand& rk) { } } -void TurboAssembler::Alsl_w(Register rd, Register rj, Register rk, uint8_t sa, +void MacroAssembler::Alsl_w(Register rd, Register rj, Register rk, uint8_t sa, Register scratch) { DCHECK(sa >= 1 && sa <= 31); if (sa <= 4) { @@ -827,7 +827,7 @@ void TurboAssembler::Alsl_w(Register rd, Register rj, Register rk, uint8_t sa, } } -void TurboAssembler::Alsl_d(Register rd, Register rj, Register rk, uint8_t sa, +void MacroAssembler::Alsl_d(Register rd, Register 
rj, Register rk, uint8_t sa, Register scratch) { DCHECK(sa >= 1 && sa <= 63); if (sa <= 4) { @@ -843,7 +843,7 @@ void TurboAssembler::Alsl_d(Register rd, Register rj, Register rk, uint8_t sa, // ------------Pseudo-instructions------------- // Change endianness -void TurboAssembler::ByteSwapSigned(Register dest, Register src, +void MacroAssembler::ByteSwapSigned(Register dest, Register src, int operand_size) { DCHECK(operand_size == 2 || operand_size == 4 || operand_size == 8); if (operand_size == 2) { @@ -857,7 +857,7 @@ void TurboAssembler::ByteSwapSigned(Register dest, Register src, } } -void TurboAssembler::ByteSwapUnsigned(Register dest, Register src, +void MacroAssembler::ByteSwapUnsigned(Register dest, Register src, int operand_size) { DCHECK(operand_size == 2 || operand_size == 4); if (operand_size == 2) { @@ -869,7 +869,7 @@ void TurboAssembler::ByteSwapUnsigned(Register dest, Register src, } } -void TurboAssembler::Ld_b(Register rd, const MemOperand& rj) { +void MacroAssembler::Ld_b(Register rd, const MemOperand& rj) { MemOperand source = rj; AdjustBaseAndOffset(&source); if (source.hasIndexReg()) { @@ -879,7 +879,7 @@ void TurboAssembler::Ld_b(Register rd, const MemOperand& rj) { } } -void TurboAssembler::Ld_bu(Register rd, const MemOperand& rj) { +void MacroAssembler::Ld_bu(Register rd, const MemOperand& rj) { MemOperand source = rj; AdjustBaseAndOffset(&source); if (source.hasIndexReg()) { @@ -889,7 +889,7 @@ void TurboAssembler::Ld_bu(Register rd, const MemOperand& rj) { } } -void TurboAssembler::St_b(Register rd, const MemOperand& rj) { +void MacroAssembler::St_b(Register rd, const MemOperand& rj) { MemOperand source = rj; AdjustBaseAndOffset(&source); if (source.hasIndexReg()) { @@ -899,7 +899,7 @@ void TurboAssembler::St_b(Register rd, const MemOperand& rj) { } } -void TurboAssembler::Ld_h(Register rd, const MemOperand& rj) { +void MacroAssembler::Ld_h(Register rd, const MemOperand& rj) { MemOperand source = rj; AdjustBaseAndOffset(&source); if (source.hasIndexReg()) { @@ -909,7 +909,7 @@ void TurboAssembler::Ld_h(Register rd, const MemOperand& rj) { } } -void TurboAssembler::Ld_hu(Register rd, const MemOperand& rj) { +void MacroAssembler::Ld_hu(Register rd, const MemOperand& rj) { MemOperand source = rj; AdjustBaseAndOffset(&source); if (source.hasIndexReg()) { @@ -919,7 +919,7 @@ void TurboAssembler::Ld_hu(Register rd, const MemOperand& rj) { } } -void TurboAssembler::St_h(Register rd, const MemOperand& rj) { +void MacroAssembler::St_h(Register rd, const MemOperand& rj) { MemOperand source = rj; AdjustBaseAndOffset(&source); if (source.hasIndexReg()) { @@ -929,7 +929,7 @@ void TurboAssembler::St_h(Register rd, const MemOperand& rj) { } } -void TurboAssembler::Ld_w(Register rd, const MemOperand& rj) { +void MacroAssembler::Ld_w(Register rd, const MemOperand& rj) { MemOperand source = rj; if (!(source.hasIndexReg()) && is_int16(source.offset()) && @@ -946,7 +946,7 @@ void TurboAssembler::Ld_w(Register rd, const MemOperand& rj) { } } -void TurboAssembler::Ld_wu(Register rd, const MemOperand& rj) { +void MacroAssembler::Ld_wu(Register rd, const MemOperand& rj) { MemOperand source = rj; AdjustBaseAndOffset(&source); if (source.hasIndexReg()) { @@ -956,7 +956,7 @@ void TurboAssembler::Ld_wu(Register rd, const MemOperand& rj) { } } -void TurboAssembler::St_w(Register rd, const MemOperand& rj) { +void MacroAssembler::St_w(Register rd, const MemOperand& rj) { MemOperand source = rj; if (!(source.hasIndexReg()) && is_int16(source.offset()) && @@ -973,7 +973,7 @@ void 
TurboAssembler::St_w(Register rd, const MemOperand& rj) { } } -void TurboAssembler::Ld_d(Register rd, const MemOperand& rj) { +void MacroAssembler::Ld_d(Register rd, const MemOperand& rj) { MemOperand source = rj; if (!(source.hasIndexReg()) && is_int16(source.offset()) && @@ -990,7 +990,7 @@ void TurboAssembler::Ld_d(Register rd, const MemOperand& rj) { } } -void TurboAssembler::St_d(Register rd, const MemOperand& rj) { +void MacroAssembler::St_d(Register rd, const MemOperand& rj) { MemOperand source = rj; if (!(source.hasIndexReg()) && is_int16(source.offset()) && @@ -1007,7 +1007,7 @@ void TurboAssembler::St_d(Register rd, const MemOperand& rj) { } } -void TurboAssembler::Fld_s(FPURegister fd, const MemOperand& src) { +void MacroAssembler::Fld_s(FPURegister fd, const MemOperand& src) { MemOperand tmp = src; AdjustBaseAndOffset(&tmp); if (tmp.hasIndexReg()) { @@ -1017,7 +1017,7 @@ void TurboAssembler::Fld_s(FPURegister fd, const MemOperand& src) { } } -void TurboAssembler::Fst_s(FPURegister fs, const MemOperand& src) { +void MacroAssembler::Fst_s(FPURegister fs, const MemOperand& src) { MemOperand tmp = src; AdjustBaseAndOffset(&tmp); if (tmp.hasIndexReg()) { @@ -1027,7 +1027,7 @@ void TurboAssembler::Fst_s(FPURegister fs, const MemOperand& src) { } } -void TurboAssembler::Fld_d(FPURegister fd, const MemOperand& src) { +void MacroAssembler::Fld_d(FPURegister fd, const MemOperand& src) { MemOperand tmp = src; AdjustBaseAndOffset(&tmp); if (tmp.hasIndexReg()) { @@ -1037,7 +1037,7 @@ void TurboAssembler::Fld_d(FPURegister fd, const MemOperand& src) { } } -void TurboAssembler::Fst_d(FPURegister fs, const MemOperand& src) { +void MacroAssembler::Fst_d(FPURegister fs, const MemOperand& src) { MemOperand tmp = src; AdjustBaseAndOffset(&tmp); if (tmp.hasIndexReg()) { @@ -1047,7 +1047,7 @@ void TurboAssembler::Fst_d(FPURegister fs, const MemOperand& src) { } } -void TurboAssembler::Ll_w(Register rd, const MemOperand& rj) { +void MacroAssembler::Ll_w(Register rd, const MemOperand& rj) { DCHECK(!rj.hasIndexReg()); bool is_one_instruction = is_int14(rj.offset()); if (is_one_instruction) { @@ -1061,7 +1061,7 @@ void TurboAssembler::Ll_w(Register rd, const MemOperand& rj) { } } -void TurboAssembler::Ll_d(Register rd, const MemOperand& rj) { +void MacroAssembler::Ll_d(Register rd, const MemOperand& rj) { DCHECK(!rj.hasIndexReg()); bool is_one_instruction = is_int14(rj.offset()); if (is_one_instruction) { @@ -1075,7 +1075,7 @@ void TurboAssembler::Ll_d(Register rd, const MemOperand& rj) { } } -void TurboAssembler::Sc_w(Register rd, const MemOperand& rj) { +void MacroAssembler::Sc_w(Register rd, const MemOperand& rj) { DCHECK(!rj.hasIndexReg()); bool is_one_instruction = is_int14(rj.offset()); if (is_one_instruction) { @@ -1089,7 +1089,7 @@ void TurboAssembler::Sc_w(Register rd, const MemOperand& rj) { } } -void TurboAssembler::Sc_d(Register rd, const MemOperand& rj) { +void MacroAssembler::Sc_d(Register rd, const MemOperand& rj) { DCHECK(!rj.hasIndexReg()); bool is_one_instruction = is_int14(rj.offset()); if (is_one_instruction) { @@ -1103,7 +1103,7 @@ void TurboAssembler::Sc_d(Register rd, const MemOperand& rj) { } } -void TurboAssembler::li(Register dst, Handle value, LiFlags mode) { +void MacroAssembler::li(Register dst, Handle value, LiFlags mode) { // TODO(jgruber,v8:8887): Also consider a root-relative load when generating // non-isolate-independent code. In many cases it might be cheaper than // embedding the relocatable value. 
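(A side note on the li()/li_optimized() hunks just above and below: the size-optimized path they rename splits a 64-bit immediate into the four fields that the LoongArch64 ori / lu12i.w / lu32i.d / lu52i.d instructions can load, and InstrCountForLi64Bit counts how many of those fields are actually needed. The sketch below only illustrates that split; it is standalone host code, not V8 code, and LiFields/SplitImmediate are invented names.)

#include <cstdint>
#include <cstdio>

// Fields loaded by the ori / lu12i.w / lu32i.d / lu52i.d sequence.
struct LiFields {
  uint32_t lo12;   // bits [11:0]  -> ori
  uint32_t mid20;  // bits [31:12] -> lu12i.w
  uint32_t hi20;   // bits [51:32] -> lu32i.d
  uint32_t top12;  // bits [63:52] -> lu52i.d
};

static LiFields SplitImmediate(uint64_t value) {
  return {static_cast<uint32_t>(value & 0xFFF),
          static_cast<uint32_t>((value >> 12) & 0xFFFFF),
          static_cast<uint32_t>((value >> 32) & 0xFFFFF),
          static_cast<uint32_t>(value >> 52)};
}

int main() {
  // A value that already fits int12/int32/int52 leaves the upper fields
  // redundant, so fewer instructions are emitted; a full 64-bit constant
  // needs all four.
  LiFields f = SplitImmediate(0x123456789ABCDEF0ull);
  std::printf("ori=0x%x lu12i.w=0x%x lu32i.d=0x%x lu52i.d=0x%x\n",
              f.lo12, f.mid20, f.hi20, f.top12);
  return 0;
}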
@@ -1114,7 +1114,7 @@ void TurboAssembler::li(Register dst, Handle value, LiFlags mode) { li(dst, Operand(value), mode); } -void TurboAssembler::li(Register dst, ExternalReference value, LiFlags mode) { +void MacroAssembler::li(Register dst, ExternalReference value, LiFlags mode) { // TODO(jgruber,v8:8887): Also consider a root-relative load when generating // non-isolate-independent code. In many cases it might be cheaper than // embedding the relocatable value. @@ -1134,7 +1134,7 @@ static inline int InstrCountForLiLower32Bit(int64_t value) { } } -void TurboAssembler::LiLower32BitHelper(Register rd, Operand j) { +void MacroAssembler::LiLower32BitHelper(Register rd, Operand j) { if (is_int12(static_cast(j.immediate()))) { addi_d(rd, zero_reg, j.immediate()); } else if (is_uint12(static_cast(j.immediate()))) { @@ -1147,7 +1147,7 @@ void TurboAssembler::LiLower32BitHelper(Register rd, Operand j) { } } -int TurboAssembler::InstrCountForLi64Bit(int64_t value) { +int MacroAssembler::InstrCountForLi64Bit(int64_t value) { if (is_int32(value)) { return InstrCountForLiLower32Bit(value); } else if (is_int52(value)) { @@ -1177,7 +1177,7 @@ int TurboAssembler::InstrCountForLi64Bit(int64_t value) { // All changes to if...else conditions here must be added to // InstrCountForLi64Bit as well. -void TurboAssembler::li_optimized(Register rd, Operand j, LiFlags mode) { +void MacroAssembler::li_optimized(Register rd, Operand j, LiFlags mode) { DCHECK(!j.is_reg()); DCHECK(!MustUseReg(j.rmode())); DCHECK(mode == OPTIMIZE_SIZE); @@ -1212,7 +1212,7 @@ void TurboAssembler::li_optimized(Register rd, Operand j, LiFlags mode) { } } -void TurboAssembler::li(Register rd, Operand j, LiFlags mode) { +void MacroAssembler::li(Register rd, Operand j, LiFlags mode) { DCHECK(!j.is_reg()); BlockTrampolinePoolScope block_trampoline_pool(this); if (!MustUseReg(j.rmode()) && mode == OPTIMIZE_SIZE) { @@ -1245,7 +1245,7 @@ void TurboAssembler::li(Register rd, Operand j, LiFlags mode) { } } -void TurboAssembler::MultiPush(RegList regs) { +void MacroAssembler::MultiPush(RegList regs) { int16_t stack_offset = 0; for (int16_t i = kNumRegisters - 1; i >= 0; i--) { @@ -1257,7 +1257,7 @@ void TurboAssembler::MultiPush(RegList regs) { addi_d(sp, sp, stack_offset); } -void TurboAssembler::MultiPush(RegList regs1, RegList regs2) { +void MacroAssembler::MultiPush(RegList regs1, RegList regs2) { DCHECK((regs1 & regs2).is_empty()); int16_t stack_offset = 0; @@ -1276,7 +1276,7 @@ void TurboAssembler::MultiPush(RegList regs1, RegList regs2) { addi_d(sp, sp, stack_offset); } -void TurboAssembler::MultiPush(RegList regs1, RegList regs2, RegList regs3) { +void MacroAssembler::MultiPush(RegList regs1, RegList regs2, RegList regs3) { DCHECK((regs1 & regs2).is_empty()); DCHECK((regs1 & regs3).is_empty()); DCHECK((regs2 & regs3).is_empty()); @@ -1303,7 +1303,7 @@ void TurboAssembler::MultiPush(RegList regs1, RegList regs2, RegList regs3) { addi_d(sp, sp, stack_offset); } -void TurboAssembler::MultiPop(RegList regs) { +void MacroAssembler::MultiPop(RegList regs) { int16_t stack_offset = 0; for (int16_t i = 0; i < kNumRegisters; i++) { @@ -1315,7 +1315,7 @@ void TurboAssembler::MultiPop(RegList regs) { addi_d(sp, sp, stack_offset); } -void TurboAssembler::MultiPop(RegList regs1, RegList regs2) { +void MacroAssembler::MultiPop(RegList regs1, RegList regs2) { DCHECK((regs1 & regs2).is_empty()); int16_t stack_offset = 0; @@ -1334,7 +1334,7 @@ void TurboAssembler::MultiPop(RegList regs1, RegList regs2) { addi_d(sp, sp, stack_offset); } -void 
TurboAssembler::MultiPop(RegList regs1, RegList regs2, RegList regs3) { +void MacroAssembler::MultiPop(RegList regs1, RegList regs2, RegList regs3) { DCHECK((regs1 & regs2).is_empty()); DCHECK((regs1 & regs3).is_empty()); DCHECK((regs2 & regs3).is_empty()); @@ -1361,7 +1361,7 @@ void TurboAssembler::MultiPop(RegList regs1, RegList regs2, RegList regs3) { addi_d(sp, sp, stack_offset); } -void TurboAssembler::MultiPushFPU(DoubleRegList regs) { +void MacroAssembler::MultiPushFPU(DoubleRegList regs) { int16_t num_to_push = regs.Count(); int16_t stack_offset = num_to_push * kDoubleSize; @@ -1374,7 +1374,7 @@ void TurboAssembler::MultiPushFPU(DoubleRegList regs) { } } -void TurboAssembler::MultiPopFPU(DoubleRegList regs) { +void MacroAssembler::MultiPopFPU(DoubleRegList regs) { int16_t stack_offset = 0; for (int16_t i = 0; i < kNumRegisters; i++) { @@ -1386,7 +1386,7 @@ void TurboAssembler::MultiPopFPU(DoubleRegList regs) { addi_d(sp, sp, stack_offset); } -void TurboAssembler::Bstrpick_w(Register rk, Register rj, uint16_t msbw, +void MacroAssembler::Bstrpick_w(Register rk, Register rj, uint16_t msbw, uint16_t lsbw) { DCHECK_LT(lsbw, msbw); DCHECK_LT(lsbw, 32); @@ -1394,7 +1394,7 @@ void TurboAssembler::Bstrpick_w(Register rk, Register rj, uint16_t msbw, bstrpick_w(rk, rj, msbw, lsbw); } -void TurboAssembler::Bstrpick_d(Register rk, Register rj, uint16_t msbw, +void MacroAssembler::Bstrpick_d(Register rk, Register rj, uint16_t msbw, uint16_t lsbw) { DCHECK_LT(lsbw, msbw); DCHECK_LT(lsbw, 64); @@ -1402,17 +1402,17 @@ void TurboAssembler::Bstrpick_d(Register rk, Register rj, uint16_t msbw, bstrpick_d(rk, rj, msbw, lsbw); } -void TurboAssembler::Neg_s(FPURegister fd, FPURegister fj) { fneg_s(fd, fj); } +void MacroAssembler::Neg_s(FPURegister fd, FPURegister fj) { fneg_s(fd, fj); } -void TurboAssembler::Neg_d(FPURegister fd, FPURegister fj) { fneg_d(fd, fj); } +void MacroAssembler::Neg_d(FPURegister fd, FPURegister fj) { fneg_d(fd, fj); } -void TurboAssembler::Ffint_d_uw(FPURegister fd, FPURegister fj) { +void MacroAssembler::Ffint_d_uw(FPURegister fd, FPURegister fj) { BlockTrampolinePoolScope block_trampoline_pool(this); movfr2gr_s(t8, fj); Ffint_d_uw(fd, t8); } -void TurboAssembler::Ffint_d_uw(FPURegister fd, Register rj) { +void MacroAssembler::Ffint_d_uw(FPURegister fd, Register rj) { BlockTrampolinePoolScope block_trampoline_pool(this); DCHECK(rj != t7); @@ -1421,13 +1421,13 @@ void TurboAssembler::Ffint_d_uw(FPURegister fd, Register rj) { ffint_d_l(fd, fd); } -void TurboAssembler::Ffint_d_ul(FPURegister fd, FPURegister fj) { +void MacroAssembler::Ffint_d_ul(FPURegister fd, FPURegister fj) { BlockTrampolinePoolScope block_trampoline_pool(this); movfr2gr_d(t8, fj); Ffint_d_ul(fd, t8); } -void TurboAssembler::Ffint_d_ul(FPURegister fd, Register rj) { +void MacroAssembler::Ffint_d_ul(FPURegister fd, Register rj) { BlockTrampolinePoolScope block_trampoline_pool(this); DCHECK(rj != t7); @@ -1452,13 +1452,13 @@ void TurboAssembler::Ffint_d_ul(FPURegister fd, Register rj) { bind(&conversion_done); } -void TurboAssembler::Ffint_s_uw(FPURegister fd, FPURegister fj) { +void MacroAssembler::Ffint_s_uw(FPURegister fd, FPURegister fj) { BlockTrampolinePoolScope block_trampoline_pool(this); movfr2gr_d(t8, fj); Ffint_s_uw(fd, t8); } -void TurboAssembler::Ffint_s_uw(FPURegister fd, Register rj) { +void MacroAssembler::Ffint_s_uw(FPURegister fd, Register rj) { BlockTrampolinePoolScope block_trampoline_pool(this); DCHECK(rj != t7); @@ -1467,13 +1467,13 @@ void TurboAssembler::Ffint_s_uw(FPURegister fd, Register 
rj) { ffint_s_l(fd, fd); } -void TurboAssembler::Ffint_s_ul(FPURegister fd, FPURegister fj) { +void MacroAssembler::Ffint_s_ul(FPURegister fd, FPURegister fj) { BlockTrampolinePoolScope block_trampoline_pool(this); movfr2gr_d(t8, fj); Ffint_s_ul(fd, t8); } -void TurboAssembler::Ffint_s_ul(FPURegister fd, Register rj) { +void MacroAssembler::Ffint_s_ul(FPURegister fd, Register rj) { BlockTrampolinePoolScope block_trampoline_pool(this); DCHECK(rj != t7); @@ -1530,28 +1530,28 @@ void MacroAssembler::Ftintrz_l_ud(FPURegister fd, FPURegister fj, Ftintrz_l_d(fd, scratch); } -void TurboAssembler::Ftintrz_uw_d(FPURegister fd, FPURegister fj, +void MacroAssembler::Ftintrz_uw_d(FPURegister fd, FPURegister fj, FPURegister scratch) { BlockTrampolinePoolScope block_trampoline_pool(this); Ftintrz_uw_d(t8, fj, scratch); movgr2fr_w(fd, t8); } -void TurboAssembler::Ftintrz_uw_s(FPURegister fd, FPURegister fj, +void MacroAssembler::Ftintrz_uw_s(FPURegister fd, FPURegister fj, FPURegister scratch) { BlockTrampolinePoolScope block_trampoline_pool(this); Ftintrz_uw_s(t8, fj, scratch); movgr2fr_w(fd, t8); } -void TurboAssembler::Ftintrz_ul_d(FPURegister fd, FPURegister fj, +void MacroAssembler::Ftintrz_ul_d(FPURegister fd, FPURegister fj, FPURegister scratch, Register result) { BlockTrampolinePoolScope block_trampoline_pool(this); Ftintrz_ul_d(t8, fj, scratch, result); movgr2fr_d(fd, t8); } -void TurboAssembler::Ftintrz_ul_s(FPURegister fd, FPURegister fj, +void MacroAssembler::Ftintrz_ul_s(FPURegister fd, FPURegister fj, FPURegister scratch, Register result) { BlockTrampolinePoolScope block_trampoline_pool(this); Ftintrz_ul_s(t8, fj, scratch, result); @@ -1574,7 +1574,7 @@ void MacroAssembler::Ftintrp_w_d(FPURegister fd, FPURegister fj) { ftintrp_w_d(fd, fj); } -void TurboAssembler::Ftintrz_uw_d(Register rd, FPURegister fj, +void MacroAssembler::Ftintrz_uw_d(Register rd, FPURegister fj, FPURegister scratch) { DCHECK(fj != scratch); DCHECK(rd != t7); @@ -1610,7 +1610,7 @@ void TurboAssembler::Ftintrz_uw_d(Register rd, FPURegister fj, bind(&done); } -void TurboAssembler::Ftintrz_uw_s(Register rd, FPURegister fj, +void MacroAssembler::Ftintrz_uw_s(Register rd, FPURegister fj, FPURegister scratch) { DCHECK(fj != scratch); DCHECK(rd != t7); @@ -1644,7 +1644,7 @@ void TurboAssembler::Ftintrz_uw_s(Register rd, FPURegister fj, bind(&done); } -void TurboAssembler::Ftintrz_ul_d(Register rd, FPURegister fj, +void MacroAssembler::Ftintrz_ul_d(Register rd, FPURegister fj, FPURegister scratch, Register result) { DCHECK(fj != scratch); DCHECK(result.is_valid() ? !AreAliased(rd, result, t7) : !AreAliased(rd, t7)); @@ -1699,7 +1699,7 @@ void TurboAssembler::Ftintrz_ul_d(Register rd, FPURegister fj, bind(&fail); } -void TurboAssembler::Ftintrz_ul_s(Register rd, FPURegister fj, +void MacroAssembler::Ftintrz_ul_s(Register rd, FPURegister fj, FPURegister scratch, Register result) { DCHECK(fj != scratch); DCHECK(result.is_valid() ? 
!AreAliased(rd, result, t7) : !AreAliased(rd, t7)); @@ -1758,7 +1758,7 @@ void TurboAssembler::Ftintrz_ul_s(Register rd, FPURegister fj, bind(&fail); } -void TurboAssembler::RoundDouble(FPURegister dst, FPURegister src, +void MacroAssembler::RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode) { BlockTrampolinePoolScope block_trampoline_pool(this); Register scratch = t8; @@ -1769,23 +1769,23 @@ void TurboAssembler::RoundDouble(FPURegister dst, FPURegister src, movgr2fcsr(scratch); } -void TurboAssembler::Floor_d(FPURegister dst, FPURegister src) { +void MacroAssembler::Floor_d(FPURegister dst, FPURegister src) { RoundDouble(dst, src, mode_floor); } -void TurboAssembler::Ceil_d(FPURegister dst, FPURegister src) { +void MacroAssembler::Ceil_d(FPURegister dst, FPURegister src) { RoundDouble(dst, src, mode_ceil); } -void TurboAssembler::Trunc_d(FPURegister dst, FPURegister src) { +void MacroAssembler::Trunc_d(FPURegister dst, FPURegister src) { RoundDouble(dst, src, mode_trunc); } -void TurboAssembler::Round_d(FPURegister dst, FPURegister src) { +void MacroAssembler::Round_d(FPURegister dst, FPURegister src) { RoundDouble(dst, src, mode_round); } -void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src, +void MacroAssembler::RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode) { BlockTrampolinePoolScope block_trampoline_pool(this); Register scratch = t8; @@ -1796,23 +1796,23 @@ void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src, movgr2fcsr(scratch); } -void TurboAssembler::Floor_s(FPURegister dst, FPURegister src) { +void MacroAssembler::Floor_s(FPURegister dst, FPURegister src) { RoundFloat(dst, src, mode_floor); } -void TurboAssembler::Ceil_s(FPURegister dst, FPURegister src) { +void MacroAssembler::Ceil_s(FPURegister dst, FPURegister src) { RoundFloat(dst, src, mode_ceil); } -void TurboAssembler::Trunc_s(FPURegister dst, FPURegister src) { +void MacroAssembler::Trunc_s(FPURegister dst, FPURegister src) { RoundFloat(dst, src, mode_trunc); } -void TurboAssembler::Round_s(FPURegister dst, FPURegister src) { +void MacroAssembler::Round_s(FPURegister dst, FPURegister src) { RoundFloat(dst, src, mode_round); } -void TurboAssembler::CompareF(FPURegister cmp1, FPURegister cmp2, +void MacroAssembler::CompareF(FPURegister cmp1, FPURegister cmp2, FPUCondition cc, CFRegister cd, bool f32) { if (f32) { fcmp_cond_s(cc, cmp1, cmp2, cd); @@ -1821,20 +1821,20 @@ void TurboAssembler::CompareF(FPURegister cmp1, FPURegister cmp2, } } -void TurboAssembler::CompareIsNanF(FPURegister cmp1, FPURegister cmp2, +void MacroAssembler::CompareIsNanF(FPURegister cmp1, FPURegister cmp2, CFRegister cd, bool f32) { CompareF(cmp1, cmp2, CUN, cd, f32); } -void TurboAssembler::BranchTrueShortF(Label* target, CFRegister cj) { +void MacroAssembler::BranchTrueShortF(Label* target, CFRegister cj) { bcnez(cj, target); } -void TurboAssembler::BranchFalseShortF(Label* target, CFRegister cj) { +void MacroAssembler::BranchFalseShortF(Label* target, CFRegister cj) { bceqz(cj, target); } -void TurboAssembler::BranchTrueF(Label* target, CFRegister cj) { +void MacroAssembler::BranchTrueF(Label* target, CFRegister cj) { // TODO(yuyin): can be optimzed bool long_branch = target->is_bound() ? !is_near(target, OffsetSize::kOffset21) @@ -1849,7 +1849,7 @@ void TurboAssembler::BranchTrueF(Label* target, CFRegister cj) { } } -void TurboAssembler::BranchFalseF(Label* target, CFRegister cj) { +void MacroAssembler::BranchFalseF(Label* target, CFRegister cj) { bool long_branch = target->is_bound() ? 
!is_near(target, OffsetSize::kOffset21) : is_trampoline_emitted(); @@ -1863,7 +1863,7 @@ void TurboAssembler::BranchFalseF(Label* target, CFRegister cj) { } } -void TurboAssembler::FmoveLow(FPURegister dst, Register src_low) { +void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); DCHECK(src_low != scratch); @@ -1872,14 +1872,14 @@ void TurboAssembler::FmoveLow(FPURegister dst, Register src_low) { movgr2frh_w(dst, scratch); } -void TurboAssembler::Move(FPURegister dst, uint32_t src) { +void MacroAssembler::Move(FPURegister dst, uint32_t src) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); li(scratch, Operand(static_cast<int32_t>(src))); movgr2fr_w(dst, scratch); } -void TurboAssembler::Move(FPURegister dst, uint64_t src) { +void MacroAssembler::Move(FPURegister dst, uint64_t src) { // Handle special values first. if (src == base::bit_cast<uint64_t>(0.0) && has_double_zero_reg_set_) { fmov_d(dst, kDoubleRegZero); @@ -1895,7 +1895,7 @@ void TurboAssembler::Move(FPURegister dst, uint64_t src) { } } -void TurboAssembler::Movz(Register rd, Register rj, Register rk) { +void MacroAssembler::Movz(Register rd, Register rj, Register rk) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); masknez(scratch, rj, rk); @@ -1903,7 +1903,7 @@ void TurboAssembler::Movz(Register rd, Register rj, Register rk) { or_(rd, rd, scratch); } -void TurboAssembler::Movn(Register rd, Register rj, Register rk) { +void MacroAssembler::Movn(Register rd, Register rj, Register rk) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); maskeqz(scratch, rj, rk); @@ -1911,7 +1911,7 @@ void TurboAssembler::Movn(Register rd, Register rj, Register rk) { or_(rd, rd, scratch); } -void TurboAssembler::LoadZeroOnCondition(Register rd, Register rj, +void MacroAssembler::LoadZeroOnCondition(Register rd, Register rj, const Operand& rk, Condition cond) { BlockTrampolinePoolScope block_trampoline_pool(this); switch (cond) { @@ -1995,40 +1995,40 @@ void TurboAssembler::LoadZeroOnCondition(Register rd, Register rj, } } -void TurboAssembler::LoadZeroIfConditionNotZero(Register dest, +void MacroAssembler::LoadZeroIfConditionNotZero(Register dest, Register condition) { masknez(dest, dest, condition); } -void TurboAssembler::LoadZeroIfConditionZero(Register dest, +void MacroAssembler::LoadZeroIfConditionZero(Register dest, Register condition) { maskeqz(dest, dest, condition); } -void TurboAssembler::LoadZeroIfFPUCondition(Register dest, CFRegister cc) { +void MacroAssembler::LoadZeroIfFPUCondition(Register dest, CFRegister cc) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); movcf2gr(scratch, cc); LoadZeroIfConditionNotZero(dest, scratch); } -void TurboAssembler::LoadZeroIfNotFPUCondition(Register dest, CFRegister cc) { +void MacroAssembler::LoadZeroIfNotFPUCondition(Register dest, CFRegister cc) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); movcf2gr(scratch, cc); LoadZeroIfConditionZero(dest, scratch); } -void TurboAssembler::Clz_w(Register rd, Register rj) { clz_w(rd, rj); } +void MacroAssembler::Clz_w(Register rd, Register rj) { clz_w(rd, rj); } -void TurboAssembler::Clz_d(Register rd, Register rj) { clz_d(rd, rj); } +void MacroAssembler::Clz_d(Register rd, Register rj) { clz_d(rd, rj); } -void TurboAssembler::Ctz_w(Register rd, Register rj) { ctz_w(rd, rj); } +void MacroAssembler::Ctz_w(Register rd, Register
rj) { ctz_w(rd, rj); } -void TurboAssembler::Ctz_d(Register rd, Register rj) { ctz_d(rd, rj); } +void MacroAssembler::Ctz_d(Register rd, Register rj) { ctz_d(rd, rj); } // TODO(LOONG_dev): Optimize like arm64, use simd instruction -void TurboAssembler::Popcnt_w(Register rd, Register rj) { +void MacroAssembler::Popcnt_w(Register rd, Register rj) { ASM_CODE_COMMENT(this); // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel // @@ -2072,7 +2072,7 @@ void TurboAssembler::Popcnt_w(Register rd, Register rj) { srli_w(rd, rd, shift); } -void TurboAssembler::Popcnt_d(Register rd, Register rj) { +void MacroAssembler::Popcnt_d(Register rd, Register rj) { ASM_CODE_COMMENT(this); int64_t B0 = 0x5555555555555555l; // (T)~(T)0/3 int64_t B1 = 0x3333333333333333l; // (T)~(T)0/15*3 @@ -2102,7 +2102,7 @@ void TurboAssembler::Popcnt_d(Register rd, Register rj) { srli_d(rd, rd, shift); } -void TurboAssembler::ExtractBits(Register dest, Register source, Register pos, +void MacroAssembler::ExtractBits(Register dest, Register source, Register pos, int size, bool sign_extend) { sra_d(dest, source, pos); bstrpick_d(dest, dest, size - 1, 0); @@ -2124,7 +2124,7 @@ void TurboAssembler::ExtractBits(Register dest, Register source, Register pos, } } -void TurboAssembler::InsertBits(Register dest, Register source, Register pos, +void MacroAssembler::InsertBits(Register dest, Register source, Register pos, int size) { Rotr_d(dest, dest, pos); bstrins_d(dest, source, size - 1, 0); @@ -2136,7 +2136,7 @@ void TurboAssembler::InsertBits(Register dest, Register source, Register pos, } } -void TurboAssembler::TryInlineTruncateDoubleToI(Register result, +void MacroAssembler::TryInlineTruncateDoubleToI(Register result, DoubleRegister double_input, Label* done) { DoubleRegister single_scratch = kScratchDoubleReg.low(); @@ -2159,7 +2159,7 @@ void TurboAssembler::TryInlineTruncateDoubleToI(Register result, bcnez(FCC0, done); } -void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, +void MacroAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result, DoubleRegister double_input, StubCallMode stub_mode) { @@ -2193,7 +2193,7 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, DCHECK((cond == cc_always && rj == zero_reg && rk.rm() == zero_reg) || \ (cond != cc_always && (rj != zero_reg || rk.rm() != zero_reg))) -void TurboAssembler::Branch(Label* L, bool need_link) { +void MacroAssembler::Branch(Label* L, bool need_link) { int offset = GetOffset(L, OffsetSize::kOffset26); if (need_link) { bl(offset); @@ -2202,7 +2202,7 @@ void TurboAssembler::Branch(Label* L, bool need_link) { } } -void TurboAssembler::Branch(Label* L, Condition cond, Register rj, +void MacroAssembler::Branch(Label* L, Condition cond, Register rj, const Operand& rk, bool need_link) { if (L->is_bound()) { BRANCH_ARGS_CHECK(cond, rj, rk); @@ -2234,7 +2234,7 @@ void TurboAssembler::Branch(Label* L, Condition cond, Register rj, } } -void TurboAssembler::Branch(Label* L, Condition cond, Register rj, +void MacroAssembler::Branch(Label* L, Condition cond, Register rj, RootIndex index) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -2242,11 +2242,11 @@ void TurboAssembler::Branch(Label* L, Condition cond, Register rj, Branch(L, cond, rj, Operand(scratch)); } -int32_t TurboAssembler::GetOffset(Label* L, OffsetSize bits) { +int32_t MacroAssembler::GetOffset(Label* L, OffsetSize bits) { return branch_offset_helper(L, bits) >> 2; } -Register 
TurboAssembler::GetRkAsRegisterHelper(const Operand& rk, +Register MacroAssembler::GetRkAsRegisterHelper(const Operand& rk, Register scratch) { Register r2 = no_reg; if (rk.is_reg()) { @@ -2259,7 +2259,7 @@ Register TurboAssembler::GetRkAsRegisterHelper(const Operand& rk, return r2; } -bool TurboAssembler::BranchShortOrFallback(Label* L, Condition cond, +bool MacroAssembler::BranchShortOrFallback(Label* L, Condition cond, Register rj, const Operand& rk, bool need_link) { UseScratchRegisterScope temps(this); @@ -2490,7 +2490,7 @@ bool TurboAssembler::BranchShortOrFallback(Label* L, Condition cond, return true; } -void TurboAssembler::BranchShort(Label* L, Condition cond, Register rj, +void MacroAssembler::BranchShort(Label* L, Condition cond, Register rj, const Operand& rk, bool need_link) { BRANCH_ARGS_CHECK(cond, rj, rk); bool result = BranchShortOrFallback(L, cond, rj, rk, need_link); @@ -2498,7 +2498,7 @@ void TurboAssembler::BranchShort(Label* L, Condition cond, Register rj, USE(result); } -void TurboAssembler::LoadFromConstantsTable(Register destination, +void MacroAssembler::LoadFromConstantsTable(Register destination, int constant_index) { ASM_CODE_COMMENT(this); DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable)); @@ -2508,11 +2508,11 @@ void TurboAssembler::LoadFromConstantsTable(Register destination, constant_index * kPointerSize)); } -void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) { +void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) { Ld_d(destination, MemOperand(kRootRegister, offset)); } -void TurboAssembler::LoadRootRegisterOffset(Register destination, +void MacroAssembler::LoadRootRegisterOffset(Register destination, intptr_t offset) { if (offset == 0) { Move(destination, kRootRegister); @@ -2521,7 +2521,7 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination, } } -MemOperand TurboAssembler::ExternalReferenceAsOperand( +MemOperand MacroAssembler::ExternalReferenceAsOperand( ExternalReference reference, Register scratch) { if (root_array_available_ && options().enable_root_relative_access) { int64_t offset = @@ -2550,7 +2550,7 @@ MemOperand TurboAssembler::ExternalReferenceAsOperand( return MemOperand(scratch, 0); } -void TurboAssembler::Jump(Register target, Condition cond, Register rj, +void MacroAssembler::Jump(Register target, Condition cond, Register rj, const Operand& rk) { BlockTrampolinePoolScope block_trampoline_pool(this); if (cond == cc_always) { @@ -2564,7 +2564,7 @@ void TurboAssembler::Jump(Register target, Condition cond, Register rj, } } -void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, +void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond, Register rj, const Operand& rk) { Label skip; if (cond != cc_always) { @@ -2578,13 +2578,13 @@ void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, } } -void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond, +void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond, Register rj, const Operand& rk) { DCHECK(!RelocInfo::IsCodeTarget(rmode)); Jump(static_cast(target), rmode, cond, rj, rk); } -void TurboAssembler::Jump(Handle code, RelocInfo::Mode rmode, +void MacroAssembler::Jump(Handle code, RelocInfo::Mode rmode, Condition cond, Register rj, const Operand& rk) { DCHECK(RelocInfo::IsCodeTarget(rmode)); BlockTrampolinePoolScope block_trampoline_pool(this); @@ -2604,13 +2604,13 @@ void TurboAssembler::Jump(Handle code, 
RelocInfo::Mode rmode, bind(&skip); } -void TurboAssembler::Jump(const ExternalReference& reference) { +void MacroAssembler::Jump(const ExternalReference& reference) { li(t7, reference); Jump(t7); } // Note: To call gcc-compiled C code on loonarch, you must call through t[0-8]. -void TurboAssembler::Call(Register target, Condition cond, Register rj, +void MacroAssembler::Call(Register target, Condition cond, Register rj, const Operand& rk) { BlockTrampolinePoolScope block_trampoline_pool(this); if (cond == cc_always) { @@ -2639,7 +2639,7 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit, } } -void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond, +void MacroAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond, Register rj, const Operand& rk) { BlockTrampolinePoolScope block_trampoline_pool(this); Label skip; @@ -2659,7 +2659,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond, bind(&skip); } -void TurboAssembler::Call(Handle code, RelocInfo::Mode rmode, +void MacroAssembler::Call(Handle code, RelocInfo::Mode rmode, Condition cond, Register rj, const Operand& rk) { BlockTrampolinePoolScope block_trampoline_pool(this); Builtin builtin = Builtin::kNoBuiltinId; @@ -2672,7 +2672,7 @@ void TurboAssembler::Call(Handle code, RelocInfo::Mode rmode, Call(code.address(), rmode, cond, rj, rk); } -void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { +void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { ASM_CODE_COMMENT(this); static_assert(kSystemPointerSize == 8); static_assert(kSmiTagSize == 1); @@ -2686,22 +2686,22 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { MemOperand(builtin_index, IsolateData::builtin_entry_table_offset())); } -void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin, +void MacroAssembler::LoadEntryFromBuiltin(Builtin builtin, Register destination) { Ld_d(destination, EntryFromBuiltinAsOperand(builtin)); } -MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { +MemOperand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { DCHECK(root_array_available()); return MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(builtin)); } -void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { +void MacroAssembler::CallBuiltinByIndex(Register builtin_index) { ASM_CODE_COMMENT(this); LoadEntryFromBuiltinIndex(builtin_index); Call(builtin_index); } -void TurboAssembler::CallBuiltin(Builtin builtin) { +void MacroAssembler::CallBuiltin(Builtin builtin) { ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin)); UseScratchRegisterScope temps(this); Register temp = temps.Acquire(); @@ -2735,7 +2735,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin) { } } -void TurboAssembler::TailCallBuiltin(Builtin builtin) { +void MacroAssembler::TailCallBuiltin(Builtin builtin) { ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("tail call", builtin)); UseScratchRegisterScope temps(this); @@ -2769,7 +2769,7 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) { } } -void TurboAssembler::PatchAndJump(Address target) { +void MacroAssembler::PatchAndJump(Address target) { ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -2782,7 +2782,7 @@ void TurboAssembler::PatchAndJump(Address target) { pc_ += sizeof(uint64_t); } -void TurboAssembler::StoreReturnAddressAndCall(Register target) { +void 
MacroAssembler::StoreReturnAddressAndCall(Register target) { ASM_CODE_COMMENT(this); // This generates the final instruction sequence for calls to C functions // once an exit frame has been constructed. @@ -2810,7 +2810,7 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) { DCHECK_EQ(kNumInstructionsToJump, InstructionsGeneratedSince(&find_ra)); } -void TurboAssembler::DropArguments(Register count, ArgumentsCountType type, +void MacroAssembler::DropArguments(Register count, ArgumentsCountType type, ArgumentsCountMode mode, Register scratch) { switch (type) { case kCountIsInteger: { @@ -2834,7 +2834,7 @@ void TurboAssembler::DropArguments(Register count, ArgumentsCountType type, } } -void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, +void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc, Register receiver, ArgumentsCountType type, ArgumentsCountMode mode, @@ -2850,11 +2850,11 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, } } -void TurboAssembler::Ret(Condition cond, Register rj, const Operand& rk) { +void MacroAssembler::Ret(Condition cond, Register rj, const Operand& rk) { Jump(ra, cond, rj, rk); } -void TurboAssembler::Drop(int count, Condition cond, Register reg, +void MacroAssembler::Drop(int count, Condition cond, Register reg, const Operand& op) { if (count <= 0) { return; @@ -2885,23 +2885,23 @@ void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) { } } -void TurboAssembler::Call(Label* target) { Branch(target, true); } +void MacroAssembler::Call(Label* target) { Branch(target, true); } -void TurboAssembler::Push(Smi smi) { +void MacroAssembler::Push(Smi smi) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); li(scratch, Operand(smi)); Push(scratch); } -void TurboAssembler::Push(Handle handle) { +void MacroAssembler::Push(Handle handle) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); li(scratch, Operand(handle)); Push(scratch); } -void TurboAssembler::PushArray(Register array, Register size, Register scratch, +void MacroAssembler::PushArray(Register array, Register size, Register scratch, Register scratch2, PushArrayOrder order) { DCHECK(!AreAliased(array, size, scratch, scratch2)); Label loop, entry; @@ -2961,7 +2961,7 @@ void MacroAssembler::PopStackHandler() { St_d(a1, MemOperand(scratch, 0)); } -void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst, +void MacroAssembler::FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src) { fsub_d(dst, src, kDoubleRegZero); } @@ -2977,10 +2977,10 @@ void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) { kind == StackLimitKind::kRealStackLimit ? ExternalReference::address_of_real_jslimit(isolate) : ExternalReference::address_of_jslimit(isolate); - DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit)); + DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit)); intptr_t offset = - TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit); + MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit); CHECK(is_int32(offset)); Ld_d(destination, MemOperand(kRootRegister, static_cast(offset))); } @@ -3227,7 +3227,7 @@ void MacroAssembler::GetInstanceTypeRange(Register map, Register type_reg, // ----------------------------------------------------------------------------- // Runtime calls. 
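(The overflow helpers renamed in the next hunks, AddOverflow_d and SubOverflow_d, detect signed wrap-around with a sign-bit test: after the add_d/sub_d they leave (result ^ left) & (result ^ right), respectively (left ^ right) & (result ^ left), in the overflow register, which is negative exactly when the operation overflowed. A standalone restatement of that predicate, for illustration only and not V8 code:)

#include <cassert>
#include <cstdint>

// Negative return value <=> the 64-bit signed operation wrapped around,
// mirroring the value the macro assembler leaves in the overflow register.
static int64_t AddOverflowFlag(int64_t left, int64_t right) {
  int64_t result = static_cast<int64_t>(static_cast<uint64_t>(left) +
                                        static_cast<uint64_t>(right));
  return (result ^ left) & (result ^ right);
}

static int64_t SubOverflowFlag(int64_t left, int64_t right) {
  int64_t result = static_cast<int64_t>(static_cast<uint64_t>(left) -
                                        static_cast<uint64_t>(right));
  return (left ^ right) & (result ^ left);
}

int main() {
  assert(AddOverflowFlag(INT64_MAX, 1) < 0);   // wraps to INT64_MIN: overflow
  assert(AddOverflowFlag(1, 2) >= 0);          // no overflow
  assert(SubOverflowFlag(INT64_MIN, 1) < 0);   // wraps to INT64_MAX: overflow
  assert(SubOverflowFlag(5, 3) >= 0);          // no overflow
  return 0;
}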
-void TurboAssembler::AddOverflow_d(Register dst, Register left, +void MacroAssembler::AddOverflow_d(Register dst, Register left, const Operand& right, Register overflow) { ASM_CODE_COMMENT(this); BlockTrampolinePoolScope block_trampoline_pool(this); @@ -3260,7 +3260,7 @@ void TurboAssembler::AddOverflow_d(Register dst, Register left, } } -void TurboAssembler::SubOverflow_d(Register dst, Register left, +void MacroAssembler::SubOverflow_d(Register dst, Register left, const Operand& right, Register overflow) { ASM_CODE_COMMENT(this); BlockTrampolinePoolScope block_trampoline_pool(this); @@ -3293,7 +3293,7 @@ void TurboAssembler::SubOverflow_d(Register dst, Register left, } } -void TurboAssembler::MulOverflow_w(Register dst, Register left, +void MacroAssembler::MulOverflow_w(Register dst, Register left, const Operand& right, Register overflow) { ASM_CODE_COMMENT(this); BlockTrampolinePoolScope block_trampoline_pool(this); @@ -3325,7 +3325,7 @@ void TurboAssembler::MulOverflow_w(Register dst, Register left, xor_(overflow, overflow, scratch2); } -void TurboAssembler::MulOverflow_d(Register dst, Register left, +void MacroAssembler::MulOverflow_d(Register dst, Register left, const Operand& right, Register overflow) { ASM_CODE_COMMENT(this); BlockTrampolinePoolScope block_trampoline_pool(this); @@ -3441,10 +3441,10 @@ void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value, // ----------------------------------------------------------------------------- // Debugging. -void TurboAssembler::Trap() { stop(); } -void TurboAssembler::DebugBreak() { stop(); } +void MacroAssembler::Trap() { stop(); } +void MacroAssembler::DebugBreak() { stop(); } -void TurboAssembler::Check(Condition cc, AbortReason reason, Register rj, +void MacroAssembler::Check(Condition cc, AbortReason reason, Register rj, Operand rk) { Label L; Branch(&L, cc, rj, rk); @@ -3453,7 +3453,7 @@ void TurboAssembler::Check(Condition cc, AbortReason reason, Register rj, bind(&L); } -void TurboAssembler::Abort(AbortReason reason) { +void MacroAssembler::Abort(AbortReason reason) { Label abort_start; bind(&abort_start); if (v8_flags.code_comments) { @@ -3511,7 +3511,7 @@ void TurboAssembler::Abort(AbortReason reason) { } } -void TurboAssembler::LoadMap(Register destination, Register object) { +void MacroAssembler::LoadMap(Register destination, Register object) { Ld_d(destination, FieldMemOperand(object, HeapObject::kMapOffset)); } @@ -3522,16 +3522,16 @@ void MacroAssembler::LoadNativeContextSlot(Register dst, int index) { Ld_d(dst, MemOperand(dst, Context::SlotOffset(index))); } -void TurboAssembler::StubPrologue(StackFrame::Type type) { +void MacroAssembler::StubPrologue(StackFrame::Type type) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); li(scratch, Operand(StackFrame::TypeToMarker(type))); PushCommonFrame(scratch); } -void TurboAssembler::Prologue() { PushStandardFrame(a1); } +void MacroAssembler::Prologue() { PushStandardFrame(a1); } -void TurboAssembler::EnterFrame(StackFrame::Type type) { +void MacroAssembler::EnterFrame(StackFrame::Type type) { ASM_CODE_COMMENT(this); BlockTrampolinePoolScope block_trampoline_pool(this); Push(ra, fp); @@ -3546,7 +3546,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) { #endif // V8_ENABLE_WEBASSEMBLY } -void TurboAssembler::LeaveFrame(StackFrame::Type type) { +void MacroAssembler::LeaveFrame(StackFrame::Type type) { ASM_CODE_COMMENT(this); addi_d(sp, fp, 2 * kPointerSize); Ld_d(ra, MemOperand(fp, 1 * kPointerSize)); @@ -3662,7 +3662,7 @@ void 
MacroAssembler::LeaveExitFrame(Register argument_count, bool do_return, } } -int TurboAssembler::ActivationFrameAlignment() { +int MacroAssembler::ActivationFrameAlignment() { #if V8_HOST_ARCH_LOONG64 // Running on the real platform. Use the alignment as mandated by the local // environment. @@ -3678,7 +3678,7 @@ int TurboAssembler::ActivationFrameAlignment() { #endif // V8_HOST_ARCH_LOONG64 } -void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) { +void MacroAssembler::SmiUntag(Register dst, const MemOperand& src) { if (SmiValuesAre32Bits()) { Ld_w(dst, MemOperand(src.base(), SmiWordOffset(src.offset()))); } else { @@ -3688,7 +3688,7 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) { } } -void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) { +void MacroAssembler::JumpIfSmi(Register value, Label* smi_label) { DCHECK_EQ(0, kSmiTag); UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -3706,12 +3706,12 @@ void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) { #ifdef V8_ENABLE_DEBUG_CODE -void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs, +void MacroAssembler::Assert(Condition cc, AbortReason reason, Register rs, Operand rk) { if (v8_flags.debug_code) Check(cc, reason, rs, rk); } -void TurboAssembler::AssertNotSmi(Register object) { +void MacroAssembler::AssertNotSmi(Register object) { if (v8_flags.debug_code) { ASM_CODE_COMMENT(this); static_assert(kSmiTag == 0); @@ -3722,7 +3722,7 @@ void TurboAssembler::AssertNotSmi(Register object) { } } -void TurboAssembler::AssertSmi(Register object) { +void MacroAssembler::AssertSmi(Register object) { if (v8_flags.debug_code) { ASM_CODE_COMMENT(this); static_assert(kSmiTag == 0); @@ -3852,7 +3852,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, #endif // V8_ENABLE_DEBUG_CODE -void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1, +void MacroAssembler::Float32Max(FPURegister dst, FPURegister src1, FPURegister src2, Label* out_of_line) { ASM_CODE_COMMENT(this); if (src1 == src2) { @@ -3867,12 +3867,12 @@ void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1, fmax_s(dst, src1, src2); } -void TurboAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1, +void MacroAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2) { fadd_s(dst, src1, src2); } -void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1, +void MacroAssembler::Float32Min(FPURegister dst, FPURegister src1, FPURegister src2, Label* out_of_line) { ASM_CODE_COMMENT(this); if (src1 == src2) { @@ -3887,12 +3887,12 @@ void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1, fmin_s(dst, src1, src2); } -void TurboAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1, +void MacroAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2) { fadd_s(dst, src1, src2); } -void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1, +void MacroAssembler::Float64Max(FPURegister dst, FPURegister src1, FPURegister src2, Label* out_of_line) { ASM_CODE_COMMENT(this); if (src1 == src2) { @@ -3907,12 +3907,12 @@ void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1, fmax_d(dst, src1, src2); } -void TurboAssembler::Float64MaxOutOfLine(FPURegister dst, FPURegister src1, +void MacroAssembler::Float64MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2) { fadd_d(dst, src1, src2); } -void TurboAssembler::Float64Min(FPURegister dst, 
FPURegister src1, +void MacroAssembler::Float64Min(FPURegister dst, FPURegister src1, FPURegister src2, Label* out_of_line) { ASM_CODE_COMMENT(this); if (src1 == src2) { @@ -3927,7 +3927,7 @@ void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1, fmin_d(dst, src1, src2); } -void TurboAssembler::Float64MinOutOfLine(FPURegister dst, FPURegister src1, +void MacroAssembler::Float64MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2) { fadd_d(dst, src1, src2); } @@ -3935,7 +3935,7 @@ void TurboAssembler::Float64MinOutOfLine(FPURegister dst, FPURegister src1, static const int kRegisterPassedArguments = 8; static const int kFPRegisterPassedArguments = 8; -int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments, +int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments, int num_double_arguments) { int stack_passed_words = 0; @@ -3955,7 +3955,7 @@ int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments, return stack_passed_words; } -void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, +void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, int num_double_arguments, Register scratch) { ASM_CODE_COMMENT(this); @@ -3978,12 +3978,12 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, } } -void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, +void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, Register scratch) { PrepareCallCFunction(num_reg_arguments, 0, scratch); } -void TurboAssembler::CallCFunction(ExternalReference function, +void MacroAssembler::CallCFunction(ExternalReference function, int num_reg_arguments, int num_double_arguments) { ASM_CODE_COMMENT(this); @@ -3992,22 +3992,22 @@ void TurboAssembler::CallCFunction(ExternalReference function, CallCFunctionHelper(t7, num_reg_arguments, num_double_arguments); } -void TurboAssembler::CallCFunction(Register function, int num_reg_arguments, +void MacroAssembler::CallCFunction(Register function, int num_reg_arguments, int num_double_arguments) { ASM_CODE_COMMENT(this); CallCFunctionHelper(function, num_reg_arguments, num_double_arguments); } -void TurboAssembler::CallCFunction(ExternalReference function, +void MacroAssembler::CallCFunction(ExternalReference function, int num_arguments) { CallCFunction(function, num_arguments, 0); } -void TurboAssembler::CallCFunction(Register function, int num_arguments) { +void MacroAssembler::CallCFunction(Register function, int num_arguments) { CallCFunction(function, num_arguments, 0); } -void TurboAssembler::CallCFunctionHelper(Register function, +void MacroAssembler::CallCFunctionHelper(Register function, int num_reg_arguments, int num_double_arguments) { DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters); @@ -4096,7 +4096,7 @@ void TurboAssembler::CallCFunctionHelper(Register function, #undef BRANCH_ARGS_CHECK -void TurboAssembler::CheckPageFlag(const Register& object, int mask, +void MacroAssembler::CheckPageFlag(const Register& object, int mask, Condition cc, Label* condition_met) { ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); @@ -4123,12 +4123,12 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3, UNREACHABLE(); } -void TurboAssembler::ComputeCodeStartAddress(Register dst) { +void MacroAssembler::ComputeCodeStartAddress(Register dst) { // TODO(LOONG_dev): range check, add Pcadd macro function? 
pcaddi(dst, -pc_offset() >> 2); } -void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, +void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit, DeoptimizeKind kind, Label* ret, Label*) { ASM_CODE_COMMENT(this); @@ -4141,14 +4141,14 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, : Deoptimizer::kEagerDeoptExitSize); } -void TurboAssembler::LoadCodeEntry(Register destination, +void MacroAssembler::LoadCodeEntry(Register destination, Register code_data_container_object) { ASM_CODE_COMMENT(this); Ld_d(destination, FieldMemOperand(code_data_container_object, Code::kCodeEntryPointOffset)); } -void TurboAssembler::LoadCodeInstructionStreamNonBuiltin( +void MacroAssembler::LoadCodeInstructionStreamNonBuiltin( Register destination, Register code_data_container_object) { ASM_CODE_COMMENT(this); // Compute the InstructionStream object pointer from the code entry point. @@ -4158,13 +4158,13 @@ void TurboAssembler::LoadCodeInstructionStreamNonBuiltin( Operand(InstructionStream::kHeaderSize - kHeapObjectTag)); } -void TurboAssembler::CallCodeObject(Register code_data_container_object) { +void MacroAssembler::CallCodeObject(Register code_data_container_object) { ASM_CODE_COMMENT(this); LoadCodeEntry(code_data_container_object, code_data_container_object); Call(code_data_container_object); } -void TurboAssembler::JumpCodeObject(Register code_data_container_object, +void MacroAssembler::JumpCodeObject(Register code_data_container_object, JumpMode jump_mode) { ASM_CODE_COMMENT(this); DCHECK_EQ(JumpMode::kJump, jump_mode); diff --git a/src/codegen/loong64/macro-assembler-loong64.h b/src/codegen/loong64/macro-assembler-loong64.h index e4a7d9c1fa..2fcc1af3fc 100644 --- a/src/codegen/loong64/macro-assembler-loong64.h +++ b/src/codegen/loong64/macro-assembler-loong64.h @@ -59,9 +59,9 @@ inline MemOperand FieldMemOperand(Register object, int offset) { return MemOperand(object, offset - kHeapObjectTag); } -class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { +class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { public: - using TurboAssemblerBase::TurboAssemblerBase; + using MacroAssemblerBase::MacroAssemblerBase; // Activation support. void EnterFrame(StackFrame::Type type); @@ -773,46 +773,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { // Define an exception handler and bind a label. void BindExceptionHandler(Label* label) { bind(label); } - protected: - inline Register GetRkAsRegisterHelper(const Operand& rk, Register scratch); - inline int32_t GetOffset(Label* L, OffsetSize bits); - - private: - bool has_double_zero_reg_set_ = false; - - // Performs a truncating conversion of a floating point number as used by - // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it - // succeeds, otherwise falls through if result is saturated. On return - // 'result' either holds answer, or is clobbered on fall through. 
- void TryInlineTruncateDoubleToI(Register result, DoubleRegister input, - Label* done); - - bool BranchShortOrFallback(Label* L, Condition cond, Register rj, - const Operand& rk, bool need_link); - - // f32 or f64 - void CompareF(FPURegister cmp1, FPURegister cmp2, FPUCondition cc, - CFRegister cd, bool f32 = true); - - void CompareIsNanF(FPURegister cmp1, FPURegister cmp2, CFRegister cd, - bool f32 = true); - - void CallCFunctionHelper(Register function, int num_reg_arguments, - int num_double_arguments); - - void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode); - - void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode); - - // Push a fixed frame, consisting of ra, fp. - void PushCommonFrame(Register marker_reg = no_reg); -}; - -// MacroAssembler implements a collection of frequently used macros. -class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { - public: - using TurboAssembler::TurboAssembler; - // It assumes that the arguments are located below the stack pointer. // argc is the number of arguments not including the receiver. // TODO(LOONG_dev): LOONG64: Remove this function once we stick with the @@ -1079,17 +1039,50 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { DecodeField(reg, reg); } + protected: + inline Register GetRkAsRegisterHelper(const Operand& rk, Register scratch); + inline int32_t GetOffset(Label* L, OffsetSize bits); + private: + bool has_double_zero_reg_set_ = false; + // Helper functions for generating invokes. void InvokePrologue(Register expected_parameter_count, Register actual_parameter_count, Label* done, InvokeType type); + // Performs a truncating conversion of a floating point number as used by + // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it + // succeeds, otherwise falls through if result is saturated. On return + // 'result' either holds answer, or is clobbered on fall through. + void TryInlineTruncateDoubleToI(Register result, DoubleRegister input, + Label* done); + + bool BranchShortOrFallback(Label* L, Condition cond, Register rj, + const Operand& rk, bool need_link); + + // f32 or f64 + void CompareF(FPURegister cmp1, FPURegister cmp2, FPUCondition cc, + CFRegister cd, bool f32 = true); + + void CompareIsNanF(FPURegister cmp1, FPURegister cmp2, CFRegister cd, + bool f32 = true); + + void CallCFunctionHelper(Register function, int num_reg_arguments, + int num_double_arguments); + + void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode); + + void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode); + + // Push a fixed frame, consisting of ra, fp. + void PushCommonFrame(Register marker_reg = no_reg); + DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler); }; template -void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count, +void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count, Func GetLabelFunction) { UseScratchRegisterScope scope(this); Register scratch = scope.Acquire(); diff --git a/src/codegen/turbo-assembler.cc b/src/codegen/macro-assembler-base.cc similarity index 87% rename from src/codegen/turbo-assembler.cc rename to src/codegen/macro-assembler-base.cc index 3c0033c7da..fce27a1e34 100644 --- a/src/codegen/turbo-assembler.cc +++ b/src/codegen/macro-assembler-base.cc @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. 
-#include "src/codegen/turbo-assembler.h" +#include "src/codegen/macro-assembler-base.h" #include "src/builtins/builtins.h" #include "src/builtins/constants-table-builder.h" @@ -15,7 +15,7 @@ namespace v8 { namespace internal { -TurboAssemblerBase::TurboAssemblerBase(Isolate* isolate, +MacroAssemblerBase::MacroAssemblerBase(Isolate* isolate, const AssemblerOptions& options, CodeObjectRequired create_code_object, std::unique_ptr buffer) @@ -26,7 +26,7 @@ TurboAssemblerBase::TurboAssemblerBase(Isolate* isolate, } } -Address TurboAssemblerBase::BuiltinEntry(Builtin builtin) { +Address MacroAssemblerBase::BuiltinEntry(Builtin builtin) { DCHECK(Builtins::IsBuiltinId(builtin)); if (isolate_ != nullptr) { Address entry = isolate_->builtin_entry_table()[Builtins::ToInt(builtin)]; @@ -38,7 +38,7 @@ Address TurboAssemblerBase::BuiltinEntry(Builtin builtin) { return d.InstructionStartOfBuiltin(builtin); } -void TurboAssemblerBase::IndirectLoadConstant(Register destination, +void MacroAssemblerBase::IndirectLoadConstant(Register destination, Handle object) { CHECK(root_array_available_); @@ -71,7 +71,7 @@ void TurboAssemblerBase::IndirectLoadConstant(Register destination, } } -void TurboAssemblerBase::IndirectLoadExternalReference( +void MacroAssemblerBase::IndirectLoadExternalReference( Register destination, ExternalReference reference) { CHECK(root_array_available_); @@ -90,24 +90,24 @@ void TurboAssemblerBase::IndirectLoadExternalReference( } // static -int32_t TurboAssemblerBase::RootRegisterOffsetForRootIndex( +int32_t MacroAssemblerBase::RootRegisterOffsetForRootIndex( RootIndex root_index) { return IsolateData::root_slot_offset(root_index); } // static -int32_t TurboAssemblerBase::RootRegisterOffsetForBuiltin(Builtin builtin) { +int32_t MacroAssemblerBase::RootRegisterOffsetForBuiltin(Builtin builtin) { return IsolateData::BuiltinSlotOffset(builtin); } // static -intptr_t TurboAssemblerBase::RootRegisterOffsetForExternalReference( +intptr_t MacroAssemblerBase::RootRegisterOffsetForExternalReference( Isolate* isolate, const ExternalReference& reference) { return static_cast(reference.address() - isolate->isolate_root()); } // static -int32_t TurboAssemblerBase::RootRegisterOffsetForExternalReferenceTableEntry( +int32_t MacroAssemblerBase::RootRegisterOffsetForExternalReferenceTableEntry( Isolate* isolate, const ExternalReference& reference) { // Encode as an index into the external reference table stored on the // isolate. @@ -120,13 +120,13 @@ int32_t TurboAssemblerBase::RootRegisterOffsetForExternalReferenceTableEntry( } // static -bool TurboAssemblerBase::IsAddressableThroughRootRegister( +bool MacroAssemblerBase::IsAddressableThroughRootRegister( Isolate* isolate, const ExternalReference& reference) { Address address = reference.address(); return isolate->root_register_addressable_region().contains(address); } -Tagged_t TurboAssemblerBase::ReadOnlyRootPtr(RootIndex index) { +Tagged_t MacroAssemblerBase::ReadOnlyRootPtr(RootIndex index) { DCHECK(RootsTable::IsReadOnly(index)); CHECK(V8_STATIC_ROOTS_BOOL); CHECK(isolate_->root(index).IsHeapObject()); diff --git a/src/codegen/turbo-assembler.h b/src/codegen/macro-assembler-base.h similarity index 81% rename from src/codegen/turbo-assembler.h rename to src/codegen/macro-assembler-base.h index f9d55bba96..976b154d37 100644 --- a/src/codegen/turbo-assembler.h +++ b/src/codegen/macro-assembler-base.h @@ -2,8 +2,8 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. 
-#ifndef V8_CODEGEN_TURBO_ASSEMBLER_H_ -#define V8_CODEGEN_TURBO_ASSEMBLER_H_ +#ifndef V8_CODEGEN_MACRO_ASSEMBLER_BASE_H_ +#define V8_CODEGEN_MACRO_ASSEMBLER_BASE_H_ #include @@ -15,30 +15,24 @@ namespace v8 { namespace internal { -// Common base class for platform-specific TurboAssemblers containing +// Common base class for platform-specific MacroAssemblers containing // platform-independent bits. -// You will encounter two subclasses, TurboAssembler (derives from -// TurboAssemblerBase), and MacroAssembler (derives from TurboAssembler). The -// main difference is that MacroAssembler is allowed to access the isolate, and -// TurboAssembler accesses the isolate in a very limited way. TurboAssembler -// contains all the functionality that is used by Turbofan, and does not expect -// to be running on the main thread. -class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler { +// TODO(victorgomes): We should use LocalIsolate instead of Isolate in the +// methods of this class. +class V8_EXPORT_PRIVATE MacroAssemblerBase : public Assembler { public: // Constructors are declared public to inherit them in derived classes // with `using` directive. - TurboAssemblerBase(Isolate* isolate, CodeObjectRequired create_code_object, + MacroAssemblerBase(Isolate* isolate, CodeObjectRequired create_code_object, std::unique_ptr buffer = {}) - : TurboAssemblerBase(isolate, AssemblerOptions::Default(isolate), + : MacroAssemblerBase(isolate, AssemblerOptions::Default(isolate), create_code_object, std::move(buffer)) {} - TurboAssemblerBase(Isolate* isolate, const AssemblerOptions& options, + MacroAssemblerBase(Isolate* isolate, const AssemblerOptions& options, CodeObjectRequired create_code_object, std::unique_ptr buffer = {}); - Isolate* isolate() const { - return isolate_; - } + Isolate* isolate() const { return isolate_; } Handle CodeObject() const { DCHECK(!code_object_.is_null()); @@ -135,25 +129,25 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler { int comment_depth_ = 0; - DISALLOW_IMPLICIT_CONSTRUCTORS(TurboAssemblerBase); + DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssemblerBase); }; // Avoids emitting calls to the {Builtin::kAbort} builtin when emitting // debug code during the lifetime of this scope object. 
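// HardAbortScope is used RAII-style around regions of emitted debug checks; a
// minimal usage sketch (masm stands for any MacroAssemblerBase-derived
// assembler, and the body is left abstract) looks like:
//
//   {
//     HardAbortScope hard_abort(masm);  // Debug aborts in this region trap
//                                       // directly instead of calling the
//                                       // Builtin::kAbort builtin.
//     // ... emit verification code via masm ...
//   }
//   // The destructor restores the previous should_abort_hard() value.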
class V8_NODISCARD HardAbortScope { public: - explicit HardAbortScope(TurboAssemblerBase* assembler) + explicit HardAbortScope(MacroAssemblerBase* assembler) : assembler_(assembler), old_value_(assembler->should_abort_hard()) { assembler_->set_abort_hard(true); } ~HardAbortScope() { assembler_->set_abort_hard(old_value_); } private: - TurboAssemblerBase* assembler_; + MacroAssemblerBase* assembler_; bool old_value_; }; } // namespace internal } // namespace v8 -#endif // V8_CODEGEN_TURBO_ASSEMBLER_H_ +#endif // V8_CODEGEN_MACRO_ASSEMBLER_BASE_H_ diff --git a/src/codegen/macro-assembler.h b/src/codegen/macro-assembler.h index 61b26a320f..3e5d83806d 100644 --- a/src/codegen/macro-assembler.h +++ b/src/codegen/macro-assembler.h @@ -5,7 +5,7 @@ #ifndef V8_CODEGEN_MACRO_ASSEMBLER_H_ #define V8_CODEGEN_MACRO_ASSEMBLER_H_ -#include "src/codegen/turbo-assembler.h" +#include "src/codegen/macro-assembler-base.h" #include "src/execution/frames.h" #include "src/heap/heap.h" @@ -82,25 +82,25 @@ static constexpr int kMaxCParameters = 256; class V8_NODISCARD FrameScope { public: - explicit FrameScope(TurboAssembler* tasm, StackFrame::Type type) + explicit FrameScope(MacroAssembler* masm, StackFrame::Type type) : #ifdef V8_CODE_COMMENTS - comment_(tasm, frame_name(type)), + comment_(masm, frame_name(type)), #endif - tasm_(tasm), + masm_(masm), type_(type), - old_has_frame_(tasm->has_frame()) { - tasm->set_has_frame(true); + old_has_frame_(masm->has_frame()) { + masm->set_has_frame(true); if (type != StackFrame::MANUAL && type_ != StackFrame::NO_FRAME_TYPE) { - tasm->EnterFrame(type); + masm->EnterFrame(type); } } ~FrameScope() { if (type_ != StackFrame::MANUAL && type_ != StackFrame::NO_FRAME_TYPE) { - tasm_->LeaveFrame(type_); + masm_->LeaveFrame(type_); } - tasm_->set_has_frame(old_has_frame_); + masm_->set_has_frame(old_has_frame_); } private: @@ -125,7 +125,7 @@ class V8_NODISCARD FrameScope { Assembler::CodeComment comment_; #endif // V8_CODE_COMMENTS - TurboAssembler* tasm_; + MacroAssembler* masm_; StackFrame::Type const type_; bool const old_has_frame_; }; @@ -198,7 +198,7 @@ class V8_NODISCARD AllowExternalCallThatCantCauseGC : public FrameScope { // scope object. class V8_NODISCARD NoRootArrayScope { public: - explicit NoRootArrayScope(TurboAssembler* masm) + explicit NoRootArrayScope(MacroAssembler* masm) : masm_(masm), old_value_(masm->root_array_available()) { masm->set_root_array_available(false); } @@ -206,7 +206,7 @@ class V8_NODISCARD NoRootArrayScope { ~NoRootArrayScope() { masm_->set_root_array_available(old_value_); } private: - TurboAssembler* masm_; + MacroAssembler* masm_; bool old_value_; }; diff --git a/src/codegen/mips64/assembler-mips64.cc b/src/codegen/mips64/assembler-mips64.cc index e95a07dc84..2e91386ad6 100644 --- a/src/codegen/mips64/assembler-mips64.cc +++ b/src/codegen/mips64/assembler-mips64.cc @@ -819,7 +819,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) { Instr instr_b = REGIMM | BGEZAL; // Branch and link. instr_b = SetBranchOffset(pos, target_pos, instr_b); // Correct ra register to point to one instruction after jalr from - // TurboAssembler::BranchAndLinkLong. + // MacroAssembler::BranchAndLinkLong. 
Instr instr_a = DADDIU | ra.code() << kRsShift | ra.code() << kRtShift | kOptimizedBranchAndLinkLongReturnOffset; diff --git a/src/codegen/mips64/assembler-mips64.h b/src/codegen/mips64/assembler-mips64.h index 02b77ee767..3e4c5239f0 100644 --- a/src/codegen/mips64/assembler-mips64.h +++ b/src/codegen/mips64/assembler-mips64.h @@ -294,7 +294,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // Adjust ra register in branch delay slot of bal instruction so to skip // instructions not needed after optimization of PIC in - // TurboAssembler::BranchAndLink method. + // MacroAssembler::BranchAndLink method. static constexpr int kOptimizedBranchAndLinkLongReturnOffset = 4 * kInstrSize; diff --git a/src/codegen/mips64/macro-assembler-mips64.cc b/src/codegen/mips64/macro-assembler-mips64.cc index 017fa215df..df96b7b92a 100644 --- a/src/codegen/mips64/macro-assembler-mips64.cc +++ b/src/codegen/mips64/macro-assembler-mips64.cc @@ -48,7 +48,7 @@ static inline bool IsZero(const Operand& rt) { } } -int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, +int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, Register exclusion2, Register exclusion3) const { @@ -64,7 +64,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, return bytes; } -int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, +int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, Register exclusion2, Register exclusion3) { ASM_CODE_COMMENT(this); int bytes = 0; @@ -81,7 +81,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, return bytes; } -int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, +int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, Register exclusion2, Register exclusion3) { ASM_CODE_COMMENT(this); int bytes = 0; @@ -98,18 +98,18 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, return bytes; } -void TurboAssembler::LoadRoot(Register destination, RootIndex index) { +void MacroAssembler::LoadRoot(Register destination, RootIndex index) { Ld(destination, MemOperand(s6, RootRegisterOffsetForRootIndex(index))); } -void TurboAssembler::LoadRoot(Register destination, RootIndex index, +void MacroAssembler::LoadRoot(Register destination, RootIndex index, Condition cond, Register src1, const Operand& src2) { Branch(2, NegateCondition(cond), src1, src2); Ld(destination, MemOperand(s6, RootRegisterOffsetForRootIndex(index))); } -void TurboAssembler::PushCommonFrame(Register marker_reg) { +void MacroAssembler::PushCommonFrame(Register marker_reg) { if (marker_reg.is_valid()) { Push(ra, fp, marker_reg); Daddu(fp, sp, Operand(kPointerSize)); @@ -119,7 +119,7 @@ void TurboAssembler::PushCommonFrame(Register marker_reg) { } } -void TurboAssembler::PushStandardFrame(Register function_reg) { +void MacroAssembler::PushStandardFrame(Register function_reg) { int offset = -StandardFrameConstants::kContextOffset; if (function_reg.is_valid()) { Push(ra, fp, cp, function_reg, kJavaScriptCallArgCountRegister); @@ -176,17 +176,17 @@ void MacroAssembler::RecordWriteField(Register object, int offset, } } -void TurboAssembler::MaybeSaveRegisters(RegList registers) { +void MacroAssembler::MaybeSaveRegisters(RegList registers) { if (registers.is_empty()) return; MultiPush(registers); } -void TurboAssembler::MaybeRestoreRegisters(RegList registers) { +void 
MacroAssembler::MaybeRestoreRegisters(RegList registers) { if (registers.is_empty()) return; MultiPop(registers); } -void TurboAssembler::CallEphemeronKeyBarrier(Register object, +void MacroAssembler::CallEphemeronKeyBarrier(Register object, Register slot_address, SaveFPRegsMode fp_mode) { ASM_CODE_COMMENT(this); @@ -210,7 +210,7 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object, MaybeRestoreRegisters(registers); } -void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object, +void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode) { @@ -233,7 +233,7 @@ void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object, MaybeRestoreRegisters(registers); } -void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address, +void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode) { // Use CallRecordWriteStubSaveRegisters if the object and slot registers @@ -320,7 +320,7 @@ void MacroAssembler::RecordWrite(Register object, Register address, // --------------------------------------------------------------------------- // Instruction macros. -void TurboAssembler::Addu(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { addu(rd, rs, rt.rm()); } else { @@ -337,7 +337,7 @@ void TurboAssembler::Addu(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Daddu(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Daddu(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { daddu(rd, rs, rt.rm()); } else { @@ -354,7 +354,7 @@ void TurboAssembler::Daddu(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Subu(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { subu(rd, rs, rt.rm()); } else { @@ -380,7 +380,7 @@ void TurboAssembler::Subu(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Dsubu(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { dsubu(rd, rs, rt.rm()); } else if (is_int16(-rt.immediate()) && !MustUseReg(rt.rmode())) { @@ -408,7 +408,7 @@ void TurboAssembler::Dsubu(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Mul(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { mul(rd, rs, rt.rm()); } else { @@ -421,7 +421,7 @@ void TurboAssembler::Mul(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Mulh(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { if (kArchVariant != kMips64r6) { mult(rs, rt.rm()); @@ -444,7 +444,7 @@ void TurboAssembler::Mulh(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Mulhu(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { if (kArchVariant != kMips64r6) { multu(rs, rt.rm()); @@ -467,7 +467,7 @@ void TurboAssembler::Mulhu(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Dmul(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Dmul(Register rd, Register rs, const 
Operand& rt) { if (rt.is_reg()) { if (kArchVariant == kMips64r6) { dmul(rd, rs, rt.rm()); @@ -490,7 +490,7 @@ void TurboAssembler::Dmul(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Dmulh(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Dmulh(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { if (kArchVariant == kMips64r6) { dmuh(rd, rs, rt.rm()); @@ -513,7 +513,7 @@ void TurboAssembler::Dmulh(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Dmulhu(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Dmulhu(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { if (kArchVariant == kMips64r6) { dmuhu(rd, rs, rt.rm()); @@ -536,7 +536,7 @@ void TurboAssembler::Dmulhu(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Mult(Register rs, const Operand& rt) { +void MacroAssembler::Mult(Register rs, const Operand& rt) { if (rt.is_reg()) { mult(rs, rt.rm()); } else { @@ -549,7 +549,7 @@ void TurboAssembler::Mult(Register rs, const Operand& rt) { } } -void TurboAssembler::Dmult(Register rs, const Operand& rt) { +void MacroAssembler::Dmult(Register rs, const Operand& rt) { if (rt.is_reg()) { dmult(rs, rt.rm()); } else { @@ -562,7 +562,7 @@ void TurboAssembler::Dmult(Register rs, const Operand& rt) { } } -void TurboAssembler::Multu(Register rs, const Operand& rt) { +void MacroAssembler::Multu(Register rs, const Operand& rt) { if (rt.is_reg()) { multu(rs, rt.rm()); } else { @@ -575,7 +575,7 @@ void TurboAssembler::Multu(Register rs, const Operand& rt) { } } -void TurboAssembler::Dmultu(Register rs, const Operand& rt) { +void MacroAssembler::Dmultu(Register rs, const Operand& rt) { if (rt.is_reg()) { dmultu(rs, rt.rm()); } else { @@ -588,7 +588,7 @@ void TurboAssembler::Dmultu(Register rs, const Operand& rt) { } } -void TurboAssembler::Div(Register rs, const Operand& rt) { +void MacroAssembler::Div(Register rs, const Operand& rt) { if (rt.is_reg()) { div(rs, rt.rm()); } else { @@ -601,7 +601,7 @@ void TurboAssembler::Div(Register rs, const Operand& rt) { } } -void TurboAssembler::Div(Register res, Register rs, const Operand& rt) { +void MacroAssembler::Div(Register res, Register rs, const Operand& rt) { if (rt.is_reg()) { if (kArchVariant != kMips64r6) { div(rs, rt.rm()); @@ -624,7 +624,7 @@ void TurboAssembler::Div(Register res, Register rs, const Operand& rt) { } } -void TurboAssembler::Mod(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { if (kArchVariant != kMips64r6) { div(rs, rt.rm()); @@ -647,7 +647,7 @@ void TurboAssembler::Mod(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Modu(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { if (kArchVariant != kMips64r6) { divu(rs, rt.rm()); @@ -670,7 +670,7 @@ void TurboAssembler::Modu(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Ddiv(Register rs, const Operand& rt) { +void MacroAssembler::Ddiv(Register rs, const Operand& rt) { if (rt.is_reg()) { ddiv(rs, rt.rm()); } else { @@ -683,7 +683,7 @@ void TurboAssembler::Ddiv(Register rs, const Operand& rt) { } } -void TurboAssembler::Ddiv(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Ddiv(Register rd, Register rs, const Operand& rt) { if (kArchVariant != kMips64r6) { if (rt.is_reg()) { ddiv(rs, rt.rm()); @@ -711,7 +711,7 @@ void 
TurboAssembler::Ddiv(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Divu(Register rs, const Operand& rt) { +void MacroAssembler::Divu(Register rs, const Operand& rt) { if (rt.is_reg()) { divu(rs, rt.rm()); } else { @@ -724,7 +724,7 @@ void TurboAssembler::Divu(Register rs, const Operand& rt) { } } -void TurboAssembler::Divu(Register res, Register rs, const Operand& rt) { +void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) { if (rt.is_reg()) { if (kArchVariant != kMips64r6) { divu(rs, rt.rm()); @@ -747,7 +747,7 @@ void TurboAssembler::Divu(Register res, Register rs, const Operand& rt) { } } -void TurboAssembler::Ddivu(Register rs, const Operand& rt) { +void MacroAssembler::Ddivu(Register rs, const Operand& rt) { if (rt.is_reg()) { ddivu(rs, rt.rm()); } else { @@ -760,7 +760,7 @@ void TurboAssembler::Ddivu(Register rs, const Operand& rt) { } } -void TurboAssembler::Ddivu(Register res, Register rs, const Operand& rt) { +void MacroAssembler::Ddivu(Register res, Register rs, const Operand& rt) { if (rt.is_reg()) { if (kArchVariant != kMips64r6) { ddivu(rs, rt.rm()); @@ -783,7 +783,7 @@ void TurboAssembler::Ddivu(Register res, Register rs, const Operand& rt) { } } -void TurboAssembler::Dmod(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) { if (kArchVariant != kMips64r6) { if (rt.is_reg()) { ddiv(rs, rt.rm()); @@ -811,7 +811,7 @@ void TurboAssembler::Dmod(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Dmodu(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Dmodu(Register rd, Register rs, const Operand& rt) { if (kArchVariant != kMips64r6) { if (rt.is_reg()) { ddivu(rs, rt.rm()); @@ -839,7 +839,7 @@ void TurboAssembler::Dmodu(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::And(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::And(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { and_(rd, rs, rt.rm()); } else { @@ -856,7 +856,7 @@ void TurboAssembler::And(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Or(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { or_(rd, rs, rt.rm()); } else { @@ -873,7 +873,7 @@ void TurboAssembler::Or(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Xor(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { xor_(rd, rs, rt.rm()); } else { @@ -890,7 +890,7 @@ void TurboAssembler::Xor(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Nor(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { nor(rd, rs, rt.rm()); } else { @@ -903,11 +903,11 @@ void TurboAssembler::Nor(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Neg(Register rs, const Operand& rt) { +void MacroAssembler::Neg(Register rs, const Operand& rt) { dsubu(rs, zero_reg, rt.rm()); } -void TurboAssembler::Slt(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { slt(rd, rs, rt.rm()); } else { @@ -925,7 +925,7 @@ void TurboAssembler::Slt(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) { +void 
MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { sltu(rd, rs, rt.rm()); } else { @@ -949,7 +949,7 @@ void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Sle(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sle(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { slt(rd, rt.rm(), rs); } else { @@ -964,7 +964,7 @@ void TurboAssembler::Sle(Register rd, Register rs, const Operand& rt) { xori(rd, rd, 1); } -void TurboAssembler::Sleu(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sleu(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { sltu(rd, rt.rm(), rs); } else { @@ -979,17 +979,17 @@ void TurboAssembler::Sleu(Register rd, Register rs, const Operand& rt) { xori(rd, rd, 1); } -void TurboAssembler::Sge(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sge(Register rd, Register rs, const Operand& rt) { Slt(rd, rs, rt); xori(rd, rd, 1); } -void TurboAssembler::Sgeu(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sgeu(Register rd, Register rs, const Operand& rt) { Sltu(rd, rs, rt); xori(rd, rd, 1); } -void TurboAssembler::Sgt(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sgt(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { slt(rd, rt.rm(), rs); } else { @@ -1003,7 +1003,7 @@ void TurboAssembler::Sgt(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Sgtu(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sgtu(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { sltu(rd, rt.rm(), rs); } else { @@ -1017,7 +1017,7 @@ void TurboAssembler::Sgtu(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { rotrv(rd, rs, rt.rm()); } else { @@ -1029,7 +1029,7 @@ void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Dror(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { drotrv(rd, rs, rt.rm()); } else { @@ -1047,7 +1047,7 @@ void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) { pref(hint, rs); } -void TurboAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa, +void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa, Register scratch) { DCHECK(sa >= 1 && sa <= 31); if (kArchVariant == kMips64r6 && sa <= 4) { @@ -1060,7 +1060,7 @@ void TurboAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa, } } -void TurboAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa, +void MacroAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa, Register scratch) { DCHECK(sa >= 1 && sa <= 63); if (kArchVariant == kMips64r6 && sa <= 4) { @@ -1076,7 +1076,7 @@ void TurboAssembler::Dlsa(Register rd, Register rt, Register rs, uint8_t sa, } } -void TurboAssembler::Bovc(Register rs, Register rt, Label* L) { +void MacroAssembler::Bovc(Register rs, Register rt, Label* L) { if (is_trampoline_emitted()) { Label skip; bnvc(rs, rt, &skip); @@ -1087,7 +1087,7 @@ void TurboAssembler::Bovc(Register rs, Register rt, Label* L) { } } -void TurboAssembler::Bnvc(Register rs, Register rt, Label* L) { +void MacroAssembler::Bnvc(Register rs, Register rt, Label* L) { if (is_trampoline_emitted()) { Label skip; 
bovc(rs, rt, &skip); @@ -1101,7 +1101,7 @@ void TurboAssembler::Bnvc(Register rs, Register rt, Label* L) { // ------------Pseudo-instructions------------- // Change endianness -void TurboAssembler::ByteSwapSigned(Register dest, Register src, +void MacroAssembler::ByteSwapSigned(Register dest, Register src, int operand_size) { DCHECK(operand_size == 2 || operand_size == 4 || operand_size == 8); DCHECK(kArchVariant == kMips64r6 || kArchVariant == kMips64r2); @@ -1117,7 +1117,7 @@ void TurboAssembler::ByteSwapSigned(Register dest, Register src, } } -void TurboAssembler::ByteSwapUnsigned(Register dest, Register src, +void MacroAssembler::ByteSwapUnsigned(Register dest, Register src, int operand_size) { DCHECK(operand_size == 2 || operand_size == 4); if (operand_size == 2) { @@ -1130,7 +1130,7 @@ void TurboAssembler::ByteSwapUnsigned(Register dest, Register src, } } -void TurboAssembler::Ulw(Register rd, const MemOperand& rs) { +void MacroAssembler::Ulw(Register rd, const MemOperand& rs) { DCHECK(rd != at); DCHECK(rs.rm() != at); if (kArchVariant == kMips64r6) { @@ -1154,7 +1154,7 @@ void TurboAssembler::Ulw(Register rd, const MemOperand& rs) { } } -void TurboAssembler::Ulwu(Register rd, const MemOperand& rs) { +void MacroAssembler::Ulwu(Register rd, const MemOperand& rs) { if (kArchVariant == kMips64r6) { Lwu(rd, rs); } else { @@ -1164,7 +1164,7 @@ void TurboAssembler::Ulwu(Register rd, const MemOperand& rs) { } } -void TurboAssembler::Usw(Register rd, const MemOperand& rs) { +void MacroAssembler::Usw(Register rd, const MemOperand& rs) { DCHECK(rd != at); DCHECK(rs.rm() != at); DCHECK(rd != rs.rm()); @@ -1181,7 +1181,7 @@ void TurboAssembler::Usw(Register rd, const MemOperand& rs) { } } -void TurboAssembler::Ulh(Register rd, const MemOperand& rs) { +void MacroAssembler::Ulh(Register rd, const MemOperand& rs) { DCHECK(rd != at); DCHECK(rs.rm() != at); if (kArchVariant == kMips64r6) { @@ -1215,7 +1215,7 @@ void TurboAssembler::Ulh(Register rd, const MemOperand& rs) { } } -void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) { +void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) { DCHECK(rd != at); DCHECK(rs.rm() != at); if (kArchVariant == kMips64r6) { @@ -1249,7 +1249,7 @@ void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) { } } -void TurboAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) { +void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) { DCHECK(rd != at); DCHECK(rs.rm() != at); DCHECK(rs.rm() != scratch); @@ -1278,7 +1278,7 @@ void TurboAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) { } } -void TurboAssembler::Uld(Register rd, const MemOperand& rs) { +void MacroAssembler::Uld(Register rd, const MemOperand& rs) { DCHECK(rd != at); DCHECK(rs.rm() != at); if (kArchVariant == kMips64r6) { @@ -1313,7 +1313,7 @@ void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs, Daddu(rd, rd, scratch); } -void TurboAssembler::Usd(Register rd, const MemOperand& rs) { +void MacroAssembler::Usd(Register rd, const MemOperand& rs) { DCHECK(rd != at); DCHECK(rs.rm() != at); if (kArchVariant == kMips64r6) { @@ -1337,7 +1337,7 @@ void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs, Sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2)); } -void TurboAssembler::Ulwc1(FPURegister fd, const MemOperand& rs, +void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs, Register scratch) { if (kArchVariant == kMips64r6) { Lwc1(fd, rs); @@ -1348,7 +1348,7 @@ void 
TurboAssembler::Ulwc1(FPURegister fd, const MemOperand& rs, } } -void TurboAssembler::Uswc1(FPURegister fd, const MemOperand& rs, +void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs, Register scratch) { if (kArchVariant == kMips64r6) { Swc1(fd, rs); @@ -1359,7 +1359,7 @@ void TurboAssembler::Uswc1(FPURegister fd, const MemOperand& rs, } } -void TurboAssembler::Uldc1(FPURegister fd, const MemOperand& rs, +void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs, Register scratch) { DCHECK(scratch != at); if (kArchVariant == kMips64r6) { @@ -1371,7 +1371,7 @@ void TurboAssembler::Uldc1(FPURegister fd, const MemOperand& rs, } } -void TurboAssembler::Usdc1(FPURegister fd, const MemOperand& rs, +void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs, Register scratch) { DCHECK(scratch != at); if (kArchVariant == kMips64r6) { @@ -1383,97 +1383,97 @@ void TurboAssembler::Usdc1(FPURegister fd, const MemOperand& rs, } } -void TurboAssembler::Lb(Register rd, const MemOperand& rs) { +void MacroAssembler::Lb(Register rd, const MemOperand& rs) { MemOperand source = rs; AdjustBaseAndOffset(&source); lb(rd, source); } -void TurboAssembler::Lbu(Register rd, const MemOperand& rs) { +void MacroAssembler::Lbu(Register rd, const MemOperand& rs) { MemOperand source = rs; AdjustBaseAndOffset(&source); lbu(rd, source); } -void TurboAssembler::Sb(Register rd, const MemOperand& rs) { +void MacroAssembler::Sb(Register rd, const MemOperand& rs) { MemOperand source = rs; AdjustBaseAndOffset(&source); sb(rd, source); } -void TurboAssembler::Lh(Register rd, const MemOperand& rs) { +void MacroAssembler::Lh(Register rd, const MemOperand& rs) { MemOperand source = rs; AdjustBaseAndOffset(&source); lh(rd, source); } -void TurboAssembler::Lhu(Register rd, const MemOperand& rs) { +void MacroAssembler::Lhu(Register rd, const MemOperand& rs) { MemOperand source = rs; AdjustBaseAndOffset(&source); lhu(rd, source); } -void TurboAssembler::Sh(Register rd, const MemOperand& rs) { +void MacroAssembler::Sh(Register rd, const MemOperand& rs) { MemOperand source = rs; AdjustBaseAndOffset(&source); sh(rd, source); } -void TurboAssembler::Lw(Register rd, const MemOperand& rs) { +void MacroAssembler::Lw(Register rd, const MemOperand& rs) { MemOperand source = rs; AdjustBaseAndOffset(&source); lw(rd, source); } -void TurboAssembler::Lwu(Register rd, const MemOperand& rs) { +void MacroAssembler::Lwu(Register rd, const MemOperand& rs) { MemOperand source = rs; AdjustBaseAndOffset(&source); lwu(rd, source); } -void TurboAssembler::Sw(Register rd, const MemOperand& rs) { +void MacroAssembler::Sw(Register rd, const MemOperand& rs) { MemOperand source = rs; AdjustBaseAndOffset(&source); sw(rd, source); } -void TurboAssembler::Ld(Register rd, const MemOperand& rs) { +void MacroAssembler::Ld(Register rd, const MemOperand& rs) { MemOperand source = rs; AdjustBaseAndOffset(&source); ld(rd, source); } -void TurboAssembler::Sd(Register rd, const MemOperand& rs) { +void MacroAssembler::Sd(Register rd, const MemOperand& rs) { MemOperand source = rs; AdjustBaseAndOffset(&source); sd(rd, source); } -void TurboAssembler::Lwc1(FPURegister fd, const MemOperand& src) { +void MacroAssembler::Lwc1(FPURegister fd, const MemOperand& src) { MemOperand tmp = src; AdjustBaseAndOffset(&tmp); lwc1(fd, tmp); } -void TurboAssembler::Swc1(FPURegister fs, const MemOperand& src) { +void MacroAssembler::Swc1(FPURegister fs, const MemOperand& src) { MemOperand tmp = src; AdjustBaseAndOffset(&tmp); swc1(fs, tmp); } -void 
TurboAssembler::Ldc1(FPURegister fd, const MemOperand& src) { +void MacroAssembler::Ldc1(FPURegister fd, const MemOperand& src) { MemOperand tmp = src; AdjustBaseAndOffset(&tmp); ldc1(fd, tmp); } -void TurboAssembler::Sdc1(FPURegister fs, const MemOperand& src) { +void MacroAssembler::Sdc1(FPURegister fs, const MemOperand& src) { MemOperand tmp = src; AdjustBaseAndOffset(&tmp); sdc1(fs, tmp); } -void TurboAssembler::Ll(Register rd, const MemOperand& rs) { +void MacroAssembler::Ll(Register rd, const MemOperand& rs) { bool is_one_instruction = (kArchVariant == kMips64r6) ? is_int9(rs.offset()) : is_int16(rs.offset()); if (is_one_instruction) { @@ -1487,7 +1487,7 @@ void TurboAssembler::Ll(Register rd, const MemOperand& rs) { } } -void TurboAssembler::Lld(Register rd, const MemOperand& rs) { +void MacroAssembler::Lld(Register rd, const MemOperand& rs) { bool is_one_instruction = (kArchVariant == kMips64r6) ? is_int9(rs.offset()) : is_int16(rs.offset()); if (is_one_instruction) { @@ -1501,7 +1501,7 @@ void TurboAssembler::Lld(Register rd, const MemOperand& rs) { } } -void TurboAssembler::Sc(Register rd, const MemOperand& rs) { +void MacroAssembler::Sc(Register rd, const MemOperand& rs) { bool is_one_instruction = (kArchVariant == kMips64r6) ? is_int9(rs.offset()) : is_int16(rs.offset()); if (is_one_instruction) { @@ -1515,7 +1515,7 @@ void TurboAssembler::Sc(Register rd, const MemOperand& rs) { } } -void TurboAssembler::Scd(Register rd, const MemOperand& rs) { +void MacroAssembler::Scd(Register rd, const MemOperand& rs) { bool is_one_instruction = (kArchVariant == kMips64r6) ? is_int9(rs.offset()) : is_int16(rs.offset()); if (is_one_instruction) { @@ -1529,7 +1529,7 @@ void TurboAssembler::Scd(Register rd, const MemOperand& rs) { } } -void TurboAssembler::li(Register dst, Handle value, LiFlags mode) { +void MacroAssembler::li(Register dst, Handle value, LiFlags mode) { // TODO(jgruber,v8:8887): Also consider a root-relative load when generating // non-isolate-independent code. In many cases it might be cheaper than // embedding the relocatable value. @@ -1540,7 +1540,7 @@ void TurboAssembler::li(Register dst, Handle value, LiFlags mode) { li(dst, Operand(value), mode); } -void TurboAssembler::li(Register dst, ExternalReference value, LiFlags mode) { +void MacroAssembler::li(Register dst, ExternalReference value, LiFlags mode) { // TODO(jgruber,v8:8887): Also consider a root-relative load when generating // non-isolate-independent code. In many cases it might be cheaper than // embedding the relocatable value. @@ -1560,7 +1560,7 @@ static inline int InstrCountForLiLower32Bit(int64_t value) { } } -void TurboAssembler::LiLower32BitHelper(Register rd, Operand j) { +void MacroAssembler::LiLower32BitHelper(Register rd, Operand j) { if (is_int16(static_cast(j.immediate()))) { daddiu(rd, zero_reg, (j.immediate() & kImm16Mask)); } else if (!(j.immediate() & kUpper16MaskOf64)) { @@ -1584,7 +1584,7 @@ static inline int InstrCountForLoadReplicatedConst32(int64_t value) { return INT_MAX; } -int TurboAssembler::InstrCountForLi64Bit(int64_t value) { +int MacroAssembler::InstrCountForLi64Bit(int64_t value) { if (is_int32(value)) { return InstrCountForLiLower32Bit(value); } else { @@ -1679,7 +1679,7 @@ int TurboAssembler::InstrCountForLi64Bit(int64_t value) { // All changes to if...else conditions here must be added to // InstrCountForLi64Bit as well. 
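// For reference, a 64-bit immediate that matches none of the cheaper patterns
// handled below can always be synthesized from 16-bit chunks; a generic
// worst-case sequence (an illustrative sketch, not necessarily the exact
// instructions li_optimized emits) is:
//
//   lui   rd, (imm >> 48) & 0xFFFF     // bits 63..48, shifted into place
//   ori   rd, rd, (imm >> 32) & 0xFFFF // bits 47..32
//   dsll  rd, rd, 16
//   ori   rd, rd, (imm >> 16) & 0xFFFF // bits 31..16
//   dsll  rd, rd, 16
//   ori   rd, rd, imm & 0xFFFF         // bits 15..0
//
// The point of li_optimized, and of keeping InstrCountForLi64Bit in sync with
// it, is to recognize immediates that can be materialized in fewer than six
// instructions (e.g. sign-extended 16/32-bit values or replicated 32-bit
// patterns).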
-void TurboAssembler::li_optimized(Register rd, Operand j, LiFlags mode) { +void MacroAssembler::li_optimized(Register rd, Operand j, LiFlags mode) { DCHECK(!j.is_reg()); DCHECK(!MustUseReg(j.rmode())); DCHECK(mode == OPTIMIZE_SIZE); @@ -1857,7 +1857,7 @@ void TurboAssembler::li_optimized(Register rd, Operand j, LiFlags mode) { } } -void TurboAssembler::li(Register rd, Operand j, LiFlags mode) { +void MacroAssembler::li(Register rd, Operand j, LiFlags mode) { DCHECK(!j.is_reg()); BlockTrampolinePoolScope block_trampoline_pool(this); if (!MustUseReg(j.rmode()) && mode == OPTIMIZE_SIZE) { @@ -1919,7 +1919,7 @@ void TurboAssembler::li(Register rd, Operand j, LiFlags mode) { } } -void TurboAssembler::MultiPush(RegList regs) { +void MacroAssembler::MultiPush(RegList regs) { int16_t num_to_push = regs.Count(); int16_t stack_offset = num_to_push * kPointerSize; @@ -1932,7 +1932,7 @@ void TurboAssembler::MultiPush(RegList regs) { } } -void TurboAssembler::MultiPop(RegList regs) { +void MacroAssembler::MultiPop(RegList regs) { int16_t stack_offset = 0; for (int16_t i = 0; i < kNumRegisters; i++) { @@ -1944,7 +1944,7 @@ void TurboAssembler::MultiPop(RegList regs) { daddiu(sp, sp, stack_offset); } -void TurboAssembler::MultiPushFPU(DoubleRegList regs) { +void MacroAssembler::MultiPushFPU(DoubleRegList regs) { int16_t num_to_push = regs.Count(); int16_t stack_offset = num_to_push * kDoubleSize; @@ -1957,7 +1957,7 @@ void TurboAssembler::MultiPushFPU(DoubleRegList regs) { } } -void TurboAssembler::MultiPopFPU(DoubleRegList regs) { +void MacroAssembler::MultiPopFPU(DoubleRegList regs) { int16_t stack_offset = 0; for (int16_t i = 0; i < kNumRegisters; i++) { @@ -1969,7 +1969,7 @@ void TurboAssembler::MultiPopFPU(DoubleRegList regs) { daddiu(sp, sp, stack_offset); } -void TurboAssembler::MultiPushMSA(DoubleRegList regs) { +void MacroAssembler::MultiPushMSA(DoubleRegList regs) { int16_t num_to_push = regs.Count(); int16_t stack_offset = num_to_push * kSimd128Size; @@ -1982,7 +1982,7 @@ void TurboAssembler::MultiPushMSA(DoubleRegList regs) { } } -void TurboAssembler::MultiPopMSA(DoubleRegList regs) { +void MacroAssembler::MultiPopMSA(DoubleRegList regs) { int16_t stack_offset = 0; for (int16_t i = 0; i < kNumRegisters; i++) { @@ -1994,14 +1994,14 @@ void TurboAssembler::MultiPopMSA(DoubleRegList regs) { daddiu(sp, sp, stack_offset); } -void TurboAssembler::Ext(Register rt, Register rs, uint16_t pos, +void MacroAssembler::Ext(Register rt, Register rs, uint16_t pos, uint16_t size) { DCHECK_LT(pos, 32); DCHECK_LT(pos + size, 33); ext_(rt, rs, pos, size); } -void TurboAssembler::Dext(Register rt, Register rs, uint16_t pos, +void MacroAssembler::Dext(Register rt, Register rs, uint16_t pos, uint16_t size) { DCHECK(pos < 64 && 0 < size && size <= 64 && 0 < pos + size && pos + size <= 64); @@ -2014,7 +2014,7 @@ void TurboAssembler::Dext(Register rt, Register rs, uint16_t pos, } } -void TurboAssembler::Ins(Register rt, Register rs, uint16_t pos, +void MacroAssembler::Ins(Register rt, Register rs, uint16_t pos, uint16_t size) { DCHECK_LT(pos, 32); DCHECK_LE(pos + size, 32); @@ -2022,7 +2022,7 @@ void TurboAssembler::Ins(Register rt, Register rs, uint16_t pos, ins_(rt, rs, pos, size); } -void TurboAssembler::Dins(Register rt, Register rs, uint16_t pos, +void MacroAssembler::Dins(Register rt, Register rs, uint16_t pos, uint16_t size) { DCHECK(pos < 64 && 0 < size && size <= 64 && 0 < pos + size && pos + size <= 64); @@ -2035,7 +2035,7 @@ void TurboAssembler::Dins(Register rt, Register rs, uint16_t pos, } } -void 
TurboAssembler::ExtractBits(Register dest, Register source, Register pos, +void MacroAssembler::ExtractBits(Register dest, Register source, Register pos, int size, bool sign_extend) { dsrav(dest, source, pos); Dext(dest, dest, 0, size); @@ -2057,7 +2057,7 @@ void TurboAssembler::ExtractBits(Register dest, Register source, Register pos, } } -void TurboAssembler::InsertBits(Register dest, Register source, Register pos, +void MacroAssembler::InsertBits(Register dest, Register source, Register pos, int size) { Dror(dest, dest, pos); Dins(dest, source, 0, size); @@ -2069,7 +2069,7 @@ void TurboAssembler::InsertBits(Register dest, Register source, Register pos, } } -void TurboAssembler::Neg_s(FPURegister fd, FPURegister fs) { +void MacroAssembler::Neg_s(FPURegister fd, FPURegister fs) { if (kArchVariant == kMips64r6) { // r6 neg_s changes the sign for NaN-like operands as well. neg_s(fd, fs); @@ -2094,7 +2094,7 @@ void TurboAssembler::Neg_s(FPURegister fd, FPURegister fs) { } } -void TurboAssembler::Neg_d(FPURegister fd, FPURegister fs) { +void MacroAssembler::Neg_d(FPURegister fd, FPURegister fs) { if (kArchVariant == kMips64r6) { // r6 neg_d changes the sign for NaN-like operands as well. neg_d(fd, fs); @@ -2119,14 +2119,14 @@ void TurboAssembler::Neg_d(FPURegister fd, FPURegister fs) { } } -void TurboAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) { +void MacroAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) { // Move the data from fs to t8. BlockTrampolinePoolScope block_trampoline_pool(this); mfc1(t8, fs); Cvt_d_uw(fd, t8); } -void TurboAssembler::Cvt_d_uw(FPURegister fd, Register rs) { +void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) { BlockTrampolinePoolScope block_trampoline_pool(this); // Convert rs to a FP value in fd. @@ -2139,14 +2139,14 @@ void TurboAssembler::Cvt_d_uw(FPURegister fd, Register rs) { cvt_d_l(fd, fd); } -void TurboAssembler::Cvt_d_ul(FPURegister fd, FPURegister fs) { +void MacroAssembler::Cvt_d_ul(FPURegister fd, FPURegister fs) { BlockTrampolinePoolScope block_trampoline_pool(this); // Move the data from fs to t8. dmfc1(t8, fs); Cvt_d_ul(fd, t8); } -void TurboAssembler::Cvt_d_ul(FPURegister fd, Register rs) { +void MacroAssembler::Cvt_d_ul(FPURegister fd, Register rs) { BlockTrampolinePoolScope block_trampoline_pool(this); // Convert rs to a FP value in fd. @@ -2174,14 +2174,14 @@ void TurboAssembler::Cvt_d_ul(FPURegister fd, Register rs) { bind(&conversion_done); } -void TurboAssembler::Cvt_s_uw(FPURegister fd, FPURegister fs) { +void MacroAssembler::Cvt_s_uw(FPURegister fd, FPURegister fs) { BlockTrampolinePoolScope block_trampoline_pool(this); // Move the data from fs to t8. mfc1(t8, fs); Cvt_s_uw(fd, t8); } -void TurboAssembler::Cvt_s_uw(FPURegister fd, Register rs) { +void MacroAssembler::Cvt_s_uw(FPURegister fd, Register rs) { BlockTrampolinePoolScope block_trampoline_pool(this); // Convert rs to a FP value in fd. DCHECK(rs != t9); @@ -2193,14 +2193,14 @@ void TurboAssembler::Cvt_s_uw(FPURegister fd, Register rs) { cvt_s_l(fd, fd); } -void TurboAssembler::Cvt_s_ul(FPURegister fd, FPURegister fs) { +void MacroAssembler::Cvt_s_ul(FPURegister fd, FPURegister fs) { BlockTrampolinePoolScope block_trampoline_pool(this); // Move the data from fs to t8. dmfc1(t8, fs); Cvt_s_ul(fd, t8); } -void TurboAssembler::Cvt_s_ul(FPURegister fd, Register rs) { +void MacroAssembler::Cvt_s_ul(FPURegister fd, Register rs) { BlockTrampolinePoolScope block_trampoline_pool(this); // Convert rs to a FP value in fd. 
@@ -2260,28 +2260,28 @@ void MacroAssembler::Trunc_l_ud(FPURegister fd, FPURegister fs, trunc_l_d(fd, fs); } -void TurboAssembler::Trunc_uw_d(FPURegister fd, FPURegister fs, +void MacroAssembler::Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch) { BlockTrampolinePoolScope block_trampoline_pool(this); Trunc_uw_d(t8, fs, scratch); mtc1(t8, fd); } -void TurboAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs, +void MacroAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs, FPURegister scratch) { BlockTrampolinePoolScope block_trampoline_pool(this); Trunc_uw_s(t8, fs, scratch); mtc1(t8, fd); } -void TurboAssembler::Trunc_ul_d(FPURegister fd, FPURegister fs, +void MacroAssembler::Trunc_ul_d(FPURegister fd, FPURegister fs, FPURegister scratch, Register result) { BlockTrampolinePoolScope block_trampoline_pool(this); Trunc_ul_d(t8, fs, scratch, result); dmtc1(t8, fd); } -void TurboAssembler::Trunc_ul_s(FPURegister fd, FPURegister fs, +void MacroAssembler::Trunc_ul_s(FPURegister fd, FPURegister fs, FPURegister scratch, Register result) { BlockTrampolinePoolScope block_trampoline_pool(this); Trunc_ul_s(t8, fs, scratch, result); @@ -2304,7 +2304,7 @@ void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) { ceil_w_d(fd, fs); } -void TurboAssembler::Trunc_uw_d(Register rd, FPURegister fs, +void MacroAssembler::Trunc_uw_d(Register rd, FPURegister fs, FPURegister scratch) { DCHECK(fs != scratch); DCHECK(rd != at); @@ -2340,7 +2340,7 @@ void TurboAssembler::Trunc_uw_d(Register rd, FPURegister fs, bind(&done); } -void TurboAssembler::Trunc_uw_s(Register rd, FPURegister fs, +void MacroAssembler::Trunc_uw_s(Register rd, FPURegister fs, FPURegister scratch) { DCHECK(fs != scratch); DCHECK(rd != at); @@ -2375,7 +2375,7 @@ void TurboAssembler::Trunc_uw_s(Register rd, FPURegister fs, bind(&done); } -void TurboAssembler::Trunc_ul_d(Register rd, FPURegister fs, +void MacroAssembler::Trunc_ul_d(Register rd, FPURegister fs, FPURegister scratch, Register result) { DCHECK(fs != scratch); DCHECK(result.is_valid() ? !AreAliased(rd, result, at) : !AreAliased(rd, at)); @@ -2430,7 +2430,7 @@ void TurboAssembler::Trunc_ul_d(Register rd, FPURegister fs, bind(&fail); } -void TurboAssembler::Trunc_ul_s(Register rd, FPURegister fs, +void MacroAssembler::Trunc_ul_s(Register rd, FPURegister fs, FPURegister scratch, Register result) { DCHECK(fs != scratch); DCHECK(result.is_valid() ? 
!AreAliased(rd, result, at) : !AreAliased(rd, at)); @@ -2490,7 +2490,7 @@ void TurboAssembler::Trunc_ul_s(Register rd, FPURegister fs, } template -void TurboAssembler::RoundDouble(FPURegister dst, FPURegister src, +void MacroAssembler::RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode, RoundFunc round) { BlockTrampolinePoolScope block_trampoline_pool(this); Register scratch = t8; @@ -2522,36 +2522,36 @@ void TurboAssembler::RoundDouble(FPURegister dst, FPURegister src, } } -void TurboAssembler::Floor_d_d(FPURegister dst, FPURegister src) { +void MacroAssembler::Floor_d_d(FPURegister dst, FPURegister src) { RoundDouble(dst, src, mode_floor, - [](TurboAssembler* tasm, FPURegister dst, FPURegister src) { - tasm->floor_l_d(dst, src); + [](MacroAssembler* masm, FPURegister dst, FPURegister src) { + masm->floor_l_d(dst, src); }); } -void TurboAssembler::Ceil_d_d(FPURegister dst, FPURegister src) { +void MacroAssembler::Ceil_d_d(FPURegister dst, FPURegister src) { RoundDouble(dst, src, mode_ceil, - [](TurboAssembler* tasm, FPURegister dst, FPURegister src) { - tasm->ceil_l_d(dst, src); + [](MacroAssembler* masm, FPURegister dst, FPURegister src) { + masm->ceil_l_d(dst, src); }); } -void TurboAssembler::Trunc_d_d(FPURegister dst, FPURegister src) { +void MacroAssembler::Trunc_d_d(FPURegister dst, FPURegister src) { RoundDouble(dst, src, mode_trunc, - [](TurboAssembler* tasm, FPURegister dst, FPURegister src) { - tasm->trunc_l_d(dst, src); + [](MacroAssembler* masm, FPURegister dst, FPURegister src) { + masm->trunc_l_d(dst, src); }); } -void TurboAssembler::Round_d_d(FPURegister dst, FPURegister src) { +void MacroAssembler::Round_d_d(FPURegister dst, FPURegister src) { RoundDouble(dst, src, mode_round, - [](TurboAssembler* tasm, FPURegister dst, FPURegister src) { - tasm->round_l_d(dst, src); + [](MacroAssembler* masm, FPURegister dst, FPURegister src) { + masm->round_l_d(dst, src); }); } template -void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src, +void MacroAssembler::RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode, RoundFunc round) { BlockTrampolinePoolScope block_trampoline_pool(this); Register scratch = t8; @@ -2586,35 +2586,35 @@ void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src, } } -void TurboAssembler::Floor_s_s(FPURegister dst, FPURegister src) { +void MacroAssembler::Floor_s_s(FPURegister dst, FPURegister src) { RoundFloat(dst, src, mode_floor, - [](TurboAssembler* tasm, FPURegister dst, FPURegister src) { - tasm->floor_w_s(dst, src); + [](MacroAssembler* masm, FPURegister dst, FPURegister src) { + masm->floor_w_s(dst, src); }); } -void TurboAssembler::Ceil_s_s(FPURegister dst, FPURegister src) { +void MacroAssembler::Ceil_s_s(FPURegister dst, FPURegister src) { RoundFloat(dst, src, mode_ceil, - [](TurboAssembler* tasm, FPURegister dst, FPURegister src) { - tasm->ceil_w_s(dst, src); + [](MacroAssembler* masm, FPURegister dst, FPURegister src) { + masm->ceil_w_s(dst, src); }); } -void TurboAssembler::Trunc_s_s(FPURegister dst, FPURegister src) { +void MacroAssembler::Trunc_s_s(FPURegister dst, FPURegister src) { RoundFloat(dst, src, mode_trunc, - [](TurboAssembler* tasm, FPURegister dst, FPURegister src) { - tasm->trunc_w_s(dst, src); + [](MacroAssembler* masm, FPURegister dst, FPURegister src) { + masm->trunc_w_s(dst, src); }); } -void TurboAssembler::Round_s_s(FPURegister dst, FPURegister src) { +void MacroAssembler::Round_s_s(FPURegister dst, FPURegister src) { RoundFloat(dst, src, mode_round, - [](TurboAssembler* tasm, 
FPURegister dst, FPURegister src) { - tasm->round_w_s(dst, src); + [](MacroAssembler* masm, FPURegister dst, FPURegister src) { + masm->round_w_s(dst, src); }); } -void TurboAssembler::LoadLane(MSASize sz, MSARegister dst, uint8_t laneidx, +void MacroAssembler::LoadLane(MSASize sz, MSARegister dst, uint8_t laneidx, MemOperand src) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -2640,7 +2640,7 @@ void TurboAssembler::LoadLane(MSASize sz, MSARegister dst, uint8_t laneidx, } } -void TurboAssembler::StoreLane(MSASize sz, MSARegister src, uint8_t laneidx, +void MacroAssembler::StoreLane(MSASize sz, MSARegister src, uint8_t laneidx, MemOperand dst) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -2684,7 +2684,7 @@ void TurboAssembler::StoreLane(MSASize sz, MSARegister src, uint8_t laneidx, dotp_instr(dst, kSimd128ScratchReg, kSimd128RegZero); \ break; -void TurboAssembler::ExtMulLow(MSADataType type, MSARegister dst, +void MacroAssembler::ExtMulLow(MSADataType type, MSARegister dst, MSARegister src1, MSARegister src2) { switch (type) { EXT_MUL_BINOP(MSAS8, ilvr_b, dotp_s_h) @@ -2698,7 +2698,7 @@ void TurboAssembler::ExtMulLow(MSADataType type, MSARegister dst, } } -void TurboAssembler::ExtMulHigh(MSADataType type, MSARegister dst, +void MacroAssembler::ExtMulHigh(MSADataType type, MSARegister dst, MSARegister src1, MSARegister src2) { switch (type) { EXT_MUL_BINOP(MSAS8, ilvl_b, dotp_s_h) @@ -2713,7 +2713,7 @@ void TurboAssembler::ExtMulHigh(MSADataType type, MSARegister dst, } #undef EXT_MUL_BINOP -void TurboAssembler::LoadSplat(MSASize sz, MSARegister dst, MemOperand src) { +void MacroAssembler::LoadSplat(MSASize sz, MSARegister dst, MemOperand src) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); switch (sz) { @@ -2738,7 +2738,7 @@ void TurboAssembler::LoadSplat(MSASize sz, MSARegister dst, MemOperand src) { } } -void TurboAssembler::ExtAddPairwise(MSADataType type, MSARegister dst, +void MacroAssembler::ExtAddPairwise(MSADataType type, MSARegister dst, MSARegister src) { switch (type) { case MSAS8: @@ -2758,7 +2758,7 @@ void TurboAssembler::ExtAddPairwise(MSADataType type, MSARegister dst, } } -void TurboAssembler::MSARoundW(MSARegister dst, MSARegister src, +void MacroAssembler::MSARoundW(MSARegister dst, MSARegister src, FPURoundingMode mode) { BlockTrampolinePoolScope block_trampoline_pool(this); Register scratch = t8; @@ -2774,7 +2774,7 @@ void TurboAssembler::MSARoundW(MSARegister dst, MSARegister src, ctcmsa(MSACSR, scratch); } -void TurboAssembler::MSARoundD(MSARegister dst, MSARegister src, +void MacroAssembler::MSARoundD(MSARegister dst, MSARegister src, FPURoundingMode mode) { BlockTrampolinePoolScope block_trampoline_pool(this); Register scratch = t8; @@ -2818,7 +2818,7 @@ void MacroAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, sub_d(fd, scratch, fr); } -void TurboAssembler::CompareF(SecondaryField sizeField, FPUCondition cc, +void MacroAssembler::CompareF(SecondaryField sizeField, FPUCondition cc, FPURegister cmp1, FPURegister cmp2) { if (kArchVariant == kMips64r6) { sizeField = sizeField == D ? 
L : W; @@ -2829,12 +2829,12 @@ void TurboAssembler::CompareF(SecondaryField sizeField, FPUCondition cc, } } -void TurboAssembler::CompareIsNanF(SecondaryField sizeField, FPURegister cmp1, +void MacroAssembler::CompareIsNanF(SecondaryField sizeField, FPURegister cmp1, FPURegister cmp2) { CompareF(sizeField, UN, cmp1, cmp2); } -void TurboAssembler::BranchTrueShortF(Label* target, BranchDelaySlot bd) { +void MacroAssembler::BranchTrueShortF(Label* target, BranchDelaySlot bd) { if (kArchVariant == kMips64r6) { bc1nez(target, kDoubleCompareReg); } else { @@ -2845,7 +2845,7 @@ void TurboAssembler::BranchTrueShortF(Label* target, BranchDelaySlot bd) { } } -void TurboAssembler::BranchFalseShortF(Label* target, BranchDelaySlot bd) { +void MacroAssembler::BranchFalseShortF(Label* target, BranchDelaySlot bd) { if (kArchVariant == kMips64r6) { bc1eqz(target, kDoubleCompareReg); } else { @@ -2856,7 +2856,7 @@ void TurboAssembler::BranchFalseShortF(Label* target, BranchDelaySlot bd) { } } -void TurboAssembler::BranchTrueF(Label* target, BranchDelaySlot bd) { +void MacroAssembler::BranchTrueF(Label* target, BranchDelaySlot bd) { bool long_branch = target->is_bound() ? !is_near(target) : is_trampoline_emitted(); if (long_branch) { @@ -2869,7 +2869,7 @@ void TurboAssembler::BranchTrueF(Label* target, BranchDelaySlot bd) { } } -void TurboAssembler::BranchFalseF(Label* target, BranchDelaySlot bd) { +void MacroAssembler::BranchFalseF(Label* target, BranchDelaySlot bd) { bool long_branch = target->is_bound() ? !is_near(target) : is_trampoline_emitted(); if (long_branch) { @@ -2882,7 +2882,7 @@ void TurboAssembler::BranchFalseF(Label* target, BranchDelaySlot bd) { } } -void TurboAssembler::BranchMSA(Label* target, MSABranchDF df, +void MacroAssembler::BranchMSA(Label* target, MSABranchDF df, MSABranchCondition cond, MSARegister wt, BranchDelaySlot bd) { { @@ -2904,7 +2904,7 @@ void TurboAssembler::BranchMSA(Label* target, MSABranchDF df, } } -void TurboAssembler::BranchShortMSA(MSABranchDF df, Label* target, +void MacroAssembler::BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond, MSARegister wt, BranchDelaySlot bd) { if (IsEnabled(MIPS_SIMD)) { @@ -2961,7 +2961,7 @@ void TurboAssembler::BranchShortMSA(MSABranchDF df, Label* target, } } -void TurboAssembler::FmoveLow(FPURegister dst, Register src_low) { +void MacroAssembler::FmoveLow(FPURegister dst, Register src_low) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); DCHECK(src_low != scratch); @@ -2970,14 +2970,14 @@ void TurboAssembler::FmoveLow(FPURegister dst, Register src_low) { mthc1(scratch, dst); } -void TurboAssembler::Move(FPURegister dst, uint32_t src) { +void MacroAssembler::Move(FPURegister dst, uint32_t src) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); li(scratch, Operand(static_cast(src))); mtc1(scratch, dst); } -void TurboAssembler::Move(FPURegister dst, uint64_t src) { +void MacroAssembler::Move(FPURegister dst, uint64_t src) { // Handle special values first. 
if (src == base::bit_cast(0.0) && has_double_zero_reg_set_) { mov_d(dst, kDoubleRegZero); @@ -3011,7 +3011,7 @@ void TurboAssembler::Move(FPURegister dst, uint64_t src) { } } -void TurboAssembler::Movz(Register rd, Register rs, Register rt) { +void MacroAssembler::Movz(Register rd, Register rs, Register rt) { if (kArchVariant == kMips64r6) { Label done; Branch(&done, ne, rt, Operand(zero_reg)); @@ -3022,7 +3022,7 @@ void TurboAssembler::Movz(Register rd, Register rs, Register rt) { } } -void TurboAssembler::Movn(Register rd, Register rs, Register rt) { +void MacroAssembler::Movn(Register rd, Register rs, Register rt) { if (kArchVariant == kMips64r6) { Label done; Branch(&done, eq, rt, Operand(zero_reg)); @@ -3033,7 +3033,7 @@ void TurboAssembler::Movn(Register rd, Register rs, Register rt) { } } -void TurboAssembler::LoadZeroOnCondition(Register rd, Register rs, +void MacroAssembler::LoadZeroOnCondition(Register rd, Register rs, const Operand& rt, Condition cond) { BlockTrampolinePoolScope block_trampoline_pool(this); switch (cond) { @@ -3125,7 +3125,7 @@ void TurboAssembler::LoadZeroOnCondition(Register rd, Register rs, } } -void TurboAssembler::LoadZeroIfConditionNotZero(Register dest, +void MacroAssembler::LoadZeroIfConditionNotZero(Register dest, Register condition) { if (kArchVariant == kMips64r6) { seleqz(dest, dest, condition); @@ -3134,7 +3134,7 @@ void TurboAssembler::LoadZeroIfConditionNotZero(Register dest, } } -void TurboAssembler::LoadZeroIfConditionZero(Register dest, +void MacroAssembler::LoadZeroIfConditionZero(Register dest, Register condition) { if (kArchVariant == kMips64r6) { selnez(dest, dest, condition); @@ -3143,7 +3143,7 @@ void TurboAssembler::LoadZeroIfConditionZero(Register dest, } } -void TurboAssembler::LoadZeroIfFPUCondition(Register dest) { +void MacroAssembler::LoadZeroIfFPUCondition(Register dest) { if (kArchVariant == kMips64r6) { dmfc1(kScratchReg, kDoubleCompareReg); LoadZeroIfConditionNotZero(dest, kScratchReg); @@ -3152,7 +3152,7 @@ void TurboAssembler::LoadZeroIfFPUCondition(Register dest) { } } -void TurboAssembler::LoadZeroIfNotFPUCondition(Register dest) { +void MacroAssembler::LoadZeroIfNotFPUCondition(Register dest) { if (kArchVariant == kMips64r6) { dmfc1(kScratchReg, kDoubleCompareReg); LoadZeroIfConditionZero(dest, kScratchReg); @@ -3161,19 +3161,19 @@ void TurboAssembler::LoadZeroIfNotFPUCondition(Register dest) { } } -void TurboAssembler::Movt(Register rd, Register rs, uint16_t cc) { +void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) { movt(rd, rs, cc); } -void TurboAssembler::Movf(Register rd, Register rs, uint16_t cc) { +void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) { movf(rd, rs, cc); } -void TurboAssembler::Clz(Register rd, Register rs) { clz(rd, rs); } +void MacroAssembler::Clz(Register rd, Register rs) { clz(rd, rs); } -void TurboAssembler::Dclz(Register rd, Register rs) { dclz(rd, rs); } +void MacroAssembler::Dclz(Register rd, Register rs) { dclz(rd, rs); } -void TurboAssembler::Ctz(Register rd, Register rs) { +void MacroAssembler::Ctz(Register rd, Register rs) { if (kArchVariant == kMips64r6) { // We don't have an instruction to count the number of trailing zeroes. 
// Start by flipping the bits end-for-end so we can count the number of @@ -3199,7 +3199,7 @@ void TurboAssembler::Ctz(Register rd, Register rs) { } } -void TurboAssembler::Dctz(Register rd, Register rs) { +void MacroAssembler::Dctz(Register rd, Register rs) { if (kArchVariant == kMips64r6) { // We don't have an instruction to count the number of trailing zeroes. // Start by flipping the bits end-for-end so we can count the number of @@ -3225,7 +3225,7 @@ void TurboAssembler::Dctz(Register rd, Register rs) { } } -void TurboAssembler::Popcnt(Register rd, Register rs) { +void MacroAssembler::Popcnt(Register rd, Register rs) { ASM_CODE_COMMENT(this); // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel // @@ -3277,7 +3277,7 @@ void TurboAssembler::Popcnt(Register rd, Register rs) { srl(rd, rd, shift); } -void TurboAssembler::Dpopcnt(Register rd, Register rs) { +void MacroAssembler::Dpopcnt(Register rd, Register rs) { ASM_CODE_COMMENT(this); uint64_t B0 = 0x5555555555555555l; // (T)~(T)0/3 uint64_t B1 = 0x3333333333333333l; // (T)~(T)0/15*3 @@ -3307,7 +3307,7 @@ void TurboAssembler::Dpopcnt(Register rd, Register rs) { dsrl32(rd, rd, shift); } -void TurboAssembler::TryInlineTruncateDoubleToI(Register result, +void MacroAssembler::TryInlineTruncateDoubleToI(Register result, DoubleRegister double_input, Label* done) { DoubleRegister single_scratch = kScratchDoubleReg.low(); @@ -3327,7 +3327,7 @@ void TurboAssembler::TryInlineTruncateDoubleToI(Register result, Branch(done, eq, scratch, Operand(zero_reg)); } -void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, +void MacroAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result, DoubleRegister double_input, StubCallMode stub_mode) { @@ -3365,19 +3365,19 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, DCHECK((cond == cc_always && rs == zero_reg && rt.rm() == zero_reg) || \ (cond != cc_always && (rs != zero_reg || rt.rm() != zero_reg))) -void TurboAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) { +void MacroAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) { DCHECK_EQ(kArchVariant, kMips64r6 ? 
is_int26(offset) : is_int16(offset)); BranchShort(offset, bdslot); } -void TurboAssembler::Branch(int32_t offset, Condition cond, Register rs, +void MacroAssembler::Branch(int32_t offset, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bdslot) { bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot); DCHECK(is_near); USE(is_near); } -void TurboAssembler::Branch(Label* L, BranchDelaySlot bdslot) { +void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) { if (L->is_bound()) { if (is_near_branch(L)) { BranchShort(L, bdslot); @@ -3393,7 +3393,7 @@ void TurboAssembler::Branch(Label* L, BranchDelaySlot bdslot) { } } -void TurboAssembler::Branch(Label* L, Condition cond, Register rs, +void MacroAssembler::Branch(Label* L, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bdslot) { if (L->is_bound()) { if (!BranchShortCheck(0, L, cond, rs, rt, bdslot)) { @@ -3424,7 +3424,7 @@ void TurboAssembler::Branch(Label* L, Condition cond, Register rs, } } -void TurboAssembler::Branch(Label* L, Condition cond, Register rs, +void MacroAssembler::Branch(Label* L, Condition cond, Register rs, RootIndex index, BranchDelaySlot bdslot) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -3432,7 +3432,7 @@ void TurboAssembler::Branch(Label* L, Condition cond, Register rs, Branch(L, cond, rs, Operand(scratch), bdslot); } -void TurboAssembler::BranchShortHelper(int16_t offset, Label* L, +void MacroAssembler::BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot) { DCHECK(L == nullptr || offset == 0); offset = GetOffset(offset, L, OffsetSize::kOffset16); @@ -3442,13 +3442,13 @@ void TurboAssembler::BranchShortHelper(int16_t offset, Label* L, if (bdslot == PROTECT) nop(); } -void TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L) { +void MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L) { DCHECK(L == nullptr || offset == 0); offset = GetOffset(offset, L, OffsetSize::kOffset26); bc(offset); } -void TurboAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) { +void MacroAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) { if (kArchVariant == kMips64r6 && bdslot == PROTECT) { DCHECK(is_int26(offset)); BranchShortHelperR6(offset, nullptr); @@ -3458,7 +3458,7 @@ void TurboAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) { } } -void TurboAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) { +void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) { if (kArchVariant == kMips64r6 && bdslot == PROTECT) { BranchShortHelperR6(0, L); } else { @@ -3466,7 +3466,7 @@ void TurboAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) { } } -int32_t TurboAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) { +int32_t MacroAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) { if (L) { offset = branch_offset_helper(L, bits) >> 2; } else { @@ -3475,7 +3475,7 @@ int32_t TurboAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) { return offset; } -Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt, +Register MacroAssembler::GetRtAsRegisterHelper(const Operand& rt, Register scratch) { Register r2 = no_reg; if (rt.is_reg()) { @@ -3488,14 +3488,14 @@ Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt, return r2; } -bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, +bool MacroAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits) { if (!is_near(L, bits)) return false; *offset = 
GetOffset(*offset, L, bits); return true; } -bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits, +bool MacroAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits, Register* scratch, const Operand& rt) { if (!is_near(L, bits)) return false; *scratch = GetRtAsRegisterHelper(rt, *scratch); @@ -3503,7 +3503,7 @@ bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits, return true; } -bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, +bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L, Condition cond, Register rs, const Operand& rt) { DCHECK(L == nullptr || offset == 0); @@ -3716,7 +3716,7 @@ bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, return true; } -bool TurboAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond, +bool MacroAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bdslot) { DCHECK(L == nullptr || offset == 0); @@ -3853,7 +3853,7 @@ bool TurboAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond, return true; } -bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond, +bool MacroAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bdslot) { BRANCH_ARGS_CHECK(cond, rs, rt); @@ -3876,28 +3876,28 @@ bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond, } } -void TurboAssembler::BranchShort(int32_t offset, Condition cond, Register rs, +void MacroAssembler::BranchShort(int32_t offset, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bdslot) { BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot); } -void TurboAssembler::BranchShort(Label* L, Condition cond, Register rs, +void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bdslot) { BranchShortCheck(0, L, cond, rs, rt, bdslot); } -void TurboAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) { +void MacroAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) { BranchAndLinkShort(offset, bdslot); } -void TurboAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs, +void MacroAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bdslot) { bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt, bdslot); DCHECK(is_near); USE(is_near); } -void TurboAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) { +void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) { if (L->is_bound()) { if (is_near_branch(L)) { BranchAndLinkShort(L, bdslot); @@ -3913,7 +3913,7 @@ void TurboAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) { } } -void TurboAssembler::BranchAndLink(Label* L, Condition cond, Register rs, +void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bdslot) { if (L->is_bound()) { if (!BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot)) { @@ -3936,7 +3936,7 @@ void TurboAssembler::BranchAndLink(Label* L, Condition cond, Register rs, } } -void TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L, +void MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot) { DCHECK(L == nullptr || offset == 0); offset = GetOffset(offset, L, OffsetSize::kOffset16); @@ -3946,13 +3946,13 @@ void 
TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L, if (bdslot == PROTECT) nop(); } -void TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) { +void MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) { DCHECK(L == nullptr || offset == 0); offset = GetOffset(offset, L, OffsetSize::kOffset26); balc(offset); } -void TurboAssembler::BranchAndLinkShort(int32_t offset, +void MacroAssembler::BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot) { if (kArchVariant == kMips64r6 && bdslot == PROTECT) { DCHECK(is_int26(offset)); @@ -3963,7 +3963,7 @@ void TurboAssembler::BranchAndLinkShort(int32_t offset, } } -void TurboAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) { +void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) { if (kArchVariant == kMips64r6 && bdslot == PROTECT) { BranchAndLinkShortHelperR6(0, L); } else { @@ -3971,7 +3971,7 @@ void TurboAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) { } } -bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L, +bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L, Condition cond, Register rs, const Operand& rt) { DCHECK(L == nullptr || offset == 0); @@ -4113,7 +4113,7 @@ bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L, // Pre r6 we need to use a bgezal or bltzal, but they can't be used directly // with the slt instructions. We could use sub or add instead but we would miss // overflow cases, so we keep slt and add an intermediate third instruction. -bool TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L, +bool MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bdslot) { @@ -4203,7 +4203,7 @@ bool TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L, return true; } -bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L, +bool MacroAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bdslot) { @@ -4227,7 +4227,7 @@ bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L, } } -void TurboAssembler::LoadFromConstantsTable(Register destination, +void MacroAssembler::LoadFromConstantsTable(Register destination, int constant_index) { ASM_CODE_COMMENT(this); DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable)); @@ -4237,11 +4237,11 @@ void TurboAssembler::LoadFromConstantsTable(Register destination, FixedArray::kHeaderSize + constant_index * kPointerSize)); } -void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) { +void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) { Ld(destination, MemOperand(kRootRegister, offset)); } -void TurboAssembler::LoadRootRegisterOffset(Register destination, +void MacroAssembler::LoadRootRegisterOffset(Register destination, intptr_t offset) { if (offset == 0) { Move(destination, kRootRegister); @@ -4250,7 +4250,7 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination, } } -MemOperand TurboAssembler::ExternalReferenceAsOperand( +MemOperand MacroAssembler::ExternalReferenceAsOperand( ExternalReference reference, Register scratch) { if (root_array_available_ && options().enable_root_relative_access) { int64_t offset = @@ -4279,7 +4279,7 @@ MemOperand TurboAssembler::ExternalReferenceAsOperand( return MemOperand(scratch, 0); } -void TurboAssembler::Jump(Register target, 
Condition cond, Register rs, +void MacroAssembler::Jump(Register target, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bd) { BlockTrampolinePoolScope block_trampoline_pool(this); if (kArchVariant == kMips64r6 && bd == PROTECT) { @@ -4303,7 +4303,7 @@ void TurboAssembler::Jump(Register target, Condition cond, Register rs, } } -void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, +void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bd) { Label skip; @@ -4320,13 +4320,13 @@ void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, } } -void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond, +void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bd) { DCHECK(!RelocInfo::IsCodeTarget(rmode)); Jump(static_cast(target), rmode, cond, rs, rt, bd); } -void TurboAssembler::Jump(Handle code, RelocInfo::Mode rmode, +void MacroAssembler::Jump(Handle code, RelocInfo::Mode rmode, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bd) { DCHECK(RelocInfo::IsCodeTarget(rmode)); @@ -4347,13 +4347,13 @@ void TurboAssembler::Jump(Handle code, RelocInfo::Mode rmode, bind(&skip); } -void TurboAssembler::Jump(const ExternalReference& reference) { +void MacroAssembler::Jump(const ExternalReference& reference) { li(t9, reference); Jump(t9); } // Note: To call gcc-compiled C code on mips, you must call through t9. -void TurboAssembler::Call(Register target, Condition cond, Register rs, +void MacroAssembler::Call(Register target, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bd) { BlockTrampolinePoolScope block_trampoline_pool(this); if (kArchVariant == kMips64r6 && bd == PROTECT) { @@ -4392,14 +4392,14 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit, } } -void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond, +void MacroAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bd) { BlockTrampolinePoolScope block_trampoline_pool(this); li(t9, Operand(static_cast(target), rmode), ADDRESS_LOAD); Call(t9, cond, rs, rt, bd); } -void TurboAssembler::Call(Handle code, RelocInfo::Mode rmode, +void MacroAssembler::Call(Handle code, RelocInfo::Mode rmode, Condition cond, Register rs, const Operand& rt, BranchDelaySlot bd) { BlockTrampolinePoolScope block_trampoline_pool(this); @@ -4412,7 +4412,7 @@ void TurboAssembler::Call(Handle code, RelocInfo::Mode rmode, Call(code.address(), rmode, cond, rs, rt, bd); } -void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { +void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { ASM_CODE_COMMENT(this); static_assert(kSystemPointerSize == 8); static_assert(kSmiTagSize == 1); @@ -4424,22 +4424,22 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { Ld(builtin_index, MemOperand(builtin_index, IsolateData::builtin_entry_table_offset())); } -void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin, +void MacroAssembler::LoadEntryFromBuiltin(Builtin builtin, Register destination) { Ld(destination, EntryFromBuiltinAsOperand(builtin)); } -MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { +MemOperand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { DCHECK(root_array_available()); return MemOperand(kRootRegister, 
IsolateData::BuiltinEntrySlotOffset(builtin)); } -void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { +void MacroAssembler::CallBuiltinByIndex(Register builtin_index) { ASM_CODE_COMMENT(this); LoadEntryFromBuiltinIndex(builtin_index); Call(builtin_index); } -void TurboAssembler::CallBuiltin(Builtin builtin) { +void MacroAssembler::CallBuiltin(Builtin builtin) { ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin)); Register temp = t9; switch (options().builtin_call_jump_mode) { @@ -4465,7 +4465,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin) { } } -void TurboAssembler::TailCallBuiltin(Builtin builtin) { +void MacroAssembler::TailCallBuiltin(Builtin builtin) { ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("tail call", builtin)); Register temp = t9; @@ -4492,7 +4492,7 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) { } } -void TurboAssembler::PatchAndJump(Address target) { +void MacroAssembler::PatchAndJump(Address target) { if (kArchVariant != kMips64r6) { ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); @@ -4512,7 +4512,7 @@ void TurboAssembler::PatchAndJump(Address target) { } } -void TurboAssembler::StoreReturnAddressAndCall(Register target) { +void MacroAssembler::StoreReturnAddressAndCall(Register target) { ASM_CODE_COMMENT(this); // This generates the final instruction sequence for calls to C functions // once an exit frame has been constructed. @@ -4554,12 +4554,12 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) { DCHECK_EQ(kNumInstructionsToJump, InstructionsGeneratedSince(&find_ra)); } -void TurboAssembler::Ret(Condition cond, Register rs, const Operand& rt, +void MacroAssembler::Ret(Condition cond, Register rs, const Operand& rt, BranchDelaySlot bd) { Jump(ra, cond, rs, rt, bd); } -void TurboAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) { +void MacroAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) { if (kArchVariant == kMips64r6 && bdslot == PROTECT && (!L->is_bound() || is_near_r6(L))) { BranchShortHelperR6(0, L); @@ -4583,7 +4583,7 @@ void TurboAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) { } } -void TurboAssembler::BranchLong(int32_t offset, BranchDelaySlot bdslot) { +void MacroAssembler::BranchLong(int32_t offset, BranchDelaySlot bdslot) { if (kArchVariant == kMips64r6 && bdslot == PROTECT && (is_int26(offset))) { BranchShortHelperR6(offset, nullptr); } else { @@ -4602,7 +4602,7 @@ void TurboAssembler::BranchLong(int32_t offset, BranchDelaySlot bdslot) { } } -void TurboAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) { +void MacroAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) { if (kArchVariant == kMips64r6 && bdslot == PROTECT && (!L->is_bound() || is_near_r6(L))) { BranchAndLinkShortHelperR6(0, L); @@ -4622,7 +4622,7 @@ void TurboAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) { } } -void TurboAssembler::DropArguments(Register count, ArgumentsCountType type, +void MacroAssembler::DropArguments(Register count, ArgumentsCountType type, ArgumentsCountMode mode, Register scratch) { switch (type) { case kCountIsInteger: { @@ -4646,7 +4646,7 @@ void TurboAssembler::DropArguments(Register count, ArgumentsCountType type, } } -void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, +void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc, Register receiver, ArgumentsCountType type, ArgumentsCountMode mode, @@ -4662,7 +4662,7 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, } 
} -void TurboAssembler::DropAndRet(int drop) { +void MacroAssembler::DropAndRet(int drop) { int32_t drop_size = drop * kSystemPointerSize; DCHECK(is_int31(drop_size)); @@ -4678,7 +4678,7 @@ void TurboAssembler::DropAndRet(int drop) { } } -void TurboAssembler::DropAndRet(int drop, Condition cond, Register r1, +void MacroAssembler::DropAndRet(int drop, Condition cond, Register r1, const Operand& r2) { // Both Drop and Ret need to be conditional. Label skip; @@ -4694,7 +4694,7 @@ void TurboAssembler::DropAndRet(int drop, Condition cond, Register r1, } } -void TurboAssembler::Drop(int count, Condition cond, Register reg, +void MacroAssembler::Drop(int count, Condition cond, Register reg, const Operand& op) { if (count <= 0) { return; @@ -4725,28 +4725,28 @@ void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) { } } -void TurboAssembler::Call(Label* target) { BranchAndLink(target); } +void MacroAssembler::Call(Label* target) { BranchAndLink(target); } -void TurboAssembler::LoadAddress(Register dst, Label* target) { +void MacroAssembler::LoadAddress(Register dst, Label* target) { uint64_t address = jump_address(target); li(dst, address); } -void TurboAssembler::Push(Smi smi) { +void MacroAssembler::Push(Smi smi) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); li(scratch, Operand(smi)); push(scratch); } -void TurboAssembler::Push(Handle handle) { +void MacroAssembler::Push(Handle handle) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); li(scratch, Operand(handle)); push(scratch); } -void TurboAssembler::PushArray(Register array, Register size, Register scratch, +void MacroAssembler::PushArray(Register array, Register size, Register scratch, Register scratch2, PushArrayOrder order) { DCHECK(!AreAliased(array, size, scratch, scratch2)); Label loop, entry; @@ -4806,12 +4806,12 @@ void MacroAssembler::PopStackHandler() { Sd(a1, MemOperand(scratch)); } -void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst, +void MacroAssembler::FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src) { sub_d(dst, src, kDoubleRegZero); } -void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) { +void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) { if (IsMipsSoftFloatABI) { if (kArchEndian == kLittle) { Move(dst, v0, v1); @@ -4823,7 +4823,7 @@ void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) { } } -void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) { +void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) { if (IsMipsSoftFloatABI) { if (kArchEndian == kLittle) { Move(dst, a0, a1); @@ -4835,7 +4835,7 @@ void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) { } } -void TurboAssembler::MovToFloatParameter(DoubleRegister src) { +void MacroAssembler::MovToFloatParameter(DoubleRegister src) { if (!IsMipsSoftFloatABI) { Move(f12, src); } else { @@ -4847,7 +4847,7 @@ void TurboAssembler::MovToFloatParameter(DoubleRegister src) { } } -void TurboAssembler::MovToFloatResult(DoubleRegister src) { +void MacroAssembler::MovToFloatResult(DoubleRegister src) { if (!IsMipsSoftFloatABI) { Move(f0, src); } else { @@ -4859,7 +4859,7 @@ void TurboAssembler::MovToFloatResult(DoubleRegister src) { } } -void TurboAssembler::MovToFloatParameters(DoubleRegister src1, +void MacroAssembler::MovToFloatParameters(DoubleRegister src1, DoubleRegister src2) { if (!IsMipsSoftFloatABI) { const DoubleRegister fparg2 = f13; @@ -4893,10 +4893,10 @@ void 
MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) { kind == StackLimitKind::kRealStackLimit ? ExternalReference::address_of_real_jslimit(isolate) : ExternalReference::address_of_jslimit(isolate); - DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit)); + DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit)); intptr_t offset = - TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit); + MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit); CHECK(is_int32(offset)); Ld(destination, MemOperand(kRootRegister, static_cast(offset))); } @@ -5139,7 +5139,7 @@ void MacroAssembler::GetInstanceTypeRange(Register map, Register type_reg, // ----------------------------------------------------------------------------- // Runtime calls. -void TurboAssembler::DaddOverflow(Register dst, Register left, +void MacroAssembler::DaddOverflow(Register dst, Register left, const Operand& right, Register overflow) { ASM_CODE_COMMENT(this); BlockTrampolinePoolScope block_trampoline_pool(this); @@ -5170,7 +5170,7 @@ void TurboAssembler::DaddOverflow(Register dst, Register left, } } -void TurboAssembler::DsubOverflow(Register dst, Register left, +void MacroAssembler::DsubOverflow(Register dst, Register left, const Operand& right, Register overflow) { ASM_CODE_COMMENT(this); BlockTrampolinePoolScope block_trampoline_pool(this); @@ -5201,7 +5201,7 @@ void TurboAssembler::DsubOverflow(Register dst, Register left, } } -void TurboAssembler::MulOverflow(Register dst, Register left, +void MacroAssembler::MulOverflow(Register dst, Register left, const Operand& right, Register overflow) { ASM_CODE_COMMENT(this); BlockTrampolinePoolScope block_trampoline_pool(this); @@ -5231,7 +5231,7 @@ void TurboAssembler::MulOverflow(Register dst, Register left, xor_(overflow, overflow, scratch); } -void TurboAssembler::DMulOverflow(Register dst, Register left, +void MacroAssembler::DMulOverflow(Register dst, Register left, const Operand& right, Register overflow) { ASM_CODE_COMMENT(this); BlockTrampolinePoolScope block_trampoline_pool(this); @@ -5347,10 +5347,10 @@ void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value, // ----------------------------------------------------------------------------- // Debugging. 
-void TurboAssembler::Trap() { stop(); } -void TurboAssembler::DebugBreak() { stop(); } +void MacroAssembler::Trap() { stop(); } +void MacroAssembler::DebugBreak() { stop(); } -void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs, +void MacroAssembler::Check(Condition cc, AbortReason reason, Register rs, Operand rt) { Label L; Branch(&L, cc, rs, rt); @@ -5359,7 +5359,7 @@ void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs, bind(&L); } -void TurboAssembler::Abort(AbortReason reason) { +void MacroAssembler::Abort(AbortReason reason) { Label abort_start; bind(&abort_start); if (v8_flags.code_comments) { @@ -5416,7 +5416,7 @@ void TurboAssembler::Abort(AbortReason reason) { } } -void TurboAssembler::LoadMap(Register destination, Register object) { +void MacroAssembler::LoadMap(Register destination, Register object) { Ld(destination, FieldMemOperand(object, HeapObject::kMapOffset)); } @@ -5427,16 +5427,16 @@ void MacroAssembler::LoadNativeContextSlot(Register dst, int index) { Ld(dst, MemOperand(dst, Context::SlotOffset(index))); } -void TurboAssembler::StubPrologue(StackFrame::Type type) { +void MacroAssembler::StubPrologue(StackFrame::Type type) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); li(scratch, Operand(StackFrame::TypeToMarker(type))); PushCommonFrame(scratch); } -void TurboAssembler::Prologue() { PushStandardFrame(a1); } +void MacroAssembler::Prologue() { PushStandardFrame(a1); } -void TurboAssembler::EnterFrame(StackFrame::Type type) { +void MacroAssembler::EnterFrame(StackFrame::Type type) { ASM_CODE_COMMENT(this); BlockTrampolinePoolScope block_trampoline_pool(this); Push(ra, fp); @@ -5451,7 +5451,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) { #endif // V8_ENABLE_WEBASSEMBLY } -void TurboAssembler::LeaveFrame(StackFrame::Type type) { +void MacroAssembler::LeaveFrame(StackFrame::Type type) { ASM_CODE_COMMENT(this); daddiu(sp, fp, 2 * kPointerSize); Ld(ra, MemOperand(fp, 1 * kPointerSize)); @@ -5568,7 +5568,7 @@ void MacroAssembler::LeaveExitFrame(Register argument_count, bool do_return, daddiu(sp, sp, 2 * kPointerSize); } -int TurboAssembler::ActivationFrameAlignment() { +int MacroAssembler::ActivationFrameAlignment() { #if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64 // Running on the real platform. Use the alignment as mandated by the local // environment. 
@@ -5584,7 +5584,7 @@ int TurboAssembler::ActivationFrameAlignment() { #endif // V8_HOST_ARCH_MIPS } -void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) { +void MacroAssembler::SmiUntag(Register dst, const MemOperand& src) { if (SmiValuesAre32Bits()) { Lw(dst, MemOperand(src.rm(), SmiWordOffset(src.offset()))); } else { @@ -5594,7 +5594,7 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) { } } -void TurboAssembler::JumpIfSmi(Register value, Label* smi_label, +void MacroAssembler::JumpIfSmi(Register value, Label* smi_label, BranchDelaySlot bd) { DCHECK_EQ(0, kSmiTag); UseScratchRegisterScope temps(this); @@ -5614,12 +5614,12 @@ void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label, #ifdef V8_ENABLE_DEBUG_CODE -void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs, +void MacroAssembler::Assert(Condition cc, AbortReason reason, Register rs, Operand rt) { if (v8_flags.debug_code) Check(cc, reason, rs, rt); } -void TurboAssembler::AssertNotSmi(Register object) { +void MacroAssembler::AssertNotSmi(Register object) { if (v8_flags.debug_code) { ASM_CODE_COMMENT(this); static_assert(kSmiTag == 0); @@ -5630,7 +5630,7 @@ void TurboAssembler::AssertNotSmi(Register object) { } } -void TurboAssembler::AssertSmi(Register object) { +void MacroAssembler::AssertSmi(Register object) { if (v8_flags.debug_code) { ASM_CODE_COMMENT(this); static_assert(kSmiTag == 0); @@ -5760,7 +5760,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, #endif // V8_ENABLE_DEBUG_CODE -void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1, +void MacroAssembler::Float32Max(FPURegister dst, FPURegister src1, FPURegister src2, Label* out_of_line) { ASM_CODE_COMMENT(this); if (src1 == src2) { @@ -5806,12 +5806,12 @@ void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1, } } -void TurboAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1, +void MacroAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2) { add_s(dst, src1, src2); } -void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1, +void MacroAssembler::Float32Min(FPURegister dst, FPURegister src1, FPURegister src2, Label* out_of_line) { ASM_CODE_COMMENT(this); if (src1 == src2) { @@ -5857,12 +5857,12 @@ void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1, } } -void TurboAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1, +void MacroAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2) { add_s(dst, src1, src2); } -void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1, +void MacroAssembler::Float64Max(FPURegister dst, FPURegister src1, FPURegister src2, Label* out_of_line) { ASM_CODE_COMMENT(this); if (src1 == src2) { @@ -5907,12 +5907,12 @@ void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1, } } -void TurboAssembler::Float64MaxOutOfLine(FPURegister dst, FPURegister src1, +void MacroAssembler::Float64MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2) { add_d(dst, src1, src2); } -void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1, +void MacroAssembler::Float64Min(FPURegister dst, FPURegister src1, FPURegister src2, Label* out_of_line) { ASM_CODE_COMMENT(this); if (src1 == src2) { @@ -5957,14 +5957,14 @@ void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1, } } -void TurboAssembler::Float64MinOutOfLine(FPURegister dst, FPURegister src1, +void MacroAssembler::Float64MinOutOfLine(FPURegister 
dst, FPURegister src1, FPURegister src2) { add_d(dst, src1, src2); } static const int kRegisterPassedArguments = 8; -int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments, +int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments, int num_double_arguments) { int stack_passed_words = 0; int num_args = num_reg_arguments + num_double_arguments; @@ -5977,7 +5977,7 @@ int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments, return stack_passed_words; } -void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, +void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, int num_double_arguments, Register scratch) { ASM_CODE_COMMENT(this); @@ -6005,12 +6005,12 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, } } -void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, +void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, Register scratch) { PrepareCallCFunction(num_reg_arguments, 0, scratch); } -void TurboAssembler::CallCFunction(ExternalReference function, +void MacroAssembler::CallCFunction(ExternalReference function, int num_reg_arguments, int num_double_arguments) { ASM_CODE_COMMENT(this); @@ -6019,22 +6019,22 @@ void TurboAssembler::CallCFunction(ExternalReference function, CallCFunctionHelper(t9, num_reg_arguments, num_double_arguments); } -void TurboAssembler::CallCFunction(Register function, int num_reg_arguments, +void MacroAssembler::CallCFunction(Register function, int num_reg_arguments, int num_double_arguments) { ASM_CODE_COMMENT(this); CallCFunctionHelper(function, num_reg_arguments, num_double_arguments); } -void TurboAssembler::CallCFunction(ExternalReference function, +void MacroAssembler::CallCFunction(ExternalReference function, int num_arguments) { CallCFunction(function, num_arguments, 0); } -void TurboAssembler::CallCFunction(Register function, int num_arguments) { +void MacroAssembler::CallCFunction(Register function, int num_arguments) { CallCFunction(function, num_arguments, 0); } -void TurboAssembler::CallCFunctionHelper(Register function, +void MacroAssembler::CallCFunctionHelper(Register function, int num_reg_arguments, int num_double_arguments) { DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters); @@ -6129,7 +6129,7 @@ void TurboAssembler::CallCFunctionHelper(Register function, #undef BRANCH_ARGS_CHECK -void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask, +void MacroAssembler::CheckPageFlag(Register object, Register scratch, int mask, Condition cc, Label* condition_met) { ASM_CODE_COMMENT(this); And(scratch, object, Operand(~kPageAlignmentMask)); @@ -6153,7 +6153,7 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3, UNREACHABLE(); } -void TurboAssembler::ComputeCodeStartAddress(Register dst) { +void MacroAssembler::ComputeCodeStartAddress(Register dst) { // This push on ra and the pop below together ensure that we restore the // register ra, which is needed while computing the code start address. 
push(ra); @@ -6173,7 +6173,7 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) { pop(ra); // Restore ra } -void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, +void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit, DeoptimizeKind kind, Label* ret, Label*) { ASM_CODE_COMMENT(this); @@ -6186,14 +6186,14 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, : Deoptimizer::kEagerDeoptExitSize); } -void TurboAssembler::LoadCodeEntry(Register destination, +void MacroAssembler::LoadCodeEntry(Register destination, Register code_data_container_object) { ASM_CODE_COMMENT(this); Ld(destination, FieldMemOperand(code_data_container_object, Code::kCodeEntryPointOffset)); } -void TurboAssembler::LoadCodeInstructionStreamNonBuiltin( +void MacroAssembler::LoadCodeInstructionStreamNonBuiltin( Register destination, Register code_data_container_object) { ASM_CODE_COMMENT(this); // Compute the InstructionStream object pointer from the code entry point. @@ -6203,13 +6203,13 @@ void TurboAssembler::LoadCodeInstructionStreamNonBuiltin( Operand(InstructionStream::kHeaderSize - kHeapObjectTag)); } -void TurboAssembler::CallCodeObject(Register code_data_container_object) { +void MacroAssembler::CallCodeObject(Register code_data_container_object) { ASM_CODE_COMMENT(this); LoadCodeEntry(code_data_container_object, code_data_container_object); Call(code_data_container_object); } -void TurboAssembler::JumpCodeObject(Register code_data_container_object, +void MacroAssembler::JumpCodeObject(Register code_data_container_object, JumpMode jump_mode) { ASM_CODE_COMMENT(this); DCHECK_EQ(JumpMode::kJump, jump_mode); diff --git a/src/codegen/mips64/macro-assembler-mips64.h b/src/codegen/mips64/macro-assembler-mips64.h index c997872ebb..2730002ff0 100644 --- a/src/codegen/mips64/macro-assembler-mips64.h +++ b/src/codegen/mips64/macro-assembler-mips64.h @@ -90,9 +90,9 @@ inline MemOperand CFunctionArgumentOperand(int index) { return MemOperand(sp, offset); } -class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { +class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { public: - using TurboAssemblerBase::TurboAssemblerBase; + using MacroAssemblerBase::MacroAssemblerBase; // Activation support. void EnterFrame(StackFrame::Type type); @@ -913,79 +913,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { // Define an exception handler and bind a label. void BindExceptionHandler(Label* label) { bind(label); } - protected: - inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch); - inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits); - - private: - bool has_double_zero_reg_set_ = false; - - // Performs a truncating conversion of a floating point number as used by - // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it - // succeeds, otherwise falls through if result is saturated. On return - // 'result' either holds answer, or is clobbered on fall through. 
- void TryInlineTruncateDoubleToI(Register result, DoubleRegister input, - Label* done); - - void CompareF(SecondaryField sizeField, FPUCondition cc, FPURegister cmp1, - FPURegister cmp2); - - void CompareIsNanF(SecondaryField sizeField, FPURegister cmp1, - FPURegister cmp2); - - void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond, - MSARegister wt, BranchDelaySlot bd = PROTECT); - - void CallCFunctionHelper(Register function, int num_reg_arguments, - int num_double_arguments); - - // TODO(mips) Reorder parameters so out parameters come last. - bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits); - bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits, - Register* scratch, const Operand& rt); - - void BranchShortHelperR6(int32_t offset, Label* L); - void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot); - bool BranchShortHelperR6(int32_t offset, Label* L, Condition cond, - Register rs, const Operand& rt); - bool BranchShortHelper(int16_t offset, Label* L, Condition cond, Register rs, - const Operand& rt, BranchDelaySlot bdslot); - bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs, - const Operand& rt, BranchDelaySlot bdslot); - - void BranchAndLinkShortHelperR6(int32_t offset, Label* L); - void BranchAndLinkShortHelper(int16_t offset, Label* L, - BranchDelaySlot bdslot); - void BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot = PROTECT); - void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT); - bool BranchAndLinkShortHelperR6(int32_t offset, Label* L, Condition cond, - Register rs, const Operand& rt); - bool BranchAndLinkShortHelper(int16_t offset, Label* L, Condition cond, - Register rs, const Operand& rt, - BranchDelaySlot bdslot); - bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond, - Register rs, const Operand& rt, - BranchDelaySlot bdslot); - void BranchLong(Label* L, BranchDelaySlot bdslot); - void BranchAndLinkLong(Label* L, BranchDelaySlot bdslot); - - template - void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode, - RoundFunc round); - - template - void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode, - RoundFunc round); - - // Push a fixed frame, consisting of ra, fp. - void PushCommonFrame(Register marker_reg = no_reg); -}; - -// MacroAssembler implements a collection of frequently used macros. -class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { - public: - using TurboAssembler::TurboAssembler; - // It assumes that the arguments are located below the stack pointer. // argc is the number of arguments not including the receiver. // TODO(victorgomes): Remove this function once we stick with the reversed @@ -1087,9 +1014,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { void Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft, FPURegister scratch); - void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond, - MSARegister wt, BranchDelaySlot bd = PROTECT); - // Enter exit frame. // argc - argument count to be dropped by LeaveExitFrame. // stack_space - extra stack space. @@ -1269,17 +1193,83 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { DecodeField(reg, reg); } + protected: + inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch); + inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits); + private: + bool has_double_zero_reg_set_ = false; + // Helper functions for generating invokes. 
void InvokePrologue(Register expected_parameter_count, Register actual_parameter_count, Label* done, InvokeType type); + // Performs a truncating conversion of a floating point number as used by + // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it + // succeeds, otherwise falls through if result is saturated. On return + // 'result' either holds answer, or is clobbered on fall through. + void TryInlineTruncateDoubleToI(Register result, DoubleRegister input, + Label* done); + + void CompareF(SecondaryField sizeField, FPUCondition cc, FPURegister cmp1, + FPURegister cmp2); + + void CompareIsNanF(SecondaryField sizeField, FPURegister cmp1, + FPURegister cmp2); + + void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond, + MSARegister wt, BranchDelaySlot bd = PROTECT); + + void CallCFunctionHelper(Register function, int num_reg_arguments, + int num_double_arguments); + + // TODO(mips) Reorder parameters so out parameters come last. + bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits); + bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits, + Register* scratch, const Operand& rt); + + void BranchShortHelperR6(int32_t offset, Label* L); + void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot); + bool BranchShortHelperR6(int32_t offset, Label* L, Condition cond, + Register rs, const Operand& rt); + bool BranchShortHelper(int16_t offset, Label* L, Condition cond, Register rs, + const Operand& rt, BranchDelaySlot bdslot); + bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs, + const Operand& rt, BranchDelaySlot bdslot); + + void BranchAndLinkShortHelperR6(int32_t offset, Label* L); + void BranchAndLinkShortHelper(int16_t offset, Label* L, + BranchDelaySlot bdslot); + void BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot = PROTECT); + void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT); + bool BranchAndLinkShortHelperR6(int32_t offset, Label* L, Condition cond, + Register rs, const Operand& rt); + bool BranchAndLinkShortHelper(int16_t offset, Label* L, Condition cond, + Register rs, const Operand& rt, + BranchDelaySlot bdslot); + bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond, + Register rs, const Operand& rt, + BranchDelaySlot bdslot); + void BranchLong(Label* L, BranchDelaySlot bdslot); + void BranchAndLinkLong(Label* L, BranchDelaySlot bdslot); + + template + void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode, + RoundFunc round); + + template + void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode, + RoundFunc round); + + // Push a fixed frame, consisting of ra, fp. + void PushCommonFrame(Register marker_reg = no_reg); + DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler); }; template -void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count, +void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count, Func GetLabelFunction) { // Ensure that dd-ed labels following this instruction use 8 bytes aligned // addresses. 
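For reference, the parallel bit-count that the Popcnt/Dpopcnt comments in the mips64 hunks cite (the Stanford bit-twiddling hack) can be written as ordinary C++ over a 64-bit value. This is an illustrative sketch only, not code from the patch; the B0/B1/B2 masks mirror the constants that appear in the Dpopcnt hunk above.

#include <cstdint>

// Illustrative SWAR population count, matching the scheme referenced by
// Popcnt/Dpopcnt; not part of the patch.
uint64_t PopcntSketch(uint64_t x) {
  const uint64_t B0 = 0x5555555555555555ull;     // (T)~(T)0/3
  const uint64_t B1 = 0x3333333333333333ull;     // (T)~(T)0/15*3
  const uint64_t B2 = 0x0f0f0f0f0f0f0f0full;     // (T)~(T)0/255*15
  const uint64_t kOnes = 0x0101010101010101ull;  // (T)~(T)0/255
  x = x - ((x >> 1) & B0);         // fold pairs of bits into 2-bit counts
  x = (x & B1) + ((x >> 2) & B1);  // 2-bit counts -> 4-bit counts
  x = (x + (x >> 4)) & B2;         // 4-bit counts -> per-byte counts
  return (x * kOnes) >> 56;        // sum all bytes into the top byte
}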
diff --git a/src/codegen/ppc/assembler-ppc-inl.h b/src/codegen/ppc/assembler-ppc-inl.h index 3858595051..0d6f2b46db 100644 --- a/src/codegen/ppc/assembler-ppc-inl.h +++ b/src/codegen/ppc/assembler-ppc-inl.h @@ -148,7 +148,7 @@ Handle Assembler::code_target_object_handle_at(Address pc, HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) { DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_)); if (IsCompressedEmbeddedObject(rmode_)) { - return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTaggedAny( + return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTagged( cage_base, Assembler::target_compressed_address_at(pc_, constant_pool_)))); } else { diff --git a/src/codegen/ppc/assembler-ppc.h b/src/codegen/ppc/assembler-ppc.h index 497af98ac2..6fd11e4a0f 100644 --- a/src/codegen/ppc/assembler-ppc.h +++ b/src/codegen/ppc/assembler-ppc.h @@ -1570,7 +1570,7 @@ class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope { private: friend class Assembler; - friend class TurboAssembler; + friend class MacroAssembler; Assembler* assembler_; RegList old_available_; diff --git a/src/codegen/ppc/constants-ppc.h b/src/codegen/ppc/constants-ppc.h index 8d00d36570..19878b502b 100644 --- a/src/codegen/ppc/constants-ppc.h +++ b/src/codegen/ppc/constants-ppc.h @@ -151,7 +151,7 @@ enum Condition { kNotZero = 16, }; -inline Condition check_condition(Condition cond) { +inline Condition to_condition(Condition cond) { switch (cond) { case kUnsignedLessThan: return lt; @@ -171,6 +171,31 @@ inline Condition check_condition(Condition cond) { return cond; } +inline bool is_signed(Condition cond) { + switch (cond) { + case kEqual: + case kNotEqual: + case kLessThan: + case kGreaterThan: + case kLessThanEqual: + case kGreaterThanEqual: + case kOverflow: + case kNoOverflow: + case kZero: + case kNotZero: + return true; + + case kUnsignedLessThan: + case kUnsignedGreaterThan: + case kUnsignedLessThanEqual: + case kUnsignedGreaterThanEqual: + return false; + + default: + UNREACHABLE(); + } +} + inline Condition NegateCondition(Condition cond) { DCHECK(cond != al); return static_cast(cond ^ ne); diff --git a/src/codegen/ppc/macro-assembler-ppc.cc b/src/codegen/ppc/macro-assembler-ppc.cc index c45a843aed..73acf16d08 100644 --- a/src/codegen/ppc/macro-assembler-ppc.cc +++ b/src/codegen/ppc/macro-assembler-ppc.cc @@ -55,7 +55,7 @@ constexpr int kStackSavedSavedFPSizeInBytes = } // namespace -int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, +int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, Register exclusion2, Register exclusion3) const { @@ -72,7 +72,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, return bytes; } -int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register scratch1, +int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register scratch1, Register scratch2, Register exclusion1, Register exclusion2, Register exclusion3) { int bytes = 0; @@ -91,7 +91,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register scratch1, return bytes; } -int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register scratch1, +int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register scratch1, Register scratch2, Register exclusion1, Register exclusion2, Register exclusion3) { int bytes = 0; @@ -109,29 +109,28 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register scratch1, return bytes; } -void 
TurboAssembler::Jump(Register target) { +void MacroAssembler::Jump(Register target) { mtctr(target); bctr(); } -void TurboAssembler::LoadFromConstantsTable(Register destination, +void MacroAssembler::LoadFromConstantsTable(Register destination, int constant_index) { DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable)); DCHECK_NE(destination, r0); LoadRoot(destination, RootIndex::kBuiltinsConstantsTable); - LoadTaggedPointerField( - destination, - FieldMemOperand(destination, - FixedArray::OffsetOfElementAt(constant_index)), - r0); + LoadTaggedField(destination, + FieldMemOperand(destination, FixedArray::OffsetOfElementAt( + constant_index)), + r0); } -void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) { +void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) { LoadU64(destination, MemOperand(kRootRegister, offset), r0); } -void TurboAssembler::LoadRootRegisterOffset(Register destination, +void MacroAssembler::LoadRootRegisterOffset(Register destination, intptr_t offset) { if (offset == 0) { mr(destination, kRootRegister); @@ -140,7 +139,7 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination, } } -MemOperand TurboAssembler::ExternalReferenceAsOperand( +MemOperand MacroAssembler::ExternalReferenceAsOperand( ExternalReference reference, Register scratch) { if (root_array_available_ && options().enable_root_relative_access) { int64_t offset = @@ -170,7 +169,7 @@ MemOperand TurboAssembler::ExternalReferenceAsOperand( return MemOperand(scratch, 0); } -void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, +void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond, CRegister cr) { Label skip; @@ -183,13 +182,13 @@ void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, bind(&skip); } -void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond, +void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond, CRegister cr) { DCHECK(!RelocInfo::IsCodeTarget(rmode)); Jump(static_cast(target), rmode, cond, cr); } -void TurboAssembler::Jump(Handle code, RelocInfo::Mode rmode, +void MacroAssembler::Jump(Handle code, RelocInfo::Mode rmode, Condition cond, CRegister cr) { DCHECK(RelocInfo::IsCodeTarget(rmode)); DCHECK_IMPLIES(options().isolate_independent_code, @@ -204,7 +203,7 @@ void TurboAssembler::Jump(Handle code, RelocInfo::Mode rmode, Jump(static_cast(target_index), rmode, cond, cr); } -void TurboAssembler::Jump(const ExternalReference& reference) { +void MacroAssembler::Jump(const ExternalReference& reference) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); Move(scratch, reference); @@ -218,7 +217,7 @@ void TurboAssembler::Jump(const ExternalReference& reference) { Jump(scratch); } -void TurboAssembler::Call(Register target) { +void MacroAssembler::Call(Register target) { BlockTrampolinePoolScope block_trampoline_pool(this); // branch via link register and set LK bit for return point mtctr(target); @@ -236,7 +235,7 @@ int MacroAssembler::CallSizeNotPredictableCodeSize(Address target, return (2 + kMovInstructionsNoConstantPool) * kInstrSize; } -void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, +void MacroAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond) { BlockTrampolinePoolScope block_trampoline_pool(this); DCHECK(cond == al); @@ -252,7 +251,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, bctrl(); } -void TurboAssembler::Call(Handle code, 
RelocInfo::Mode rmode, +void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode, + Condition cond) { BlockTrampolinePoolScope block_trampoline_pool(this); DCHECK(RelocInfo::IsCodeTarget(rmode)); @@ -268,7 +267,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode, Call(static_cast<Address>
(target_index), rmode, cond); } -void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) { +void MacroAssembler::CallBuiltin(Builtin builtin, Condition cond) { ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin)); // Use ip directly instead of using UseScratchRegisterScope, as we do not // preserve scratch registers across calls. @@ -309,7 +308,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) { } } -void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond, +void MacroAssembler::TailCallBuiltin(Builtin builtin, Condition cond, CRegister cr) { ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("tail call", builtin)); @@ -352,13 +351,13 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond, } } -void TurboAssembler::Drop(int count) { +void MacroAssembler::Drop(int count) { if (count > 0) { AddS64(sp, sp, Operand(count * kSystemPointerSize), r0); } } -void TurboAssembler::Drop(Register count, Register scratch) { +void MacroAssembler::Drop(Register count, Register scratch) { ShiftLeftU64(scratch, count, Operand(kSystemPointerSizeLog2)); add(sp, sp, scratch); } @@ -376,19 +375,19 @@ Operand MacroAssembler::ClearedValue() const { static_cast(HeapObjectReference::ClearedValue(isolate()).ptr())); } -void TurboAssembler::Call(Label* target) { b(target, SetLK); } +void MacroAssembler::Call(Label* target) { b(target, SetLK); } -void TurboAssembler::Push(Handle handle) { +void MacroAssembler::Push(Handle handle) { mov(r0, Operand(handle)); push(r0); } -void TurboAssembler::Push(Smi smi) { +void MacroAssembler::Push(Smi smi) { mov(r0, Operand(smi)); push(r0); } -void TurboAssembler::PushArray(Register array, Register size, Register scratch, +void MacroAssembler::PushArray(Register array, Register size, Register scratch, Register scratch2, PushArrayOrder order) { Label loop, done; @@ -420,7 +419,7 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch, } } -void TurboAssembler::Move(Register dst, Handle value, +void MacroAssembler::Move(Register dst, Handle value, RelocInfo::Mode rmode) { // TODO(jgruber,v8:8887): Also consider a root-relative load when generating // non-isolate-independent code. In many cases it might be cheaper than @@ -438,7 +437,7 @@ void TurboAssembler::Move(Register dst, Handle value, } } -void TurboAssembler::Move(Register dst, ExternalReference reference) { +void MacroAssembler::Move(Register dst, ExternalReference reference) { // TODO(jgruber,v8:8887): Also consider a root-relative load when generating // non-isolate-independent code. In many cases it might be cheaper than // embedding the relocatable value. 
@@ -449,20 +448,20 @@ void TurboAssembler::Move(Register dst, ExternalReference reference) { mov(dst, Operand(reference)); } -void TurboAssembler::Move(Register dst, Register src, Condition cond) { +void MacroAssembler::Move(Register dst, Register src, Condition cond) { DCHECK(cond == al); if (dst != src) { mr(dst, src); } } -void TurboAssembler::Move(DoubleRegister dst, DoubleRegister src) { +void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) { if (dst != src) { fmr(dst, src); } } -void TurboAssembler::MultiPush(RegList regs, Register location) { +void MacroAssembler::MultiPush(RegList regs, Register location) { int16_t num_to_push = regs.Count(); int16_t stack_offset = num_to_push * kSystemPointerSize; @@ -475,7 +474,7 @@ void TurboAssembler::MultiPush(RegList regs, Register location) { } } -void TurboAssembler::MultiPop(RegList regs, Register location) { +void MacroAssembler::MultiPop(RegList regs, Register location) { int16_t stack_offset = 0; for (int16_t i = 0; i < Register::kNumRegisters; i++) { @@ -487,7 +486,7 @@ void TurboAssembler::MultiPop(RegList regs, Register location) { addi(location, location, Operand(stack_offset)); } -void TurboAssembler::MultiPushDoubles(DoubleRegList dregs, Register location) { +void MacroAssembler::MultiPushDoubles(DoubleRegList dregs, Register location) { int16_t num_to_push = dregs.Count(); int16_t stack_offset = num_to_push * kDoubleSize; @@ -501,7 +500,7 @@ void TurboAssembler::MultiPushDoubles(DoubleRegList dregs, Register location) { } } -void TurboAssembler::MultiPushV128(Simd128RegList simd_regs, Register scratch, +void MacroAssembler::MultiPushV128(Simd128RegList simd_regs, Register scratch, Register location) { int16_t num_to_push = simd_regs.Count(); int16_t stack_offset = num_to_push * kSimd128Size; @@ -516,7 +515,7 @@ void TurboAssembler::MultiPushV128(Simd128RegList simd_regs, Register scratch, } } -void TurboAssembler::MultiPopDoubles(DoubleRegList dregs, Register location) { +void MacroAssembler::MultiPopDoubles(DoubleRegList dregs, Register location) { int16_t stack_offset = 0; for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) { @@ -529,7 +528,7 @@ void TurboAssembler::MultiPopDoubles(DoubleRegList dregs, Register location) { addi(location, location, Operand(stack_offset)); } -void TurboAssembler::MultiPopV128(Simd128RegList simd_regs, Register scratch, +void MacroAssembler::MultiPopV128(Simd128RegList simd_regs, Register scratch, Register location) { int16_t stack_offset = 0; @@ -543,7 +542,7 @@ void TurboAssembler::MultiPopV128(Simd128RegList simd_regs, Register scratch, addi(location, location, Operand(stack_offset)); } -void TurboAssembler::MultiPushF64AndV128(DoubleRegList dregs, +void MacroAssembler::MultiPushF64AndV128(DoubleRegList dregs, Simd128RegList simd_regs, Register scratch1, Register scratch2, Register location) { @@ -580,7 +579,7 @@ void TurboAssembler::MultiPushF64AndV128(DoubleRegList dregs, #endif } -void TurboAssembler::MultiPopF64AndV128(DoubleRegList dregs, +void MacroAssembler::MultiPopF64AndV128(DoubleRegList dregs, Simd128RegList simd_regs, Register scratch1, Register scratch2, Register location) { @@ -611,7 +610,7 @@ void TurboAssembler::MultiPopF64AndV128(DoubleRegList dregs, MultiPopDoubles(dregs); } -void TurboAssembler::LoadTaggedRoot(Register destination, RootIndex index) { +void MacroAssembler::LoadTaggedRoot(Register destination, RootIndex index) { ASM_CODE_COMMENT(this); if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index)) { mov(destination, 
Operand(ReadOnlyRootPtr(index), RelocInfo::Mode::NO_INFO)); @@ -620,38 +619,28 @@ void TurboAssembler::LoadTaggedRoot(Register destination, RootIndex index) { LoadRoot(destination, index); } -void TurboAssembler::LoadRoot(Register destination, RootIndex index, +void MacroAssembler::LoadRoot(Register destination, RootIndex index, Condition cond) { DCHECK(cond == al); if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index)) { - DecompressTaggedPointer(destination, ReadOnlyRootPtr(index)); + DecompressTagged(destination, ReadOnlyRootPtr(index)); return; } LoadU64(destination, MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), r0); } -void TurboAssembler::LoadTaggedPointerField(const Register& destination, - const MemOperand& field_operand, - const Register& scratch) { +void MacroAssembler::LoadTaggedField(const Register& destination, + const MemOperand& field_operand, + const Register& scratch) { if (COMPRESS_POINTERS_BOOL) { - DecompressTaggedPointer(destination, field_operand); + DecompressTagged(destination, field_operand); } else { LoadU64(destination, field_operand, scratch); } } -void TurboAssembler::LoadAnyTaggedField(const Register& destination, - const MemOperand& field_operand, - const Register& scratch) { - if (COMPRESS_POINTERS_BOOL) { - DecompressAnyTagged(destination, field_operand); - } else { - LoadU64(destination, field_operand, scratch); - } -} - -void TurboAssembler::SmiUntag(Register dst, const MemOperand& src, RCBit rc, +void MacroAssembler::SmiUntag(Register dst, const MemOperand& src, RCBit rc, Register scratch) { if (SmiValuesAre31Bits()) { LoadU32(dst, src, scratch); @@ -662,7 +651,7 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src, RCBit rc, SmiUntag(dst, rc); } -void TurboAssembler::StoreTaggedField(const Register& value, +void MacroAssembler::StoreTaggedField(const Register& value, const MemOperand& dst_field_operand, const Register& scratch) { if (COMPRESS_POINTERS_BOOL) { @@ -674,60 +663,43 @@ void TurboAssembler::StoreTaggedField(const Register& value, } } -void TurboAssembler::DecompressTaggedSigned(Register destination, +void MacroAssembler::DecompressTaggedSigned(Register destination, Register src) { RecordComment("[ DecompressTaggedSigned"); ZeroExtWord32(destination, src); RecordComment("]"); } -void TurboAssembler::DecompressTaggedSigned(Register destination, +void MacroAssembler::DecompressTaggedSigned(Register destination, MemOperand field_operand) { RecordComment("[ DecompressTaggedSigned"); LoadU32(destination, field_operand, r0); RecordComment("]"); } -void TurboAssembler::DecompressTaggedPointer(Register destination, - Register source) { - RecordComment("[ DecompressTaggedPointer"); +void MacroAssembler::DecompressTagged(Register destination, Register source) { + RecordComment("[ DecompressTagged"); ZeroExtWord32(destination, source); add(destination, destination, kPtrComprCageBaseRegister); RecordComment("]"); } -void TurboAssembler::DecompressTaggedPointer(Register destination, - MemOperand field_operand) { - RecordComment("[ DecompressTaggedPointer"); +void MacroAssembler::DecompressTagged(Register destination, + MemOperand field_operand) { + RecordComment("[ DecompressTagged"); LoadU32(destination, field_operand, r0); add(destination, destination, kPtrComprCageBaseRegister); RecordComment("]"); } -void TurboAssembler::DecompressTaggedPointer(const Register& destination, - Tagged_t immediate) { +void MacroAssembler::DecompressTagged(const Register& destination, + Tagged_t immediate) { ASM_CODE_COMMENT(this); 
AddS64(destination, kPtrComprCageBaseRegister, Operand(immediate, RelocInfo::Mode::NO_INFO)); } -void TurboAssembler::DecompressAnyTagged(Register destination, - MemOperand field_operand) { - RecordComment("[ DecompressAnyTagged"); - LoadU32(destination, field_operand, r0); - add(destination, destination, kPtrComprCageBaseRegister); - RecordComment("]"); -} - -void TurboAssembler::DecompressAnyTagged(Register destination, - Register source) { - RecordComment("[ DecompressAnyTagged"); - ZeroExtWord32(destination, source); - add(destination, destination, kPtrComprCageBaseRegister); - RecordComment("]"); -} - -void TurboAssembler::LoadTaggedSignedField(Register destination, +void MacroAssembler::LoadTaggedSignedField(Register destination, MemOperand field_operand, Register scratch) { if (COMPRESS_POINTERS_BOOL) { @@ -776,17 +748,17 @@ void MacroAssembler::RecordWriteField(Register object, int offset, } } -void TurboAssembler::MaybeSaveRegisters(RegList registers) { +void MacroAssembler::MaybeSaveRegisters(RegList registers) { if (registers.is_empty()) return; MultiPush(registers); } -void TurboAssembler::MaybeRestoreRegisters(RegList registers) { +void MacroAssembler::MaybeRestoreRegisters(RegList registers) { if (registers.is_empty()) return; MultiPop(registers); } -void TurboAssembler::CallEphemeronKeyBarrier(Register object, +void MacroAssembler::CallEphemeronKeyBarrier(Register object, Register slot_address, SaveFPRegsMode fp_mode) { DCHECK(!AreAliased(object, slot_address)); @@ -809,7 +781,7 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object, MaybeRestoreRegisters(registers); } -void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object, +void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode) { @@ -832,7 +804,7 @@ void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object, MaybeRestoreRegisters(registers); } -void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address, +void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode) { // Use CallRecordWriteStubSaveRegisters if the object and slot registers @@ -861,7 +833,7 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address, SaveFPRegsMode fp_mode, SmiCheck smi_check) { DCHECK(!AreAliased(object, value, slot_address)); if (v8_flags.debug_code) { - LoadTaggedPointerField(r0, MemOperand(slot_address)); + LoadTaggedField(r0, MemOperand(slot_address)); CmpS64(r0, value); Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite); } @@ -909,7 +881,7 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address, } } -void TurboAssembler::PushCommonFrame(Register marker_reg) { +void MacroAssembler::PushCommonFrame(Register marker_reg) { int fp_delta = 0; mflr(r0); if (V8_EMBEDDED_CONSTANT_POOL_BOOL) { @@ -932,7 +904,7 @@ void TurboAssembler::PushCommonFrame(Register marker_reg) { addi(fp, sp, Operand(fp_delta * kSystemPointerSize)); } -void TurboAssembler::PushStandardFrame(Register function_reg) { +void MacroAssembler::PushStandardFrame(Register function_reg) { int fp_delta = 0; mflr(r0); if (V8_EMBEDDED_CONSTANT_POOL_BOOL) { @@ -956,7 +928,7 @@ void TurboAssembler::PushStandardFrame(Register function_reg) { Push(kJavaScriptCallArgCountRegister); } -void TurboAssembler::RestoreFrameStateForTailCall() { +void MacroAssembler::RestoreFrameStateForTailCall() { if (V8_EMBEDDED_CONSTANT_POOL_BOOL) { 
LoadU64(kConstantPoolRegister, MemOperand(fp, StandardFrameConstants::kConstantPoolOffset)); @@ -967,61 +939,61 @@ void TurboAssembler::RestoreFrameStateForTailCall() { mtlr(r0); } -void TurboAssembler::CanonicalizeNaN(const DoubleRegister dst, +void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src) { // Turn potential sNaN into qNaN. fsub(dst, src, kDoubleRegZero); } -void TurboAssembler::ConvertIntToDouble(Register src, DoubleRegister dst) { +void MacroAssembler::ConvertIntToDouble(Register src, DoubleRegister dst) { MovIntToDouble(dst, src, r0); fcfid(dst, dst); } -void TurboAssembler::ConvertUnsignedIntToDouble(Register src, +void MacroAssembler::ConvertUnsignedIntToDouble(Register src, DoubleRegister dst) { MovUnsignedIntToDouble(dst, src, r0); fcfid(dst, dst); } -void TurboAssembler::ConvertIntToFloat(Register src, DoubleRegister dst) { +void MacroAssembler::ConvertIntToFloat(Register src, DoubleRegister dst) { MovIntToDouble(dst, src, r0); fcfids(dst, dst); } -void TurboAssembler::ConvertUnsignedIntToFloat(Register src, +void MacroAssembler::ConvertUnsignedIntToFloat(Register src, DoubleRegister dst) { MovUnsignedIntToDouble(dst, src, r0); fcfids(dst, dst); } #if V8_TARGET_ARCH_PPC64 -void TurboAssembler::ConvertInt64ToDouble(Register src, +void MacroAssembler::ConvertInt64ToDouble(Register src, DoubleRegister double_dst) { MovInt64ToDouble(double_dst, src); fcfid(double_dst, double_dst); } -void TurboAssembler::ConvertUnsignedInt64ToFloat(Register src, +void MacroAssembler::ConvertUnsignedInt64ToFloat(Register src, DoubleRegister double_dst) { MovInt64ToDouble(double_dst, src); fcfidus(double_dst, double_dst); } -void TurboAssembler::ConvertUnsignedInt64ToDouble(Register src, +void MacroAssembler::ConvertUnsignedInt64ToDouble(Register src, DoubleRegister double_dst) { MovInt64ToDouble(double_dst, src); fcfidu(double_dst, double_dst); } -void TurboAssembler::ConvertInt64ToFloat(Register src, +void MacroAssembler::ConvertInt64ToFloat(Register src, DoubleRegister double_dst) { MovInt64ToDouble(double_dst, src); fcfids(double_dst, double_dst); } #endif -void TurboAssembler::ConvertDoubleToInt64(const DoubleRegister double_input, +void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input, #if !V8_TARGET_ARCH_PPC64 const Register dst_hi, #endif @@ -1044,7 +1016,7 @@ void TurboAssembler::ConvertDoubleToInt64(const DoubleRegister double_input, } #if V8_TARGET_ARCH_PPC64 -void TurboAssembler::ConvertDoubleToUnsignedInt64( +void MacroAssembler::ConvertDoubleToUnsignedInt64( const DoubleRegister double_input, const Register dst, const DoubleRegister double_dst, FPRoundingMode rounding_mode) { if (rounding_mode == kRoundToZero) { @@ -1060,7 +1032,7 @@ void TurboAssembler::ConvertDoubleToUnsignedInt64( #endif #if !V8_TARGET_ARCH_PPC64 -void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high, +void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high, Register src_low, Register src_high, Register scratch, Register shift) { DCHECK(!AreAliased(dst_low, src_high)); @@ -1085,7 +1057,7 @@ void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high, bind(&done); } -void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high, +void MacroAssembler::ShiftLeftPair(Register dst_low, Register dst_high, Register src_low, Register src_high, uint32_t shift) { DCHECK(!AreAliased(dst_low, src_high)); @@ -1107,7 +1079,7 @@ void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high, } } -void 
TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high, +void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high, Register src_low, Register src_high, Register scratch, Register shift) { DCHECK(!AreAliased(dst_low, src_high)); @@ -1132,7 +1104,7 @@ void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high, bind(&done); } -void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high, +void MacroAssembler::ShiftRightPair(Register dst_low, Register dst_high, Register src_low, Register src_high, uint32_t shift) { DCHECK(!AreAliased(dst_low, src_high)); @@ -1154,7 +1126,7 @@ void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high, } } -void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high, +void MacroAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low, Register src_high, Register scratch, Register shift) { DCHECK(!AreAliased(dst_low, src_high, shift)); @@ -1178,7 +1150,7 @@ void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high, bind(&done); } -void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high, +void MacroAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low, Register src_high, uint32_t shift) { DCHECK(!AreAliased(dst_low, src_high)); @@ -1201,7 +1173,7 @@ void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high, } #endif -void TurboAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress( +void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress( Register code_target_address) { // Builtins do not use the constant pool (see is_constant_pool_available). static_assert(InstructionStream::kOnHeapBodyIsContiguous); @@ -1217,19 +1189,19 @@ void TurboAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress( add(kConstantPoolRegister, kConstantPoolRegister, r0); } -void TurboAssembler::LoadPC(Register dst) { +void MacroAssembler::LoadPC(Register dst) { b(4, SetLK); mflr(dst); } -void TurboAssembler::ComputeCodeStartAddress(Register dst) { +void MacroAssembler::ComputeCodeStartAddress(Register dst) { mflr(r0); LoadPC(dst); subi(dst, dst, Operand(pc_offset() - kInstrSize)); mtlr(r0); } -void TurboAssembler::LoadConstantPoolPointerRegister() { +void MacroAssembler::LoadConstantPoolPointerRegister() { // // Builtins do not use the constant pool (see is_constant_pool_available). static_assert(InstructionStream::kOnHeapBodyIsContiguous); @@ -1240,7 +1212,7 @@ void TurboAssembler::LoadConstantPoolPointerRegister() { ConstantPoolPosition(), delta); } -void TurboAssembler::StubPrologue(StackFrame::Type type) { +void MacroAssembler::StubPrologue(StackFrame::Type type) { { ConstantPoolUnavailableScope constant_pool_unavailable(this); mov(r11, Operand(StackFrame::TypeToMarker(type))); @@ -1252,7 +1224,7 @@ void TurboAssembler::StubPrologue(StackFrame::Type type) { } } -void TurboAssembler::Prologue() { +void MacroAssembler::Prologue() { PushStandardFrame(r4); if (V8_EMBEDDED_CONSTANT_POOL_BOOL) { // base contains prologue address @@ -1261,7 +1233,7 @@ void TurboAssembler::Prologue() { } } -void TurboAssembler::DropArguments(Register count, ArgumentsCountType type, +void MacroAssembler::DropArguments(Register count, ArgumentsCountType type, ArgumentsCountMode mode) { int receiver_bytes = (mode == kCountExcludesReceiver) ? 
kSystemPointerSize : 0; @@ -1287,7 +1259,7 @@ void TurboAssembler::DropArguments(Register count, ArgumentsCountType type, } } -void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, +void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc, Register receiver, ArgumentsCountType type, ArgumentsCountMode mode) { @@ -1302,7 +1274,7 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, } } -void TurboAssembler::EnterFrame(StackFrame::Type type, +void MacroAssembler::EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) { if (V8_EMBEDDED_CONSTANT_POOL_BOOL && load_constant_pool_pointer_reg) { // Push type explicitly so we can leverage the constant pool. @@ -1326,7 +1298,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type, #endif // V8_ENABLE_WEBASSEMBLY } -int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) { +int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) { ConstantPoolUnavailableScope constant_pool_unavailable(this); // r3: preserved // r4: preserved @@ -1422,7 +1394,7 @@ void MacroAssembler::EnterExitFrame(int stack_space, StoreU64(r8, MemOperand(fp, ExitFrameConstants::kSPOffset)); } -int TurboAssembler::ActivationFrameAlignment() { +int MacroAssembler::ActivationFrameAlignment() { #if !defined(USE_SIMULATOR) // Running on the real platform. Use the alignment as mandated by the local // environment. @@ -1472,11 +1444,11 @@ void MacroAssembler::LeaveExitFrame(Register argument_count, } } -void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) { +void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) { Move(dst, d1); } -void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) { +void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) { Move(dst, d1); } @@ -1487,10 +1459,10 @@ void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) { kind == StackLimitKind::kRealStackLimit ? ExternalReference::address_of_real_jslimit(isolate) : ExternalReference::address_of_jslimit(isolate); - DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit)); + DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit)); intptr_t offset = - TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit); + MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit); CHECK(is_int32(offset)); LoadU64(destination, MemOperand(kRootRegister, offset), r0); } @@ -1645,8 +1617,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, // allow recompilation to take effect without changing any of the // call sites. 
Register code = kJavaScriptCallCodeStartRegister; - LoadTaggedPointerField( - code, FieldMemOperand(function, JSFunction::kCodeOffset), r0); + LoadTaggedField(code, FieldMemOperand(function, JSFunction::kCodeOffset), r0); switch (type) { case InvokeType::kCall: CallCodeObject(code); @@ -1673,10 +1644,9 @@ void MacroAssembler::InvokeFunctionWithNewTarget( Register expected_reg = r5; Register temp_reg = r7; - LoadTaggedPointerField( + LoadTaggedField( temp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0); - LoadTaggedPointerField(cp, FieldMemOperand(r4, JSFunction::kContextOffset), - r0); + LoadTaggedField(cp, FieldMemOperand(r4, JSFunction::kContextOffset), r0); LoadU16(expected_reg, FieldMemOperand(temp_reg, SharedFunctionInfo::kFormalParameterCountOffset)); @@ -1696,8 +1666,7 @@ void MacroAssembler::InvokeFunction(Register function, DCHECK_EQ(function, r4); // Get the function and setup the context. - LoadTaggedPointerField(cp, FieldMemOperand(r4, JSFunction::kContextOffset), - r0); + LoadTaggedField(cp, FieldMemOperand(r4, JSFunction::kContextOffset), r0); InvokeFunctionCode(r4, no_reg, expected_parameter_count, actual_parameter_count, type); @@ -1783,7 +1752,7 @@ void MacroAssembler::CompareRoot(Register obj, RootIndex index) { CmpS64(obj, r0); } -void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left, +void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left, Register right, Register overflow_dst, Register scratch) { @@ -1815,7 +1784,7 @@ void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left, if (!left_is_right) and_(overflow_dst, scratch, overflow_dst, SetRC); } -void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left, +void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left, intptr_t right, Register overflow_dst, Register scratch) { @@ -1840,7 +1809,7 @@ void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left, } } -void TurboAssembler::SubAndCheckForOverflow(Register dst, Register left, +void MacroAssembler::SubAndCheckForOverflow(Register dst, Register left, Register right, Register overflow_dst, Register scratch) { @@ -1871,7 +1840,7 @@ void TurboAssembler::SubAndCheckForOverflow(Register dst, Register left, } } -void TurboAssembler::MinF64(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::MinF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, DoubleRegister scratch) { Label check_zero, return_left, return_right, return_nan, done; fcmpu(lhs, rhs); @@ -1919,7 +1888,7 @@ void TurboAssembler::MinF64(DoubleRegister dst, DoubleRegister lhs, bind(&done); } -void TurboAssembler::MaxF64(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::MaxF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, DoubleRegister scratch) { Label check_zero, return_left, return_right, return_nan, done; fcmpu(lhs, rhs); @@ -1965,7 +1934,7 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit, ble(on_in_range); } -void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, +void MacroAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result, DoubleRegister double_input, StubCallMode stub_mode) { @@ -1998,7 +1967,7 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, bind(&done); } -void TurboAssembler::TryInlineTruncateDoubleToI(Register result, +void MacroAssembler::TryInlineTruncateDoubleToI(Register result, DoubleRegister double_input, Label* done) { DoubleRegister double_scratch = 
kScratchDoubleReg; @@ -2163,10 +2132,10 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot( bind(&maybe_has_optimized_code); Register optimized_code_entry = flags; - LoadAnyTaggedField(optimized_code_entry, - FieldMemOperand(feedback_vector, - FeedbackVector::kMaybeOptimizedCodeOffset), - r0); + LoadTaggedField(optimized_code_entry, + FieldMemOperand(feedback_vector, + FeedbackVector::kMaybeOptimizedCodeOffset), + r0); TailCallOptimizedCodeSlot(this, optimized_code_entry, r9); } @@ -2254,7 +2223,7 @@ void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value, } } -void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) { +void MacroAssembler::Check(Condition cond, AbortReason reason, CRegister cr) { Label L; b(cond, &L, cr); Abort(reason); @@ -2262,7 +2231,7 @@ void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) { bind(&L); } -void TurboAssembler::Abort(AbortReason reason) { +void MacroAssembler::Abort(AbortReason reason) { Label abort_start; bind(&abort_start); if (v8_flags.code_comments) { @@ -2306,26 +2275,26 @@ void TurboAssembler::Abort(AbortReason reason) { // will not return here } -void TurboAssembler::LoadMap(Register destination, Register object) { - LoadTaggedPointerField(destination, - FieldMemOperand(object, HeapObject::kMapOffset), r0); +void MacroAssembler::LoadMap(Register destination, Register object) { + LoadTaggedField(destination, FieldMemOperand(object, HeapObject::kMapOffset), + r0); } void MacroAssembler::LoadNativeContextSlot(Register dst, int index) { LoadMap(dst, cp); - LoadTaggedPointerField( + LoadTaggedField( dst, FieldMemOperand(dst, Map::kConstructorOrBackPointerOrNativeContextOffset), r0); - LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index)), r0); + LoadTaggedField(dst, MemOperand(dst, Context::SlotOffset(index)), r0); } #ifdef V8_ENABLE_DEBUG_CODE -void TurboAssembler::Assert(Condition cond, AbortReason reason, CRegister cr) { +void MacroAssembler::Assert(Condition cond, AbortReason reason, CRegister cr) { if (v8_flags.debug_code) Check(cond, reason, cr); } -void TurboAssembler::AssertNotSmi(Register object) { +void MacroAssembler::AssertNotSmi(Register object) { if (v8_flags.debug_code) { static_assert(kSmiTag == 0); TestIfSmi(object, r0); @@ -2333,7 +2302,7 @@ void TurboAssembler::AssertNotSmi(Register object) { } } -void TurboAssembler::AssertSmi(Register object) { +void MacroAssembler::AssertSmi(Register object) { if (v8_flags.debug_code) { static_assert(kSmiTag == 0); TestIfSmi(object, r0); @@ -2431,7 +2400,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, static const int kRegisterPassedArguments = 8; -int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments, +int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments, int num_double_arguments) { int stack_passed_words = 0; if (num_double_arguments > DoubleRegister::kNumRegisters) { @@ -2445,7 +2414,7 @@ int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments, return stack_passed_words; } -void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, +void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, int num_double_arguments, Register scratch) { int frame_alignment = ActivationFrameAlignment(); @@ -2474,16 +2443,16 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, StoreU64WithUpdate(r0, MemOperand(sp, -stack_space * kSystemPointerSize)); } -void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, +void 
MacroAssembler::PrepareCallCFunction(int num_reg_arguments, Register scratch) { PrepareCallCFunction(num_reg_arguments, 0, scratch); } -void TurboAssembler::MovToFloatParameter(DoubleRegister src) { Move(d1, src); } +void MacroAssembler::MovToFloatParameter(DoubleRegister src) { Move(d1, src); } -void TurboAssembler::MovToFloatResult(DoubleRegister src) { Move(d1, src); } +void MacroAssembler::MovToFloatResult(DoubleRegister src) { Move(d1, src); } -void TurboAssembler::MovToFloatParameters(DoubleRegister src1, +void MacroAssembler::MovToFloatParameters(DoubleRegister src1, DoubleRegister src2) { if (src2 == d1) { DCHECK(src1 != d2); @@ -2495,7 +2464,7 @@ void TurboAssembler::MovToFloatParameters(DoubleRegister src1, } } -void TurboAssembler::CallCFunction(ExternalReference function, +void MacroAssembler::CallCFunction(ExternalReference function, int num_reg_arguments, int num_double_arguments, bool has_function_descriptor) { @@ -2504,25 +2473,25 @@ void TurboAssembler::CallCFunction(ExternalReference function, has_function_descriptor); } -void TurboAssembler::CallCFunction(Register function, int num_reg_arguments, +void MacroAssembler::CallCFunction(Register function, int num_reg_arguments, int num_double_arguments, bool has_function_descriptor) { CallCFunctionHelper(function, num_reg_arguments, num_double_arguments, has_function_descriptor); } -void TurboAssembler::CallCFunction(ExternalReference function, +void MacroAssembler::CallCFunction(ExternalReference function, int num_arguments, bool has_function_descriptor) { CallCFunction(function, num_arguments, 0, has_function_descriptor); } -void TurboAssembler::CallCFunction(Register function, int num_arguments, +void MacroAssembler::CallCFunction(Register function, int num_arguments, bool has_function_descriptor) { CallCFunction(function, num_arguments, 0, has_function_descriptor); } -void TurboAssembler::CallCFunctionHelper(Register function, +void MacroAssembler::CallCFunctionHelper(Register function, int num_reg_arguments, int num_double_arguments, bool has_function_descriptor) { @@ -2605,7 +2574,7 @@ void TurboAssembler::CallCFunctionHelper(Register function, } } -void TurboAssembler::CheckPageFlag( +void MacroAssembler::CheckPageFlag( Register object, Register scratch, // scratch may be same register as object int mask, Condition cc, Label* condition_met) { @@ -2625,9 +2594,9 @@ void TurboAssembler::CheckPageFlag( } } -void TurboAssembler::SetRoundingMode(FPRoundingMode RN) { mtfsfi(7, RN); } +void MacroAssembler::SetRoundingMode(FPRoundingMode RN) { mtfsfi(7, RN); } -void TurboAssembler::ResetRoundingMode() { +void MacroAssembler::ResetRoundingMode() { mtfsfi(7, kRoundToNearest); // reset (default is kRoundToNearest) } @@ -2636,15 +2605,15 @@ void TurboAssembler::ResetRoundingMode() { // New MacroAssembler Interfaces added for PPC // //////////////////////////////////////////////////////////////////////////////// -void TurboAssembler::LoadIntLiteral(Register dst, int value) { +void MacroAssembler::LoadIntLiteral(Register dst, int value) { mov(dst, Operand(value)); } -void TurboAssembler::LoadSmiLiteral(Register dst, Smi smi) { +void MacroAssembler::LoadSmiLiteral(Register dst, Smi smi) { mov(dst, Operand(smi)); } -void TurboAssembler::LoadDoubleLiteral(DoubleRegister result, +void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, base::Double value, Register scratch) { if (V8_EMBEDDED_CONSTANT_POOL_BOOL && is_constant_pool_available() && !(scratch == r0 && ConstantPoolAccessIsInOverflow())) { @@ -2693,7 +2662,7 @@ void 
TurboAssembler::LoadDoubleLiteral(DoubleRegister result, addi(sp, sp, Operand(kDoubleSize)); } -void TurboAssembler::MovIntToDouble(DoubleRegister dst, Register src, +void MacroAssembler::MovIntToDouble(DoubleRegister dst, Register src, Register scratch) { // sign-extend src to 64-bit #if V8_TARGET_ARCH_PPC64 @@ -2718,7 +2687,7 @@ void TurboAssembler::MovIntToDouble(DoubleRegister dst, Register src, addi(sp, sp, Operand(kDoubleSize)); } -void TurboAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src, +void MacroAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src, Register scratch) { // zero-extend src to 64-bit #if V8_TARGET_ARCH_PPC64 @@ -2743,7 +2712,7 @@ void TurboAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src, addi(sp, sp, Operand(kDoubleSize)); } -void TurboAssembler::MovInt64ToDouble(DoubleRegister dst, +void MacroAssembler::MovInt64ToDouble(DoubleRegister dst, #if !V8_TARGET_ARCH_PPC64 Register src_hi, #endif @@ -2768,7 +2737,7 @@ void TurboAssembler::MovInt64ToDouble(DoubleRegister dst, } #if V8_TARGET_ARCH_PPC64 -void TurboAssembler::MovInt64ComponentsToDouble(DoubleRegister dst, +void MacroAssembler::MovInt64ComponentsToDouble(DoubleRegister dst, Register src_hi, Register src_lo, Register scratch) { @@ -2788,7 +2757,7 @@ void TurboAssembler::MovInt64ComponentsToDouble(DoubleRegister dst, } #endif -void TurboAssembler::InsertDoubleLow(DoubleRegister dst, Register src, +void MacroAssembler::InsertDoubleLow(DoubleRegister dst, Register src, Register scratch) { #if V8_TARGET_ARCH_PPC64 if (CpuFeatures::IsSupported(PPC_8_PLUS)) { @@ -2807,7 +2776,7 @@ void TurboAssembler::InsertDoubleLow(DoubleRegister dst, Register src, addi(sp, sp, Operand(kDoubleSize)); } -void TurboAssembler::InsertDoubleHigh(DoubleRegister dst, Register src, +void MacroAssembler::InsertDoubleHigh(DoubleRegister dst, Register src, Register scratch) { #if V8_TARGET_ARCH_PPC64 if (CpuFeatures::IsSupported(PPC_8_PLUS)) { @@ -2826,7 +2795,7 @@ void TurboAssembler::InsertDoubleHigh(DoubleRegister dst, Register src, addi(sp, sp, Operand(kDoubleSize)); } -void TurboAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) { +void MacroAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) { #if V8_TARGET_ARCH_PPC64 if (CpuFeatures::IsSupported(PPC_8_PLUS)) { mffprwz(dst, src); @@ -2841,7 +2810,7 @@ void TurboAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) { addi(sp, sp, Operand(kDoubleSize)); } -void TurboAssembler::MovDoubleHighToInt(Register dst, DoubleRegister src) { +void MacroAssembler::MovDoubleHighToInt(Register dst, DoubleRegister src) { #if V8_TARGET_ARCH_PPC64 if (CpuFeatures::IsSupported(PPC_8_PLUS)) { mffprd(dst, src); @@ -2857,7 +2826,7 @@ void TurboAssembler::MovDoubleHighToInt(Register dst, DoubleRegister src) { addi(sp, sp, Operand(kDoubleSize)); } -void TurboAssembler::MovDoubleToInt64( +void MacroAssembler::MovDoubleToInt64( #if !V8_TARGET_ARCH_PPC64 Register dst_hi, #endif @@ -2881,7 +2850,7 @@ void TurboAssembler::MovDoubleToInt64( addi(sp, sp, Operand(kDoubleSize)); } -void TurboAssembler::MovIntToFloat(DoubleRegister dst, Register src, +void MacroAssembler::MovIntToFloat(DoubleRegister dst, Register src, Register scratch) { if (CpuFeatures::IsSupported(PPC_8_PLUS)) { ShiftLeftU64(scratch, src, Operand(32)); @@ -2896,7 +2865,7 @@ void TurboAssembler::MovIntToFloat(DoubleRegister dst, Register src, addi(sp, sp, Operand(kFloatSize)); } -void TurboAssembler::MovFloatToInt(Register dst, DoubleRegister src, +void 
MacroAssembler::MovFloatToInt(Register dst, DoubleRegister src, DoubleRegister scratch) { if (CpuFeatures::IsSupported(PPC_8_PLUS)) { xscvdpspn(scratch, src); @@ -2910,12 +2879,12 @@ void TurboAssembler::MovFloatToInt(Register dst, DoubleRegister src, addi(sp, sp, Operand(kFloatSize)); } -void TurboAssembler::AddS64(Register dst, Register src, Register value, OEBit s, +void MacroAssembler::AddS64(Register dst, Register src, Register value, OEBit s, RCBit r) { add(dst, src, value, s, r); } -void TurboAssembler::AddS64(Register dst, Register src, const Operand& value, +void MacroAssembler::AddS64(Register dst, Register src, const Operand& value, Register scratch, OEBit s, RCBit r) { if (is_int16(value.immediate()) && s == LeaveOE && r == LeaveRC) { addi(dst, src, value); @@ -2925,12 +2894,12 @@ void TurboAssembler::AddS64(Register dst, Register src, const Operand& value, } } -void TurboAssembler::SubS64(Register dst, Register src, Register value, OEBit s, +void MacroAssembler::SubS64(Register dst, Register src, Register value, OEBit s, RCBit r) { sub(dst, src, value, s, r); } -void TurboAssembler::SubS64(Register dst, Register src, const Operand& value, +void MacroAssembler::SubS64(Register dst, Register src, const Operand& value, Register scratch, OEBit s, RCBit r) { if (is_int16(value.immediate()) && s == LeaveOE && r == LeaveRC) { subi(dst, src, value); @@ -2940,31 +2909,31 @@ void TurboAssembler::SubS64(Register dst, Register src, const Operand& value, } } -void TurboAssembler::AddS32(Register dst, Register src, Register value, +void MacroAssembler::AddS32(Register dst, Register src, Register value, RCBit r) { AddS64(dst, src, value, LeaveOE, r); extsw(dst, dst, r); } -void TurboAssembler::AddS32(Register dst, Register src, const Operand& value, +void MacroAssembler::AddS32(Register dst, Register src, const Operand& value, Register scratch, RCBit r) { AddS64(dst, src, value, scratch, LeaveOE, r); extsw(dst, dst, r); } -void TurboAssembler::SubS32(Register dst, Register src, Register value, +void MacroAssembler::SubS32(Register dst, Register src, Register value, RCBit r) { SubS64(dst, src, value, LeaveOE, r); extsw(dst, dst, r); } -void TurboAssembler::SubS32(Register dst, Register src, const Operand& value, +void MacroAssembler::SubS32(Register dst, Register src, const Operand& value, Register scratch, RCBit r) { SubS64(dst, src, value, scratch, LeaveOE, r); extsw(dst, dst, r); } -void TurboAssembler::MulS64(Register dst, Register src, const Operand& value, +void MacroAssembler::MulS64(Register dst, Register src, const Operand& value, Register scratch, OEBit s, RCBit r) { if (is_int16(value.immediate()) && s == LeaveOE && r == LeaveRC) { mulli(dst, src, value); @@ -2974,45 +2943,45 @@ void TurboAssembler::MulS64(Register dst, Register src, const Operand& value, } } -void TurboAssembler::MulS64(Register dst, Register src, Register value, OEBit s, +void MacroAssembler::MulS64(Register dst, Register src, Register value, OEBit s, RCBit r) { mulld(dst, src, value, s, r); } -void TurboAssembler::MulS32(Register dst, Register src, const Operand& value, +void MacroAssembler::MulS32(Register dst, Register src, const Operand& value, Register scratch, OEBit s, RCBit r) { MulS64(dst, src, value, scratch, s, r); extsw(dst, dst, r); } -void TurboAssembler::MulS32(Register dst, Register src, Register value, OEBit s, +void MacroAssembler::MulS32(Register dst, Register src, Register value, OEBit s, RCBit r) { MulS64(dst, src, value, s, r); extsw(dst, dst, r); } -void TurboAssembler::DivS64(Register dst, 
Register src, Register value, OEBit s, +void MacroAssembler::DivS64(Register dst, Register src, Register value, OEBit s, RCBit r) { divd(dst, src, value, s, r); } -void TurboAssembler::DivU64(Register dst, Register src, Register value, OEBit s, +void MacroAssembler::DivU64(Register dst, Register src, Register value, OEBit s, RCBit r) { divdu(dst, src, value, s, r); } -void TurboAssembler::DivS32(Register dst, Register src, Register value, OEBit s, +void MacroAssembler::DivS32(Register dst, Register src, Register value, OEBit s, RCBit r) { divw(dst, src, value, s, r); extsw(dst, dst); } -void TurboAssembler::DivU32(Register dst, Register src, Register value, OEBit s, +void MacroAssembler::DivU32(Register dst, Register src, Register value, OEBit s, RCBit r) { divwu(dst, src, value, s, r); ZeroExtWord32(dst, dst); } -void TurboAssembler::ModS64(Register dst, Register src, Register value) { +void MacroAssembler::ModS64(Register dst, Register src, Register value) { if (CpuFeatures::IsSupported(PPC_9_PLUS)) { modsd(dst, src, value); } else { @@ -3025,7 +2994,7 @@ void TurboAssembler::ModS64(Register dst, Register src, Register value) { } } -void TurboAssembler::ModU64(Register dst, Register src, Register value) { +void MacroAssembler::ModU64(Register dst, Register src, Register value) { if (CpuFeatures::IsSupported(PPC_9_PLUS)) { modud(dst, src, value); } else { @@ -3038,7 +3007,7 @@ void TurboAssembler::ModU64(Register dst, Register src, Register value) { } } -void TurboAssembler::ModS32(Register dst, Register src, Register value) { +void MacroAssembler::ModS32(Register dst, Register src, Register value) { if (CpuFeatures::IsSupported(PPC_9_PLUS)) { modsw(dst, src, value); } else { @@ -3051,7 +3020,7 @@ void TurboAssembler::ModS32(Register dst, Register src, Register value) { } extsw(dst, dst); } -void TurboAssembler::ModU32(Register dst, Register src, Register value) { +void MacroAssembler::ModU32(Register dst, Register src, Register value) { if (CpuFeatures::IsSupported(PPC_9_PLUS)) { moduw(dst, src, value); } else { @@ -3065,7 +3034,7 @@ void TurboAssembler::ModU32(Register dst, Register src, Register value) { ZeroExtWord32(dst, dst); } -void TurboAssembler::AndU64(Register dst, Register src, const Operand& value, +void MacroAssembler::AndU64(Register dst, Register src, const Operand& value, Register scratch, RCBit r) { if (is_uint16(value.immediate()) && r == SetRC) { andi(dst, src, value); @@ -3075,12 +3044,12 @@ void TurboAssembler::AndU64(Register dst, Register src, const Operand& value, } } -void TurboAssembler::AndU64(Register dst, Register src, Register value, +void MacroAssembler::AndU64(Register dst, Register src, Register value, RCBit r) { and_(dst, src, value, r); } -void TurboAssembler::OrU64(Register dst, Register src, const Operand& value, +void MacroAssembler::OrU64(Register dst, Register src, const Operand& value, Register scratch, RCBit r) { if (is_int16(value.immediate()) && r == LeaveRC) { ori(dst, src, value); @@ -3090,12 +3059,12 @@ void TurboAssembler::OrU64(Register dst, Register src, const Operand& value, } } -void TurboAssembler::OrU64(Register dst, Register src, Register value, +void MacroAssembler::OrU64(Register dst, Register src, Register value, RCBit r) { orx(dst, src, value, r); } -void TurboAssembler::XorU64(Register dst, Register src, const Operand& value, +void MacroAssembler::XorU64(Register dst, Register src, const Operand& value, Register scratch, RCBit r) { if (is_int16(value.immediate()) && r == LeaveRC) { xori(dst, src, value); @@ -3105,112 +3074,112 
@@ void TurboAssembler::XorU64(Register dst, Register src, const Operand& value, } } -void TurboAssembler::XorU64(Register dst, Register src, Register value, +void MacroAssembler::XorU64(Register dst, Register src, Register value, RCBit r) { xor_(dst, src, value, r); } -void TurboAssembler::AndU32(Register dst, Register src, const Operand& value, +void MacroAssembler::AndU32(Register dst, Register src, const Operand& value, Register scratch, RCBit r) { AndU64(dst, src, value, scratch, r); extsw(dst, dst, r); } -void TurboAssembler::AndU32(Register dst, Register src, Register value, +void MacroAssembler::AndU32(Register dst, Register src, Register value, RCBit r) { AndU64(dst, src, value, r); extsw(dst, dst, r); } -void TurboAssembler::OrU32(Register dst, Register src, const Operand& value, +void MacroAssembler::OrU32(Register dst, Register src, const Operand& value, Register scratch, RCBit r) { OrU64(dst, src, value, scratch, r); extsw(dst, dst, r); } -void TurboAssembler::OrU32(Register dst, Register src, Register value, +void MacroAssembler::OrU32(Register dst, Register src, Register value, RCBit r) { OrU64(dst, src, value, r); extsw(dst, dst, r); } -void TurboAssembler::XorU32(Register dst, Register src, const Operand& value, +void MacroAssembler::XorU32(Register dst, Register src, const Operand& value, Register scratch, RCBit r) { XorU64(dst, src, value, scratch, r); extsw(dst, dst, r); } -void TurboAssembler::XorU32(Register dst, Register src, Register value, +void MacroAssembler::XorU32(Register dst, Register src, Register value, RCBit r) { XorU64(dst, src, value, r); extsw(dst, dst, r); } -void TurboAssembler::ShiftLeftU64(Register dst, Register src, +void MacroAssembler::ShiftLeftU64(Register dst, Register src, const Operand& value, RCBit r) { sldi(dst, src, value, r); } -void TurboAssembler::ShiftRightU64(Register dst, Register src, +void MacroAssembler::ShiftRightU64(Register dst, Register src, const Operand& value, RCBit r) { srdi(dst, src, value, r); } -void TurboAssembler::ShiftRightS64(Register dst, Register src, +void MacroAssembler::ShiftRightS64(Register dst, Register src, const Operand& value, RCBit r) { sradi(dst, src, value.immediate(), r); } -void TurboAssembler::ShiftLeftU32(Register dst, Register src, +void MacroAssembler::ShiftLeftU32(Register dst, Register src, const Operand& value, RCBit r) { slwi(dst, src, value, r); } -void TurboAssembler::ShiftRightU32(Register dst, Register src, +void MacroAssembler::ShiftRightU32(Register dst, Register src, const Operand& value, RCBit r) { srwi(dst, src, value, r); } -void TurboAssembler::ShiftRightS32(Register dst, Register src, +void MacroAssembler::ShiftRightS32(Register dst, Register src, const Operand& value, RCBit r) { srawi(dst, src, value.immediate(), r); } -void TurboAssembler::ShiftLeftU64(Register dst, Register src, Register value, +void MacroAssembler::ShiftLeftU64(Register dst, Register src, Register value, RCBit r) { sld(dst, src, value, r); } -void TurboAssembler::ShiftRightU64(Register dst, Register src, Register value, +void MacroAssembler::ShiftRightU64(Register dst, Register src, Register value, RCBit r) { srd(dst, src, value, r); } -void TurboAssembler::ShiftRightS64(Register dst, Register src, Register value, +void MacroAssembler::ShiftRightS64(Register dst, Register src, Register value, RCBit r) { srad(dst, src, value, r); } -void TurboAssembler::ShiftLeftU32(Register dst, Register src, Register value, +void MacroAssembler::ShiftLeftU32(Register dst, Register src, Register value, RCBit r) { slw(dst, src, 
value, r); } -void TurboAssembler::ShiftRightU32(Register dst, Register src, Register value, +void MacroAssembler::ShiftRightU32(Register dst, Register src, Register value, RCBit r) { srw(dst, src, value, r); } -void TurboAssembler::ShiftRightS32(Register dst, Register src, Register value, +void MacroAssembler::ShiftRightS32(Register dst, Register src, Register value, RCBit r) { sraw(dst, src, value, r); } -void TurboAssembler::CmpS64(Register src1, Register src2, CRegister cr) { +void MacroAssembler::CmpS64(Register src1, Register src2, CRegister cr) { cmp(src1, src2, cr); } -void TurboAssembler::CmpS64(Register src1, const Operand& src2, +void MacroAssembler::CmpS64(Register src1, const Operand& src2, Register scratch, CRegister cr) { intptr_t value = src2.immediate(); if (is_int16(value)) { @@ -3221,7 +3190,7 @@ void TurboAssembler::CmpS64(Register src1, const Operand& src2, } } -void TurboAssembler::CmpU64(Register src1, const Operand& src2, +void MacroAssembler::CmpU64(Register src1, const Operand& src2, Register scratch, CRegister cr) { intptr_t value = src2.immediate(); if (is_uint16(value)) { @@ -3232,11 +3201,11 @@ void TurboAssembler::CmpU64(Register src1, const Operand& src2, } } -void TurboAssembler::CmpU64(Register src1, Register src2, CRegister cr) { +void MacroAssembler::CmpU64(Register src1, Register src2, CRegister cr) { cmpl(src1, src2, cr); } -void TurboAssembler::CmpS32(Register src1, const Operand& src2, +void MacroAssembler::CmpS32(Register src1, const Operand& src2, Register scratch, CRegister cr) { intptr_t value = src2.immediate(); if (is_int16(value)) { @@ -3247,11 +3216,11 @@ void TurboAssembler::CmpS32(Register src1, const Operand& src2, } } -void TurboAssembler::CmpS32(Register src1, Register src2, CRegister cr) { +void MacroAssembler::CmpS32(Register src1, Register src2, CRegister cr) { cmpw(src1, src2, cr); } -void TurboAssembler::CmpU32(Register src1, const Operand& src2, +void MacroAssembler::CmpU32(Register src1, const Operand& src2, Register scratch, CRegister cr) { intptr_t value = src2.immediate(); if (is_uint16(value)) { @@ -3262,55 +3231,55 @@ void TurboAssembler::CmpU32(Register src1, const Operand& src2, } } -void TurboAssembler::CmpU32(Register src1, Register src2, CRegister cr) { +void MacroAssembler::CmpU32(Register src1, Register src2, CRegister cr) { cmplw(src1, src2, cr); } -void TurboAssembler::AddF64(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::AddF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r) { fadd(dst, lhs, rhs, r); } -void TurboAssembler::SubF64(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::SubF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r) { fsub(dst, lhs, rhs, r); } -void TurboAssembler::MulF64(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::MulF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r) { fmul(dst, lhs, rhs, r); } -void TurboAssembler::DivF64(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::DivF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r) { fdiv(dst, lhs, rhs, r); } -void TurboAssembler::AddF32(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::AddF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r) { fadd(dst, lhs, rhs, r); frsp(dst, dst, r); } -void TurboAssembler::SubF32(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::SubF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r) { fsub(dst, lhs, rhs, r); 
frsp(dst, dst, r); } -void TurboAssembler::MulF32(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::MulF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r) { fmul(dst, lhs, rhs, r); frsp(dst, dst, r); } -void TurboAssembler::DivF32(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::DivF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r) { fdiv(dst, lhs, rhs, r); frsp(dst, dst, r); } -void TurboAssembler::CopySignF64(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::CopySignF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs, RCBit r) { fcpsgn(dst, rhs, lhs, r); } @@ -3513,7 +3482,7 @@ void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi smi, V(StoreU64WithUpdate, stdu, stdux) #define MEM_OP_WITH_ALIGN_FUNCTION(name, ri_op, rr_op) \ - void TurboAssembler::name(Register reg, const MemOperand& mem, \ + void MacroAssembler::name(Register reg, const MemOperand& mem, \ Register scratch) { \ GenerateMemoryOperationWithAlign(reg, mem, ri_op, rr_op); \ } @@ -3527,7 +3496,7 @@ MEM_OP_WITH_ALIGN_LIST(MEM_OP_WITH_ALIGN_FUNCTION) V(StoreU64, std, pstd, stdx) #define MEM_OP_WITH_ALIGN_PREFIXED_FUNCTION(name, ri_op, rip_op, rr_op) \ - void TurboAssembler::name(Register reg, const MemOperand& mem, \ + void MacroAssembler::name(Register reg, const MemOperand& mem, \ Register scratch) { \ GenerateMemoryOperationWithAlignPrefixed(reg, mem, ri_op, rip_op, rr_op); \ } @@ -3542,7 +3511,7 @@ MEM_OP_WITH_ALIGN_PREFIXED_LIST(MEM_OP_WITH_ALIGN_PREFIXED_FUNCTION) V(StoreF32WithUpdate, DoubleRegister, stfsu, stfsux) #define MEM_OP_FUNCTION(name, result_t, ri_op, rr_op) \ - void TurboAssembler::name(result_t reg, const MemOperand& mem, \ + void MacroAssembler::name(result_t reg, const MemOperand& mem, \ Register scratch) { \ GenerateMemoryOperation(reg, mem, ri_op, rr_op); \ } @@ -3564,7 +3533,7 @@ MEM_OP_LIST(MEM_OP_FUNCTION) V(StoreF32, DoubleRegister, stfs, pstfs, stfsx) #define MEM_OP_PREFIXED_FUNCTION(name, result_t, ri_op, rip_op, rr_op) \ - void TurboAssembler::name(result_t reg, const MemOperand& mem, \ + void MacroAssembler::name(result_t reg, const MemOperand& mem, \ Register scratch) { \ GenerateMemoryOperationPrefixed(reg, mem, ri_op, rip_op, rr_op); \ } @@ -3581,7 +3550,7 @@ MEM_OP_PREFIXED_LIST(MEM_OP_PREFIXED_FUNCTION) V(LoadSimd128Uint8, lxsibzx) #define MEM_OP_SIMD_FUNCTION(name, rr_op) \ - void TurboAssembler::name(Simd128Register reg, const MemOperand& mem, \ + void MacroAssembler::name(Simd128Register reg, const MemOperand& mem, \ Register scratch) { \ GenerateMemoryOperationRR(reg, mem, rr_op); \ } @@ -3589,7 +3558,7 @@ MEM_OP_SIMD_LIST(MEM_OP_SIMD_FUNCTION) #undef MEM_OP_SIMD_LIST #undef MEM_OP_SIMD_FUNCTION -void TurboAssembler::LoadS8(Register dst, const MemOperand& mem, +void MacroAssembler::LoadS8(Register dst, const MemOperand& mem, Register scratch) { LoadU8(dst, mem, scratch); extsb(dst, dst); @@ -3605,13 +3574,13 @@ void TurboAssembler::LoadS8(Register dst, const MemOperand& mem, #ifdef V8_TARGET_BIG_ENDIAN #define MEM_LE_OP_FUNCTION(name, op) \ - void TurboAssembler::name##LE(Register reg, const MemOperand& mem, \ + void MacroAssembler::name##LE(Register reg, const MemOperand& mem, \ Register scratch) { \ GenerateMemoryOperationRR(reg, mem, op); \ } #else #define MEM_LE_OP_FUNCTION(name, op) \ - void TurboAssembler::name##LE(Register reg, const MemOperand& mem, \ + void MacroAssembler::name##LE(Register reg, const MemOperand& mem, \ Register scratch) { \ name(reg, mem, scratch); \ } @@ 
-3621,7 +3590,7 @@ MEM_LE_OP_LIST(MEM_LE_OP_FUNCTION) #undef MEM_LE_OP_FUNCTION #undef MEM_LE_OP_LIST -void TurboAssembler::LoadS32LE(Register dst, const MemOperand& mem, +void MacroAssembler::LoadS32LE(Register dst, const MemOperand& mem, Register scratch) { #ifdef V8_TARGET_BIG_ENDIAN LoadU32LE(dst, mem, scratch); @@ -3631,7 +3600,7 @@ void TurboAssembler::LoadS32LE(Register dst, const MemOperand& mem, #endif } -void TurboAssembler::LoadS16LE(Register dst, const MemOperand& mem, +void MacroAssembler::LoadS16LE(Register dst, const MemOperand& mem, Register scratch) { #ifdef V8_TARGET_BIG_ENDIAN LoadU16LE(dst, mem, scratch); @@ -3641,7 +3610,7 @@ void TurboAssembler::LoadS16LE(Register dst, const MemOperand& mem, #endif } -void TurboAssembler::LoadF64LE(DoubleRegister dst, const MemOperand& mem, +void MacroAssembler::LoadF64LE(DoubleRegister dst, const MemOperand& mem, Register scratch, Register scratch2) { #ifdef V8_TARGET_BIG_ENDIAN LoadU64LE(scratch, mem, scratch2); @@ -3653,7 +3622,7 @@ void TurboAssembler::LoadF64LE(DoubleRegister dst, const MemOperand& mem, #endif } -void TurboAssembler::LoadF32LE(DoubleRegister dst, const MemOperand& mem, +void MacroAssembler::LoadF32LE(DoubleRegister dst, const MemOperand& mem, Register scratch, Register scratch2) { #ifdef V8_TARGET_BIG_ENDIAN LoadU32LE(scratch, mem, scratch2); @@ -3665,7 +3634,7 @@ void TurboAssembler::LoadF32LE(DoubleRegister dst, const MemOperand& mem, #endif } -void TurboAssembler::StoreF64LE(DoubleRegister dst, const MemOperand& mem, +void MacroAssembler::StoreF64LE(DoubleRegister dst, const MemOperand& mem, Register scratch, Register scratch2) { #ifdef V8_TARGET_BIG_ENDIAN StoreF64(dst, mem, scratch2); @@ -3676,7 +3645,7 @@ void TurboAssembler::StoreF64LE(DoubleRegister dst, const MemOperand& mem, #endif } -void TurboAssembler::StoreF32LE(DoubleRegister dst, const MemOperand& mem, +void MacroAssembler::StoreF32LE(DoubleRegister dst, const MemOperand& mem, Register scratch, Register scratch2) { #ifdef V8_TARGET_BIG_ENDIAN StoreF32(dst, mem, scratch2); @@ -3749,7 +3718,7 @@ void TurboAssembler::StoreF32LE(DoubleRegister dst, const MemOperand& mem, V(S128AndNot, vandc) #define EMIT_SIMD_BINOP(name, op) \ - void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \ + void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \ Simd128Register src2) { \ op(dst, src1, src2); \ } @@ -3772,13 +3741,13 @@ SIMD_BINOP_LIST(EMIT_SIMD_BINOP) V(I8x16ShrU, vsrb) #define EMIT_SIMD_SHIFT(name, op) \ - void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \ + void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \ Register src2, Simd128Register scratch) { \ mtvsrd(scratch, src2); \ vspltb(scratch, scratch, Operand(7)); \ op(dst, src1, scratch); \ } \ - void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \ + void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \ const Operand& src2, Register scratch1, \ Simd128Register scratch2) { \ mov(scratch1, src2); \ @@ -3815,7 +3784,7 @@ SIMD_SHIFT_LIST(EMIT_SIMD_SHIFT) V(I8x16Popcnt, vpopcntb) #define EMIT_SIMD_UNOP(name, op) \ - void TurboAssembler::name(Simd128Register dst, Simd128Register src) { \ + void MacroAssembler::name(Simd128Register dst, Simd128Register src) { \ op(dst, src); \ } SIMD_UNOP_LIST(EMIT_SIMD_UNOP) @@ -3836,7 +3805,7 @@ SIMD_UNOP_LIST(EMIT_SIMD_UNOP) V(I16x8ExtMulHighI8x16U, vmuleub, vmuloub, vmrghh) #define EMIT_SIMD_EXT_MUL(name, mul_even, mul_odd, merge) \ - void 
TurboAssembler::name(Simd128Register dst, Simd128Register src1, \ + void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \ Simd128Register src2, Simd128Register scratch) { \ EXT_MUL(scratch, dst, mul_even, mul_odd) \ merge(dst, scratch, dst); \ @@ -3852,7 +3821,7 @@ SIMD_EXT_MUL_LIST(EMIT_SIMD_EXT_MUL) V(I8x16AllTrue, vcmpgtub) #define EMIT_SIMD_ALL_TRUE(name, op) \ - void TurboAssembler::name(Register dst, Simd128Register src, \ + void MacroAssembler::name(Register dst, Simd128Register src, \ Register scratch1, Register scratch2, \ Simd128Register scratch3) { \ constexpr uint8_t fxm = 0x2; /* field mask. */ \ @@ -3875,7 +3844,7 @@ SIMD_ALL_TRUE_LIST(EMIT_SIMD_ALL_TRUE) V(I16x8BitMask, vextracthm, 0x10203040506070) #define EMIT_SIMD_BITMASK(name, op, indicies) \ - void TurboAssembler::name(Register dst, Simd128Register src, \ + void MacroAssembler::name(Register dst, Simd128Register src, \ Register scratch1, Simd128Register scratch2) { \ if (CpuFeatures::IsSupported(PPC_10_PLUS)) { \ op(dst, src); \ @@ -3898,7 +3867,7 @@ SIMD_BITMASK_LIST(EMIT_SIMD_BITMASK) V(F32x4Qfms, xvnmsubmsp) #define EMIT_SIMD_QFM(name, op) \ - void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \ + void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \ Simd128Register src2, Simd128Register src3, \ Simd128Register scratch) { \ Simd128Register dest = dst; \ @@ -3915,7 +3884,7 @@ SIMD_QFM_LIST(EMIT_SIMD_QFM) #undef EMIT_SIMD_QFM #undef SIMD_QFM_LIST -void TurboAssembler::I64x2ExtMulLowI32x4S(Simd128Register dst, +void MacroAssembler::I64x2ExtMulLowI32x4S(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { @@ -3925,7 +3894,7 @@ void TurboAssembler::I64x2ExtMulLowI32x4S(Simd128Register dst, vinsertd(dst, scratch, Operand(0)); } -void TurboAssembler::I64x2ExtMulHighI32x4S(Simd128Register dst, +void MacroAssembler::I64x2ExtMulHighI32x4S(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { @@ -3935,7 +3904,7 @@ void TurboAssembler::I64x2ExtMulHighI32x4S(Simd128Register dst, vor(dst, scratch, scratch); } -void TurboAssembler::I64x2ExtMulLowI32x4U(Simd128Register dst, +void MacroAssembler::I64x2ExtMulLowI32x4U(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { @@ -3945,7 +3914,7 @@ void TurboAssembler::I64x2ExtMulLowI32x4U(Simd128Register dst, vinsertd(dst, scratch, Operand(0)); } -void TurboAssembler::I64x2ExtMulHighI32x4U(Simd128Register dst, +void MacroAssembler::I64x2ExtMulHighI32x4U(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { @@ -3956,7 +3925,7 @@ void TurboAssembler::I64x2ExtMulHighI32x4U(Simd128Register dst, } #undef EXT_MUL -void TurboAssembler::LoadSimd128LE(Simd128Register dst, const MemOperand& mem, +void MacroAssembler::LoadSimd128LE(Simd128Register dst, const MemOperand& mem, Register scratch) { #ifdef V8_TARGET_BIG_ENDIAN LoadSimd128(dst, mem, scratch); @@ -3966,7 +3935,7 @@ void TurboAssembler::LoadSimd128LE(Simd128Register dst, const MemOperand& mem, #endif } -void TurboAssembler::StoreSimd128LE(Simd128Register src, const MemOperand& mem, +void MacroAssembler::StoreSimd128LE(Simd128Register src, const MemOperand& mem, Register scratch1, Simd128Register scratch2) { #ifdef V8_TARGET_BIG_ENDIAN @@ -3977,7 +3946,7 @@ void TurboAssembler::StoreSimd128LE(Simd128Register src, const MemOperand& mem, #endif } -void TurboAssembler::F64x2Splat(Simd128Register dst, DoubleRegister src, +void 
MacroAssembler::F64x2Splat(Simd128Register dst, DoubleRegister src, Register scratch) { constexpr int lane_width_in_bytes = 8; MovDoubleToInt64(scratch, src); @@ -3985,35 +3954,35 @@ void TurboAssembler::F64x2Splat(Simd128Register dst, DoubleRegister src, vinsertd(dst, dst, Operand(1 * lane_width_in_bytes)); } -void TurboAssembler::F32x4Splat(Simd128Register dst, DoubleRegister src, +void MacroAssembler::F32x4Splat(Simd128Register dst, DoubleRegister src, DoubleRegister scratch1, Register scratch2) { MovFloatToInt(scratch2, src, scratch1); mtvsrd(dst, scratch2); vspltw(dst, dst, Operand(1)); } -void TurboAssembler::I64x2Splat(Simd128Register dst, Register src) { +void MacroAssembler::I64x2Splat(Simd128Register dst, Register src) { constexpr int lane_width_in_bytes = 8; mtvsrd(dst, src); vinsertd(dst, dst, Operand(1 * lane_width_in_bytes)); } -void TurboAssembler::I32x4Splat(Simd128Register dst, Register src) { +void MacroAssembler::I32x4Splat(Simd128Register dst, Register src) { mtvsrd(dst, src); vspltw(dst, dst, Operand(1)); } -void TurboAssembler::I16x8Splat(Simd128Register dst, Register src) { +void MacroAssembler::I16x8Splat(Simd128Register dst, Register src) { mtvsrd(dst, src); vsplth(dst, dst, Operand(3)); } -void TurboAssembler::I8x16Splat(Simd128Register dst, Register src) { +void MacroAssembler::I8x16Splat(Simd128Register dst, Register src) { mtvsrd(dst, src); vspltb(dst, dst, Operand(7)); } -void TurboAssembler::F64x2ExtractLane(DoubleRegister dst, Simd128Register src, +void MacroAssembler::F64x2ExtractLane(DoubleRegister dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch1, Register scratch2) { @@ -4023,7 +3992,7 @@ void TurboAssembler::F64x2ExtractLane(DoubleRegister dst, Simd128Register src, MovInt64ToDouble(dst, scratch2); } -void TurboAssembler::F32x4ExtractLane(DoubleRegister dst, Simd128Register src, +void MacroAssembler::F32x4ExtractLane(DoubleRegister dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch1, Register scratch2, Register scratch3) { @@ -4033,7 +4002,7 @@ void TurboAssembler::F32x4ExtractLane(DoubleRegister dst, Simd128Register src, MovIntToFloat(dst, scratch2, scratch3); } -void TurboAssembler::I64x2ExtractLane(Register dst, Simd128Register src, +void MacroAssembler::I64x2ExtractLane(Register dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch) { constexpr int lane_width_in_bytes = 8; @@ -4041,7 +4010,7 @@ void TurboAssembler::I64x2ExtractLane(Register dst, Simd128Register src, mfvsrd(dst, scratch); } -void TurboAssembler::I32x4ExtractLane(Register dst, Simd128Register src, +void MacroAssembler::I32x4ExtractLane(Register dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch) { constexpr int lane_width_in_bytes = 4; @@ -4049,7 +4018,7 @@ void TurboAssembler::I32x4ExtractLane(Register dst, Simd128Register src, mfvsrd(dst, scratch); } -void TurboAssembler::I16x8ExtractLaneU(Register dst, Simd128Register src, +void MacroAssembler::I16x8ExtractLaneU(Register dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch) { constexpr int lane_width_in_bytes = 2; @@ -4057,28 +4026,28 @@ void TurboAssembler::I16x8ExtractLaneU(Register dst, Simd128Register src, mfvsrd(dst, scratch); } -void TurboAssembler::I16x8ExtractLaneS(Register dst, Simd128Register src, +void MacroAssembler::I16x8ExtractLaneS(Register dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch) { I16x8ExtractLaneU(dst, src, imm_lane_idx, scratch); extsh(dst, dst); } -void 
TurboAssembler::I8x16ExtractLaneU(Register dst, Simd128Register src, +void MacroAssembler::I8x16ExtractLaneU(Register dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch) { vextractub(scratch, src, Operand(15 - imm_lane_idx)); mfvsrd(dst, scratch); } -void TurboAssembler::I8x16ExtractLaneS(Register dst, Simd128Register src, +void MacroAssembler::I8x16ExtractLaneS(Register dst, Simd128Register src, uint8_t imm_lane_idx, Simd128Register scratch) { I8x16ExtractLaneU(dst, src, imm_lane_idx, scratch); extsb(dst, dst); } -void TurboAssembler::F64x2ReplaceLane(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F64x2ReplaceLane(Simd128Register dst, Simd128Register src1, DoubleRegister src2, uint8_t imm_lane_idx, Register scratch1, Simd128Register scratch2) { @@ -4095,7 +4064,7 @@ void TurboAssembler::F64x2ReplaceLane(Simd128Register dst, Simd128Register src1, } } -void TurboAssembler::F32x4ReplaceLane(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F32x4ReplaceLane(Simd128Register dst, Simd128Register src1, DoubleRegister src2, uint8_t imm_lane_idx, Register scratch1, DoubleRegister scratch2, @@ -4113,7 +4082,7 @@ void TurboAssembler::F32x4ReplaceLane(Simd128Register dst, Simd128Register src1, } } -void TurboAssembler::I64x2ReplaceLane(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I64x2ReplaceLane(Simd128Register dst, Simd128Register src1, Register src2, uint8_t imm_lane_idx, Simd128Register scratch) { constexpr int lane_width_in_bytes = 8; @@ -4128,7 +4097,7 @@ void TurboAssembler::I64x2ReplaceLane(Simd128Register dst, Simd128Register src1, } } -void TurboAssembler::I32x4ReplaceLane(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I32x4ReplaceLane(Simd128Register dst, Simd128Register src1, Register src2, uint8_t imm_lane_idx, Simd128Register scratch) { constexpr int lane_width_in_bytes = 4; @@ -4143,7 +4112,7 @@ void TurboAssembler::I32x4ReplaceLane(Simd128Register dst, Simd128Register src1, } } -void TurboAssembler::I16x8ReplaceLane(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I16x8ReplaceLane(Simd128Register dst, Simd128Register src1, Register src2, uint8_t imm_lane_idx, Simd128Register scratch) { constexpr int lane_width_in_bytes = 2; @@ -4154,7 +4123,7 @@ void TurboAssembler::I16x8ReplaceLane(Simd128Register dst, Simd128Register src1, vinserth(dst, scratch, Operand((7 - imm_lane_idx) * lane_width_in_bytes)); } -void TurboAssembler::I8x16ReplaceLane(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I8x16ReplaceLane(Simd128Register dst, Simd128Register src1, Register src2, uint8_t imm_lane_idx, Simd128Register scratch) { if (src1 != dst) { @@ -4164,7 +4133,7 @@ void TurboAssembler::I8x16ReplaceLane(Simd128Register dst, Simd128Register src1, vinsertb(dst, scratch, Operand(15 - imm_lane_idx)); } -void TurboAssembler::I64x2Mul(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I64x2Mul(Simd128Register dst, Simd128Register src1, Simd128Register src2, Register scratch1, Register scratch2, Register scratch3, Simd128Register scratch4) { @@ -4191,7 +4160,7 @@ void TurboAssembler::I64x2Mul(Simd128Register dst, Simd128Register src1, } } -void TurboAssembler::I16x8Mul(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I16x8Mul(Simd128Register dst, Simd128Register src1, Simd128Register src2) { vxor(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); vmladduhm(dst, src1, src2, kSimd128RegZero); @@ -4204,7 +4173,7 @@ void TurboAssembler::I16x8Mul(Simd128Register 
dst, Simd128Register src1, vsel(dst, src2, result, scratch2); \ /* Use xvmindp to turn any selected SNANs to QNANs. */ \ xvmindp(dst, dst, dst); -void TurboAssembler::F64x2Min(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F64x2Min(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch1, Simd128Register scratch2) { xvmindp(scratch1, src1, src2); @@ -4212,7 +4181,7 @@ void TurboAssembler::F64x2Min(Simd128Register dst, Simd128Register src1, F64X2_MIN_MAX_NAN(scratch1) } -void TurboAssembler::F64x2Max(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F64x2Max(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch1, Simd128Register scratch2) { xvmaxdp(scratch1, src1, src2); @@ -4221,108 +4190,108 @@ void TurboAssembler::F64x2Max(Simd128Register dst, Simd128Register src1, } #undef F64X2_MIN_MAX_NAN -void TurboAssembler::F64x2Lt(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F64x2Lt(Simd128Register dst, Simd128Register src1, Simd128Register src2) { xvcmpgtdp(dst, src2, src1); } -void TurboAssembler::F64x2Le(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F64x2Le(Simd128Register dst, Simd128Register src1, Simd128Register src2) { xvcmpgedp(dst, src2, src1); } -void TurboAssembler::F64x2Ne(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F64x2Ne(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { xvcmpeqdp(scratch, src1, src2); vnor(dst, scratch, scratch); } -void TurboAssembler::F32x4Lt(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F32x4Lt(Simd128Register dst, Simd128Register src1, Simd128Register src2) { xvcmpgtsp(dst, src2, src1); } -void TurboAssembler::F32x4Le(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F32x4Le(Simd128Register dst, Simd128Register src1, Simd128Register src2) { xvcmpgesp(dst, src2, src1); } -void TurboAssembler::F32x4Ne(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F32x4Ne(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { xvcmpeqsp(scratch, src1, src2); vnor(dst, scratch, scratch); } -void TurboAssembler::I64x2Ne(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I64x2Ne(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { vcmpequd(scratch, src1, src2); vnor(dst, scratch, scratch); } -void TurboAssembler::I64x2GeS(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I64x2GeS(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { vcmpgtsd(scratch, src2, src1); vnor(dst, scratch, scratch); } -void TurboAssembler::I32x4Ne(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I32x4Ne(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { vcmpequw(scratch, src1, src2); vnor(dst, scratch, scratch); } -void TurboAssembler::I32x4GeS(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I32x4GeS(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { vcmpgtsw(scratch, src2, src1); vnor(dst, scratch, scratch); } -void TurboAssembler::I32x4GeU(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I32x4GeU(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { vcmpequw(scratch, src1, src2); vcmpgtuw(dst, src1, src2); vor(dst, dst, scratch); } -void 
TurboAssembler::I16x8Ne(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I16x8Ne(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { vcmpequh(scratch, src1, src2); vnor(dst, scratch, scratch); } -void TurboAssembler::I16x8GeS(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I16x8GeS(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { vcmpgtsh(scratch, src2, src1); vnor(dst, scratch, scratch); } -void TurboAssembler::I16x8GeU(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I16x8GeU(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { vcmpequh(scratch, src1, src2); vcmpgtuh(dst, src1, src2); vor(dst, dst, scratch); } -void TurboAssembler::I8x16Ne(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I8x16Ne(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { vcmpequb(scratch, src1, src2); vnor(dst, scratch, scratch); } -void TurboAssembler::I8x16GeS(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I8x16GeS(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { vcmpgtsb(scratch, src2, src1); vnor(dst, scratch, scratch); } -void TurboAssembler::I8x16GeU(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I8x16GeU(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { vcmpequb(scratch, src1, src2); vcmpgtub(dst, src1, src2); vor(dst, dst, scratch); } -void TurboAssembler::I64x2Abs(Simd128Register dst, Simd128Register src, +void MacroAssembler::I64x2Abs(Simd128Register dst, Simd128Register src, Simd128Register scratch) { constexpr int shift_bits = 63; xxspltib(scratch, Operand(shift_bits)); @@ -4330,7 +4299,7 @@ void TurboAssembler::I64x2Abs(Simd128Register dst, Simd128Register src, vxor(dst, src, scratch); vsubudm(dst, dst, scratch); } -void TurboAssembler::I32x4Abs(Simd128Register dst, Simd128Register src, +void MacroAssembler::I32x4Abs(Simd128Register dst, Simd128Register src, Simd128Register scratch) { constexpr int shift_bits = 31; xxspltib(scratch, Operand(shift_bits)); @@ -4338,7 +4307,7 @@ void TurboAssembler::I32x4Abs(Simd128Register dst, Simd128Register src, vxor(dst, src, scratch); vsubuwm(dst, dst, scratch); } -void TurboAssembler::I16x8Abs(Simd128Register dst, Simd128Register src, +void MacroAssembler::I16x8Abs(Simd128Register dst, Simd128Register src, Simd128Register scratch) { constexpr int shift_bits = 15; xxspltib(scratch, Operand(shift_bits)); @@ -4346,13 +4315,13 @@ void TurboAssembler::I16x8Abs(Simd128Register dst, Simd128Register src, vxor(dst, src, scratch); vsubuhm(dst, dst, scratch); } -void TurboAssembler::I16x8Neg(Simd128Register dst, Simd128Register src, +void MacroAssembler::I16x8Neg(Simd128Register dst, Simd128Register src, Simd128Register scratch) { vspltish(scratch, Operand(1)); vnor(dst, src, src); vadduhm(dst, scratch, dst); } -void TurboAssembler::I8x16Abs(Simd128Register dst, Simd128Register src, +void MacroAssembler::I8x16Abs(Simd128Register dst, Simd128Register src, Simd128Register scratch) { constexpr int shift_bits = 7; xxspltib(scratch, Operand(shift_bits)); @@ -4360,38 +4329,38 @@ void TurboAssembler::I8x16Abs(Simd128Register dst, Simd128Register src, vxor(dst, src, scratch); vsububm(dst, dst, scratch); } -void TurboAssembler::I8x16Neg(Simd128Register dst, Simd128Register src, +void MacroAssembler::I8x16Neg(Simd128Register dst, 
Simd128Register src, Simd128Register scratch) { xxspltib(scratch, Operand(1)); vnor(dst, src, src); vaddubm(dst, scratch, dst); } -void TurboAssembler::F64x2Pmin(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F64x2Pmin(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { xvcmpgtdp(kScratchSimd128Reg, src1, src2); vsel(dst, src1, src2, kScratchSimd128Reg); } -void TurboAssembler::F64x2Pmax(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F64x2Pmax(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { xvcmpgtdp(kScratchSimd128Reg, src2, src1); vsel(dst, src1, src2, kScratchSimd128Reg); } -void TurboAssembler::F32x4Pmin(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F32x4Pmin(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { xvcmpgtsp(kScratchSimd128Reg, src1, src2); vsel(dst, src1, src2, kScratchSimd128Reg); } -void TurboAssembler::F32x4Pmax(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F32x4Pmax(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { xvcmpgtsp(kScratchSimd128Reg, src2, src1); vsel(dst, src1, src2, kScratchSimd128Reg); } -void TurboAssembler::I32x4SConvertF32x4(Simd128Register dst, +void MacroAssembler::I32x4SConvertF32x4(Simd128Register dst, Simd128Register src, Simd128Register scratch) { // NaN to 0 @@ -4400,37 +4369,37 @@ void TurboAssembler::I32x4SConvertF32x4(Simd128Register dst, xvcvspsxws(dst, scratch); } -void TurboAssembler::I16x8SConvertI32x4(Simd128Register dst, +void MacroAssembler::I16x8SConvertI32x4(Simd128Register dst, Simd128Register src1, Simd128Register src2) { vpkswss(dst, src2, src1); } -void TurboAssembler::I16x8UConvertI32x4(Simd128Register dst, +void MacroAssembler::I16x8UConvertI32x4(Simd128Register dst, Simd128Register src1, Simd128Register src2) { vpkswus(dst, src2, src1); } -void TurboAssembler::I8x16SConvertI16x8(Simd128Register dst, +void MacroAssembler::I8x16SConvertI16x8(Simd128Register dst, Simd128Register src1, Simd128Register src2) { vpkshss(dst, src2, src1); } -void TurboAssembler::I8x16UConvertI16x8(Simd128Register dst, +void MacroAssembler::I8x16UConvertI16x8(Simd128Register dst, Simd128Register src1, Simd128Register src2) { vpkshus(dst, src2, src1); } -void TurboAssembler::F64x2ConvertLowI32x4S(Simd128Register dst, +void MacroAssembler::F64x2ConvertLowI32x4S(Simd128Register dst, Simd128Register src) { vupklsw(dst, src); xvcvsxddp(dst, dst); } -void TurboAssembler::F64x2ConvertLowI32x4U(Simd128Register dst, +void MacroAssembler::F64x2ConvertLowI32x4U(Simd128Register dst, Simd128Register src, Register scratch1, Simd128Register scratch2) { @@ -4444,7 +4413,7 @@ void TurboAssembler::F64x2ConvertLowI32x4U(Simd128Register dst, xvcvuxddp(dst, dst); } -void TurboAssembler::I64x2UConvertI32x4Low(Simd128Register dst, +void MacroAssembler::I64x2UConvertI32x4Low(Simd128Register dst, Simd128Register src, Register scratch1, Simd128Register scratch2) { @@ -4457,7 +4426,7 @@ void TurboAssembler::I64x2UConvertI32x4Low(Simd128Register dst, vand(dst, scratch2, dst); } -void TurboAssembler::I64x2UConvertI32x4High(Simd128Register dst, +void MacroAssembler::I64x2UConvertI32x4High(Simd128Register dst, Simd128Register src, Register scratch1, Simd128Register scratch2) { @@ -4470,7 +4439,7 @@ void TurboAssembler::I64x2UConvertI32x4High(Simd128Register dst, vand(dst, scratch2, dst); } -void 
TurboAssembler::I32x4UConvertI16x8Low(Simd128Register dst, +void MacroAssembler::I32x4UConvertI16x8Low(Simd128Register dst, Simd128Register src, Register scratch1, Simd128Register scratch2) { @@ -4482,7 +4451,7 @@ void TurboAssembler::I32x4UConvertI16x8Low(Simd128Register dst, vand(dst, scratch2, dst); } -void TurboAssembler::I32x4UConvertI16x8High(Simd128Register dst, +void MacroAssembler::I32x4UConvertI16x8High(Simd128Register dst, Simd128Register src, Register scratch1, Simd128Register scratch2) { @@ -4494,7 +4463,7 @@ void TurboAssembler::I32x4UConvertI16x8High(Simd128Register dst, vand(dst, scratch2, dst); } -void TurboAssembler::I16x8UConvertI8x16Low(Simd128Register dst, +void MacroAssembler::I16x8UConvertI8x16Low(Simd128Register dst, Simd128Register src, Register scratch1, Simd128Register scratch2) { @@ -4506,7 +4475,7 @@ void TurboAssembler::I16x8UConvertI8x16Low(Simd128Register dst, vand(dst, scratch2, dst); } -void TurboAssembler::I16x8UConvertI8x16High(Simd128Register dst, +void MacroAssembler::I16x8UConvertI8x16High(Simd128Register dst, Simd128Register src, Register scratch1, Simd128Register scratch2) { @@ -4518,7 +4487,7 @@ void TurboAssembler::I16x8UConvertI8x16High(Simd128Register dst, vand(dst, scratch2, dst); } -void TurboAssembler::I8x16BitMask(Register dst, Simd128Register src, +void MacroAssembler::I8x16BitMask(Register dst, Simd128Register src, Register scratch1, Register scratch2, Simd128Register scratch3) { if (CpuFeatures::IsSupported(PPC_10_PLUS)) { @@ -4532,21 +4501,21 @@ void TurboAssembler::I8x16BitMask(Register dst, Simd128Register src, } } -void TurboAssembler::I32x4DotI16x8S(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I32x4DotI16x8S(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { vxor(scratch, scratch, scratch); vmsumshm(dst, src1, src2, scratch); } -void TurboAssembler::I16x8Q15MulRSatS(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I16x8Q15MulRSatS(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { vxor(scratch, scratch, scratch); vmhraddshs(dst, src1, src2, scratch); } -void TurboAssembler::I8x16Swizzle(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I8x16Swizzle(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { // Saturate the indices to 5 bits. 
Input indices more than 31 should @@ -4559,7 +4528,7 @@ void TurboAssembler::I8x16Swizzle(Simd128Register dst, Simd128Register src1, vperm(dst, dst, kSimd128RegZero, scratch); } -void TurboAssembler::I8x16Shuffle(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I8x16Shuffle(Simd128Register dst, Simd128Register src1, Simd128Register src2, uint64_t high, uint64_t low, Register scratch1, Register scratch2, Simd128Register scratch3) { @@ -4574,25 +4543,25 @@ void TurboAssembler::I8x16Shuffle(Simd128Register dst, Simd128Register src1, mul_even(scratch2, src, scratch1); \ mul_odd(scratch1, src, scratch1); \ add(dst, scratch2, scratch1); -void TurboAssembler::I32x4ExtAddPairwiseI16x8S(Simd128Register dst, +void MacroAssembler::I32x4ExtAddPairwiseI16x8S(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Simd128Register scratch2) { EXT_ADD_PAIRWISE(vspltish, vmulesh, vmulosh, vadduwm) } -void TurboAssembler::I32x4ExtAddPairwiseI16x8U(Simd128Register dst, +void MacroAssembler::I32x4ExtAddPairwiseI16x8U(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Simd128Register scratch2) { EXT_ADD_PAIRWISE(vspltish, vmuleuh, vmulouh, vadduwm) } -void TurboAssembler::I16x8ExtAddPairwiseI8x16S(Simd128Register dst, +void MacroAssembler::I16x8ExtAddPairwiseI8x16S(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Simd128Register scratch2) { EXT_ADD_PAIRWISE(xxspltib, vmulesb, vmulosb, vadduhm) } -void TurboAssembler::I16x8ExtAddPairwiseI8x16U(Simd128Register dst, +void MacroAssembler::I16x8ExtAddPairwiseI8x16U(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Simd128Register scratch2) { @@ -4600,7 +4569,7 @@ void TurboAssembler::I16x8ExtAddPairwiseI8x16U(Simd128Register dst, } #undef EXT_ADD_PAIRWISE -void TurboAssembler::F64x2PromoteLowF32x4(Simd128Register dst, +void MacroAssembler::F64x2PromoteLowF32x4(Simd128Register dst, Simd128Register src) { constexpr int lane_number = 8; vextractd(dst, src, Operand(lane_number)); @@ -4608,7 +4577,7 @@ void TurboAssembler::F64x2PromoteLowF32x4(Simd128Register dst, xvcvspdp(dst, dst); } -void TurboAssembler::F32x4DemoteF64x2Zero(Simd128Register dst, +void MacroAssembler::F32x4DemoteF64x2Zero(Simd128Register dst, Simd128Register src, Simd128Register scratch) { constexpr int lane_number = 8; @@ -4619,7 +4588,7 @@ void TurboAssembler::F32x4DemoteF64x2Zero(Simd128Register dst, vinsertd(dst, scratch, Operand(lane_number)); } -void TurboAssembler::I32x4TruncSatF64x2SZero(Simd128Register dst, +void MacroAssembler::I32x4TruncSatF64x2SZero(Simd128Register dst, Simd128Register src, Simd128Register scratch) { constexpr int lane_number = 8; @@ -4633,7 +4602,7 @@ void TurboAssembler::I32x4TruncSatF64x2SZero(Simd128Register dst, vinsertd(dst, scratch, Operand(lane_number)); } -void TurboAssembler::I32x4TruncSatF64x2UZero(Simd128Register dst, +void MacroAssembler::I32x4TruncSatF64x2UZero(Simd128Register dst, Simd128Register src, Simd128Register scratch) { constexpr int lane_number = 8; @@ -4649,7 +4618,7 @@ void TurboAssembler::I32x4TruncSatF64x2UZero(Simd128Register dst, #else #define MAYBE_REVERSE_BYTES(reg, instr) #endif -void TurboAssembler::LoadLane64LE(Simd128Register dst, const MemOperand& mem, +void MacroAssembler::LoadLane64LE(Simd128Register dst, const MemOperand& mem, int lane, Register scratch1, Simd128Register scratch2) { constexpr int lane_width_in_bytes = 8; @@ -4657,7 +4626,7 @@ void TurboAssembler::LoadLane64LE(Simd128Register dst, const MemOperand& mem, MAYBE_REVERSE_BYTES(scratch2, 
xxbrd) vinsertd(dst, scratch2, Operand((1 - lane) * lane_width_in_bytes)); } -void TurboAssembler::LoadLane32LE(Simd128Register dst, const MemOperand& mem, +void MacroAssembler::LoadLane32LE(Simd128Register dst, const MemOperand& mem, int lane, Register scratch1, Simd128Register scratch2) { constexpr int lane_width_in_bytes = 4; @@ -4665,7 +4634,7 @@ void TurboAssembler::LoadLane32LE(Simd128Register dst, const MemOperand& mem, MAYBE_REVERSE_BYTES(scratch2, xxbrw) vinsertw(dst, scratch2, Operand((3 - lane) * lane_width_in_bytes)); } -void TurboAssembler::LoadLane16LE(Simd128Register dst, const MemOperand& mem, +void MacroAssembler::LoadLane16LE(Simd128Register dst, const MemOperand& mem, int lane, Register scratch1, Simd128Register scratch2) { constexpr int lane_width_in_bytes = 2; @@ -4673,7 +4642,7 @@ void TurboAssembler::LoadLane16LE(Simd128Register dst, const MemOperand& mem, MAYBE_REVERSE_BYTES(scratch2, xxbrh) vinserth(dst, scratch2, Operand((7 - lane) * lane_width_in_bytes)); } -void TurboAssembler::LoadLane8LE(Simd128Register dst, const MemOperand& mem, +void MacroAssembler::LoadLane8LE(Simd128Register dst, const MemOperand& mem, int lane, Register scratch1, Simd128Register scratch2) { LoadSimd128Uint8(scratch2, mem, scratch1); @@ -4681,7 +4650,7 @@ void TurboAssembler::LoadLane8LE(Simd128Register dst, const MemOperand& mem, } #undef MAYBE_REVERSE_BYTES -void TurboAssembler::V128AnyTrue(Register dst, Simd128Register src, +void MacroAssembler::V128AnyTrue(Register dst, Simd128Register src, Register scratch1, Register scratch2, Simd128Register scratch3) { constexpr uint8_t fxm = 0x2; // field mask. @@ -4695,18 +4664,18 @@ void TurboAssembler::V128AnyTrue(Register dst, Simd128Register src, isel(dst, scratch1, scratch2, bit_number); } -void TurboAssembler::S128Not(Simd128Register dst, Simd128Register src) { +void MacroAssembler::S128Not(Simd128Register dst, Simd128Register src) { vnor(dst, src, src); } -void TurboAssembler::S128Const(Simd128Register dst, uint64_t high, uint64_t low, +void MacroAssembler::S128Const(Simd128Register dst, uint64_t high, uint64_t low, Register scratch1, Register scratch2) { mov(scratch1, Operand(low)); mov(scratch2, Operand(high)); mtvsrdd(dst, scratch2, scratch1); } -void TurboAssembler::S128Select(Simd128Register dst, Simd128Register src1, +void MacroAssembler::S128Select(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register mask) { vsel(dst, src2, src1, mask); } @@ -4726,7 +4695,7 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3, UNREACHABLE(); } -void TurboAssembler::SwapP(Register src, Register dst, Register scratch) { +void MacroAssembler::SwapP(Register src, Register dst, Register scratch) { if (src == dst) return; DCHECK(!AreAliased(src, dst, scratch)); mr(scratch, src); @@ -4734,7 +4703,7 @@ void TurboAssembler::SwapP(Register src, Register dst, Register scratch) { mr(dst, scratch); } -void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) { +void MacroAssembler::SwapP(Register src, MemOperand dst, Register scratch) { if (dst.ra() != r0 && dst.ra().is_valid()) DCHECK(!AreAliased(src, dst.ra(), scratch)); if (dst.rb() != r0 && dst.rb().is_valid()) @@ -4745,7 +4714,7 @@ void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) { StoreU64(scratch, dst, r0); } -void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0, +void MacroAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0, Register scratch_1) { if (src.ra() != r0 
&& src.ra().is_valid()) DCHECK(!AreAliased(src.ra(), scratch_0, scratch_1)); @@ -4777,7 +4746,7 @@ void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0, } } -void TurboAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst, +void MacroAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst, DoubleRegister scratch) { if (src == dst) return; DCHECK(!AreAliased(src, dst, scratch)); @@ -4786,7 +4755,7 @@ void TurboAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst, fmr(dst, scratch); } -void TurboAssembler::SwapFloat32(DoubleRegister src, MemOperand dst, +void MacroAssembler::SwapFloat32(DoubleRegister src, MemOperand dst, DoubleRegister scratch) { DCHECK(!AreAliased(src, scratch)); fmr(scratch, src); @@ -4794,7 +4763,7 @@ void TurboAssembler::SwapFloat32(DoubleRegister src, MemOperand dst, StoreF32(scratch, dst, r0); } -void TurboAssembler::SwapFloat32(MemOperand src, MemOperand dst, +void MacroAssembler::SwapFloat32(MemOperand src, MemOperand dst, DoubleRegister scratch_0, DoubleRegister scratch_1) { DCHECK(!AreAliased(scratch_0, scratch_1)); @@ -4804,7 +4773,7 @@ void TurboAssembler::SwapFloat32(MemOperand src, MemOperand dst, StoreF32(scratch_1, src, r0); } -void TurboAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst, +void MacroAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst, DoubleRegister scratch) { if (src == dst) return; DCHECK(!AreAliased(src, dst, scratch)); @@ -4813,7 +4782,7 @@ void TurboAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst, fmr(dst, scratch); } -void TurboAssembler::SwapDouble(DoubleRegister src, MemOperand dst, +void MacroAssembler::SwapDouble(DoubleRegister src, MemOperand dst, DoubleRegister scratch) { DCHECK(!AreAliased(src, scratch)); fmr(scratch, src); @@ -4821,7 +4790,7 @@ void TurboAssembler::SwapDouble(DoubleRegister src, MemOperand dst, StoreF64(scratch, dst, r0); } -void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst, +void MacroAssembler::SwapDouble(MemOperand src, MemOperand dst, DoubleRegister scratch_0, DoubleRegister scratch_1) { DCHECK(!AreAliased(scratch_0, scratch_1)); @@ -4831,7 +4800,7 @@ void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst, StoreF64(scratch_1, src, r0); } -void TurboAssembler::SwapSimd128(Simd128Register src, Simd128Register dst, +void MacroAssembler::SwapSimd128(Simd128Register src, Simd128Register dst, Simd128Register scratch) { if (src == dst) return; vor(scratch, src, src); @@ -4839,7 +4808,7 @@ void TurboAssembler::SwapSimd128(Simd128Register src, Simd128Register dst, vor(dst, scratch, scratch); } -void TurboAssembler::SwapSimd128(Simd128Register src, MemOperand dst, +void MacroAssembler::SwapSimd128(Simd128Register src, MemOperand dst, Simd128Register scratch1, Register scratch2) { DCHECK(src != scratch1); LoadSimd128(scratch1, dst, scratch2); @@ -4847,7 +4816,7 @@ void TurboAssembler::SwapSimd128(Simd128Register src, MemOperand dst, vor(src, scratch1, scratch1); } -void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst, +void MacroAssembler::SwapSimd128(MemOperand src, MemOperand dst, Simd128Register scratch1, Simd128Register scratch2, Register scratch3) { LoadSimd128(scratch1, src, scratch3); @@ -4857,7 +4826,7 @@ void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst, StoreSimd128(scratch2, src, scratch3); } -void TurboAssembler::ByteReverseU16(Register dst, Register val, +void MacroAssembler::ByteReverseU16(Register dst, Register val, Register scratch) { if (CpuFeatures::IsSupported(PPC_10_PLUS)) { 
brh(dst, val); @@ -4870,7 +4839,7 @@ void TurboAssembler::ByteReverseU16(Register dst, Register val, ZeroExtHalfWord(dst, dst); } -void TurboAssembler::ByteReverseU32(Register dst, Register val, +void MacroAssembler::ByteReverseU32(Register dst, Register val, Register scratch) { if (CpuFeatures::IsSupported(PPC_10_PLUS)) { brw(dst, val); @@ -4883,7 +4852,7 @@ void TurboAssembler::ByteReverseU32(Register dst, Register val, ZeroExtWord32(dst, scratch); } -void TurboAssembler::ByteReverseU64(Register dst, Register val, Register) { +void MacroAssembler::ByteReverseU64(Register dst, Register val, Register) { if (CpuFeatures::IsSupported(PPC_10_PLUS)) { brd(dst, val); return; @@ -4894,17 +4863,17 @@ void TurboAssembler::ByteReverseU64(Register dst, Register val, Register) { addi(sp, sp, Operand(kSystemPointerSize)); } -void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) { +void MacroAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) { CmpS64(x, Operand(y), r0); beq(dest); } -void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) { +void MacroAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) { CmpS64(x, Operand(y), r0); blt(dest); } -void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { +void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { static_assert(kSystemPointerSize == 8); static_assert(kSmiTagSize == 1); static_assert(kSmiTag == 0); @@ -4923,31 +4892,31 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { LoadU64(builtin_index, MemOperand(kRootRegister, builtin_index)); } -void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { +void MacroAssembler::CallBuiltinByIndex(Register builtin_index) { LoadEntryFromBuiltinIndex(builtin_index); Call(builtin_index); } -void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin, +void MacroAssembler::LoadEntryFromBuiltin(Builtin builtin, Register destination) { ASM_CODE_COMMENT(this); LoadU64(destination, EntryFromBuiltinAsOperand(builtin)); } -MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { +MemOperand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { ASM_CODE_COMMENT(this); DCHECK(root_array_available()); return MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(builtin)); } -void TurboAssembler::LoadCodeEntry(Register destination, Register code_object) { +void MacroAssembler::LoadCodeEntry(Register destination, Register code_object) { ASM_CODE_COMMENT(this); LoadU64(destination, FieldMemOperand(code_object, Code::kCodeEntryPointOffset), r0); } -void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, +void MacroAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, Register code_object) { ASM_CODE_COMMENT(this); // Compute the InstructionStream object pointer from the code entry point. 
@@ -4957,20 +4926,20 @@ void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, Operand(InstructionStream::kHeaderSize - kHeapObjectTag)); } -void TurboAssembler::CallCodeObject(Register code_object) { +void MacroAssembler::CallCodeObject(Register code_object) { ASM_CODE_COMMENT(this); LoadCodeEntry(code_object, code_object); Call(code_object); } -void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { +void MacroAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { ASM_CODE_COMMENT(this); DCHECK_EQ(JumpMode::kJump, jump_mode); LoadCodeEntry(code_object, code_object); Jump(code_object); } -void TurboAssembler::StoreReturnAddressAndCall(Register target) { +void MacroAssembler::StoreReturnAddressAndCall(Register target) { // This generates the final instruction sequence for calls to C functions // once an exit frame has been constructed. // @@ -5004,7 +4973,7 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) { SizeOfCodeGeneratedSince(&start_call)); } -void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, +void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit, DeoptimizeKind kind, Label* ret, Label*) { BlockTrampolinePoolScope block_trampoline_pool(this); @@ -5017,30 +4986,30 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, : Deoptimizer::kEagerDeoptExitSize); } -void TurboAssembler::ZeroExtByte(Register dst, Register src) { +void MacroAssembler::ZeroExtByte(Register dst, Register src) { clrldi(dst, src, Operand(56)); } -void TurboAssembler::ZeroExtHalfWord(Register dst, Register src) { +void MacroAssembler::ZeroExtHalfWord(Register dst, Register src) { clrldi(dst, src, Operand(48)); } -void TurboAssembler::ZeroExtWord32(Register dst, Register src) { +void MacroAssembler::ZeroExtWord32(Register dst, Register src) { clrldi(dst, src, Operand(32)); } -void TurboAssembler::Trap() { stop(); } -void TurboAssembler::DebugBreak() { stop(); } +void MacroAssembler::Trap() { stop(); } +void MacroAssembler::DebugBreak() { stop(); } -void TurboAssembler::Popcnt32(Register dst, Register src) { popcntw(dst, src); } +void MacroAssembler::Popcnt32(Register dst, Register src) { popcntw(dst, src); } -void TurboAssembler::Popcnt64(Register dst, Register src) { popcntd(dst, src); } +void MacroAssembler::Popcnt64(Register dst, Register src) { popcntd(dst, src); } -void TurboAssembler::CountLeadingZerosU32(Register dst, Register src, RCBit r) { +void MacroAssembler::CountLeadingZerosU32(Register dst, Register src, RCBit r) { cntlzw(dst, src, r); } -void TurboAssembler::CountLeadingZerosU64(Register dst, Register src, RCBit r) { +void MacroAssembler::CountLeadingZerosU64(Register dst, Register src, RCBit r) { cntlzd(dst, src, r); } @@ -5057,7 +5026,7 @@ void TurboAssembler::CountLeadingZerosU64(Register dst, Register src, RCBit r) { addi(dst, dst, Operand(1)); /* dst++ */ \ bdnz(&loop); \ bind(&done); -void TurboAssembler::CountTrailingZerosU32(Register dst, Register src, +void MacroAssembler::CountTrailingZerosU32(Register dst, Register src, Register scratch1, Register scratch2, RCBit r) { if (CpuFeatures::IsSupported(PPC_9_PLUS)) { @@ -5067,7 +5036,7 @@ void TurboAssembler::CountTrailingZerosU32(Register dst, Register src, } } -void TurboAssembler::CountTrailingZerosU64(Register dst, Register src, +void MacroAssembler::CountTrailingZerosU64(Register dst, Register src, Register scratch1, Register scratch2, RCBit r) { if (CpuFeatures::IsSupported(PPC_9_PLUS)) { 
@@ -5078,14 +5047,14 @@ void TurboAssembler::CountTrailingZerosU64(Register dst, Register src, } #undef COUNT_TRAILING_ZEROES_SLOW -void TurboAssembler::ClearByteU64(Register dst, int byte_idx) { +void MacroAssembler::ClearByteU64(Register dst, int byte_idx) { CHECK(0 <= byte_idx && byte_idx <= 7); int shift = byte_idx*8; rldicl(dst, dst, shift, 8); rldicl(dst, dst, 64-shift, 0); } -void TurboAssembler::ReverseBitsU64(Register dst, Register src, +void MacroAssembler::ReverseBitsU64(Register dst, Register src, Register scratch1, Register scratch2) { ByteReverseU64(dst, src); for (int i = 0; i < 8; i++) { @@ -5093,7 +5062,7 @@ void TurboAssembler::ReverseBitsU64(Register dst, Register src, } } -void TurboAssembler::ReverseBitsU32(Register dst, Register src, +void MacroAssembler::ReverseBitsU32(Register dst, Register src, Register scratch1, Register scratch2) { ByteReverseU32(dst, src, scratch1); for (int i = 4; i < 8; i++) { @@ -5102,7 +5071,7 @@ void TurboAssembler::ReverseBitsU32(Register dst, Register src, } // byte_idx=7 refers to least significant byte -void TurboAssembler::ReverseBitsInSingleByteU64(Register dst, Register src, +void MacroAssembler::ReverseBitsInSingleByteU64(Register dst, Register src, Register scratch1, Register scratch2, int byte_idx) { diff --git a/src/codegen/ppc/macro-assembler-ppc.h b/src/codegen/ppc/macro-assembler-ppc.h index f85f4f3c57..c5c693ac22 100644 --- a/src/codegen/ppc/macro-assembler-ppc.h +++ b/src/codegen/ppc/macro-assembler-ppc.h @@ -47,9 +47,9 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg, #define ClearRightImm clrrwi #endif -class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { +class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { public: - using TurboAssemblerBase::TurboAssemblerBase; + using MacroAssemblerBase::MacroAssemblerBase; void CallBuiltin(Builtin builtin, Condition cond = al); void TailCallBuiltin(Builtin builtin, Condition cond = al, @@ -1010,19 +1010,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { #endif } - // Loads a field containing a HeapObject and decompresses it if pointer - // compression is enabled. - void LoadTaggedPointerField(const Register& destination, - const MemOperand& field_operand, - const Register& scratch = no_reg); + // Loads a field containing any tagged value and decompresses it if necessary. + void LoadTaggedField(const Register& destination, + const MemOperand& field_operand, + const Register& scratch = no_reg); void LoadTaggedSignedField(Register destination, MemOperand field_operand, Register scratch); - // Loads a field containing any tagged value and decompresses it if necessary. - void LoadAnyTaggedField(const Register& destination, - const MemOperand& field_operand, - const Register& scratch = no_reg); - // Compresses and stores tagged value to given on-heap location. 
void StoreTaggedField(const Register& value, const MemOperand& dst_field_operand, @@ -1030,11 +1024,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void DecompressTaggedSigned(Register destination, MemOperand field_operand); void DecompressTaggedSigned(Register destination, Register src); - void DecompressTaggedPointer(Register destination, MemOperand field_operand); - void DecompressTaggedPointer(Register destination, Register source); - void DecompressTaggedPointer(const Register& destination, Tagged_t immediate); - void DecompressAnyTagged(Register destination, MemOperand field_operand); - void DecompressAnyTagged(Register destination, Register source); + void DecompressTagged(Register destination, MemOperand field_operand); + void DecompressTagged(Register destination, Register source); + void DecompressTagged(const Register& destination, Tagged_t immediate); void LoadF64(DoubleRegister dst, const MemOperand& mem, Register scratch = no_reg); @@ -1438,21 +1430,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void S128Select(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register mask); - private: - static const int kSmiShift = kSmiTagSize + kSmiShiftSize; - - int CalculateStackPassedWords(int num_reg_arguments, - int num_double_arguments); - void CallCFunctionHelper(Register function, int num_reg_arguments, - int num_double_arguments, - bool has_function_descriptor); -}; - -// MacroAssembler implements a collection of frequently used macros. -class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { - public: - using TurboAssembler::TurboAssembler; - // It assumes that the arguments are located below the stack pointer. // argc is the number of arguments not including the receiver. // TODO(victorgomes): Remove this function once we stick with the reversed @@ -1745,6 +1722,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { private: static const int kSmiShift = kSmiTagSize + kSmiShiftSize; + int CalculateStackPassedWords(int num_reg_arguments, + int num_double_arguments); + void CallCFunctionHelper(Register function, int num_reg_arguments, + int num_double_arguments, + bool has_function_descriptor); + // Helper functions for generating invokes.
void InvokePrologue(Register expected_parameter_count, Register actual_parameter_count, Label* done, diff --git a/src/codegen/riscv/assembler-riscv-inl.h b/src/codegen/riscv/assembler-riscv-inl.h index 262c1501c8..b9efcf502c 100644 --- a/src/codegen/riscv/assembler-riscv-inl.h +++ b/src/codegen/riscv/assembler-riscv-inl.h @@ -162,7 +162,7 @@ void Assembler::deserialization_set_target_internal_reference_at( HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) { DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_)); if (IsCompressedEmbeddedObject(rmode_)) { - return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTaggedAny( + return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTagged( cage_base, Assembler::target_compressed_address_at(pc_, constant_pool_)))); } else { diff --git a/src/codegen/riscv/macro-assembler-riscv.cc b/src/codegen/riscv/macro-assembler-riscv.cc index ef69c4adba..acabc9b155 100644 --- a/src/codegen/riscv/macro-assembler-riscv.cc +++ b/src/codegen/riscv/macro-assembler-riscv.cc @@ -41,7 +41,7 @@ static inline bool IsZero(const Operand& rt) { } } -int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, +int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, Register exclusion2, Register exclusion3) const { @@ -58,7 +58,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, return bytes; } -int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, +int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, Register exclusion2, Register exclusion3) { int bytes = 0; @@ -75,7 +75,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, return bytes; } -int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, +int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, Register exclusion2, Register exclusion3) { int bytes = 0; if (fp_mode == SaveFPRegsMode::kSave) { @@ -238,20 +238,19 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot( bind(&maybe_has_optimized_code); Register optimized_code_entry = flags; - LoadAnyTaggedField( - optimized_code_entry, - FieldMemOperand(feedback_vector, - FeedbackVector::kMaybeOptimizedCodeOffset)); + LoadTaggedField(optimized_code_entry, + FieldMemOperand(feedback_vector, + FeedbackVector::kMaybeOptimizedCodeOffset)); TailCallOptimizedCodeSlot(this, optimized_code_entry, temps.Acquire(), temps.Acquire()); } -void TurboAssembler::LoadRoot(Register destination, RootIndex index) { +void MacroAssembler::LoadRoot(Register destination, RootIndex index) { LoadWord(destination, MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index))); } -void TurboAssembler::LoadRoot(Register destination, RootIndex index, +void MacroAssembler::LoadRoot(Register destination, RootIndex index, Condition cond, Register src1, const Operand& src2) { Label skip; @@ -261,7 +260,7 @@ void TurboAssembler::LoadRoot(Register destination, RootIndex index, bind(&skip); } -void TurboAssembler::PushCommonFrame(Register marker_reg) { +void MacroAssembler::PushCommonFrame(Register marker_reg) { if (marker_reg.is_valid()) { Push(ra, fp, marker_reg); AddWord(fp, sp, Operand(kSystemPointerSize)); @@ -271,7 +270,7 @@ void TurboAssembler::PushCommonFrame(Register marker_reg) { } } -void TurboAssembler::PushStandardFrame(Register function_reg) { +void MacroAssembler::PushStandardFrame(Register function_reg) { int offset = 
-StandardFrameConstants::kContextOffset; if (function_reg.is_valid()) { Push(ra, fp, cp, function_reg, kJavaScriptCallArgCountRegister); @@ -328,17 +327,17 @@ void MacroAssembler::RecordWriteField(Register object, int offset, bind(&done); } -void TurboAssembler::MaybeSaveRegisters(RegList registers) { +void MacroAssembler::MaybeSaveRegisters(RegList registers) { if (registers.is_empty()) return; MultiPush(registers); } -void TurboAssembler::MaybeRestoreRegisters(RegList registers) { +void MacroAssembler::MaybeRestoreRegisters(RegList registers) { if (registers.is_empty()) return; MultiPop(registers); } -void TurboAssembler::CallEphemeronKeyBarrier(Register object, +void MacroAssembler::CallEphemeronKeyBarrier(Register object, Register slot_address, SaveFPRegsMode fp_mode) { DCHECK(!AreAliased(object, slot_address)); @@ -361,7 +360,7 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object, MaybeRestoreRegisters(registers); } -void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object, +void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode) { @@ -384,7 +383,7 @@ void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object, MaybeRestoreRegisters(registers); } -void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address, +void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode) { // Use CallRecordWriteStubSaveRegisters if the object and slot registers @@ -413,7 +412,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset, Register temp = temps.Acquire(); DCHECK(!AreAliased(object, value, temp)); AddWord(temp, object, offset); - LoadTaggedPointerField(temp, MemOperand(temp)); + LoadTaggedField(temp, MemOperand(temp)); Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite, temp, Operand(value)); } @@ -469,7 +468,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset, // --------------------------------------------------------------------------- // Instruction macros. 
#if V8_TARGET_ARCH_RISCV64 -void TurboAssembler::Add32(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Add32(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) && ((rd.code() & 0b11000) == 0b01000) && @@ -499,7 +498,7 @@ void TurboAssembler::Add32(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Sub32(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sub32(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) && ((rd.code() & 0b11000) == 0b01000) && @@ -541,15 +540,15 @@ void TurboAssembler::Sub32(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::AddWord(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::AddWord(Register rd, Register rs, const Operand& rt) { Add64(rd, rs, rt); } -void TurboAssembler::SubWord(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::SubWord(Register rd, Register rs, const Operand& rt) { Sub64(rd, rs, rt); } -void TurboAssembler::Sub64(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sub64(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) && ((rd.code() & 0b11000) == 0b01000) && @@ -598,7 +597,7 @@ void TurboAssembler::Sub64(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Add64(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Add64(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) && (rt.rm() != zero_reg) && (rs != zero_reg)) { @@ -638,7 +637,7 @@ void TurboAssembler::Add64(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Mul32(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Mul32(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { mulw(rd, rs, rt.rm()); } else { @@ -650,7 +649,7 @@ void TurboAssembler::Mul32(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Mulh32(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Mulh32(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { mul(rd, rs, rt.rm()); } else { @@ -663,7 +662,7 @@ void TurboAssembler::Mulh32(Register rd, Register rs, const Operand& rt) { srai(rd, rd, 32); } -void TurboAssembler::Mulhu32(Register rd, Register rs, const Operand& rt, +void MacroAssembler::Mulhu32(Register rd, Register rs, const Operand& rt, Register rsz, Register rtz) { slli(rsz, rs, 32); if (rt.is_reg()) { @@ -675,7 +674,7 @@ void TurboAssembler::Mulhu32(Register rd, Register rs, const Operand& rt, srai(rd, rd, 32); } -void TurboAssembler::Mul64(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Mul64(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { mul(rd, rs, rt.rm()); } else { @@ -687,7 +686,7 @@ void TurboAssembler::Mul64(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Mulh64(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Mulh64(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { mulh(rd, rs, rt.rm()); } else { @@ -699,7 +698,7 @@ void TurboAssembler::Mulh64(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Mulhu64(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Mulhu64(Register rd, Register rs, const Operand& rt) 
{ if (rt.is_reg()) { mulhu(rd, rs, rt.rm()); } else { @@ -711,7 +710,7 @@ void TurboAssembler::Mulhu64(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Div32(Register res, Register rs, const Operand& rt) { +void MacroAssembler::Div32(Register res, Register rs, const Operand& rt) { if (rt.is_reg()) { divw(res, rs, rt.rm()); } else { @@ -723,7 +722,7 @@ void TurboAssembler::Div32(Register res, Register rs, const Operand& rt) { } } -void TurboAssembler::Mod32(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Mod32(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { remw(rd, rs, rt.rm()); } else { @@ -735,7 +734,7 @@ void TurboAssembler::Mod32(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Modu32(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Modu32(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { remuw(rd, rs, rt.rm()); } else { @@ -747,7 +746,7 @@ void TurboAssembler::Modu32(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Div64(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Div64(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { div(rd, rs, rt.rm()); } else { @@ -759,7 +758,7 @@ void TurboAssembler::Div64(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Divu32(Register res, Register rs, const Operand& rt) { +void MacroAssembler::Divu32(Register res, Register rs, const Operand& rt) { if (rt.is_reg()) { divuw(res, rs, rt.rm()); } else { @@ -771,7 +770,7 @@ void TurboAssembler::Divu32(Register res, Register rs, const Operand& rt) { } } -void TurboAssembler::Divu64(Register res, Register rs, const Operand& rt) { +void MacroAssembler::Divu64(Register res, Register rs, const Operand& rt) { if (rt.is_reg()) { divu(res, rs, rt.rm()); } else { @@ -783,7 +782,7 @@ void TurboAssembler::Divu64(Register res, Register rs, const Operand& rt) { } } -void TurboAssembler::Mod64(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Mod64(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { rem(rd, rs, rt.rm()); } else { @@ -795,7 +794,7 @@ void TurboAssembler::Mod64(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Modu64(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Modu64(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { remu(rd, rs, rt.rm()); } else { @@ -807,11 +806,11 @@ void TurboAssembler::Modu64(Register rd, Register rs, const Operand& rt) { } } #elif V8_TARGET_ARCH_RISCV32 -void TurboAssembler::AddWord(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::AddWord(Register rd, Register rs, const Operand& rt) { Add32(rd, rs, rt); } -void TurboAssembler::Add32(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Add32(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) && (rt.rm() != zero_reg) && (rs != zero_reg)) { @@ -851,11 +850,11 @@ void TurboAssembler::Add32(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::SubWord(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::SubWord(Register rd, Register rs, const Operand& rt) { Sub32(rd, rs, rt); } -void TurboAssembler::Sub32(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sub32(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) 
&& ((rd.code() & 0b11000) == 0b01000) && @@ -905,11 +904,11 @@ void TurboAssembler::Sub32(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Mul32(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Mul32(Register rd, Register rs, const Operand& rt) { Mul(rd, rs, rt); } -void TurboAssembler::Mul(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { mul(rd, rs, rt.rm()); } else { @@ -921,7 +920,7 @@ void TurboAssembler::Mul(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Mulh(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { mulh(rd, rs, rt.rm()); } else { @@ -933,7 +932,7 @@ void TurboAssembler::Mulh(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Mulhu(Register rd, Register rs, const Operand& rt, +void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt, Register rsz, Register rtz) { if (rt.is_reg()) { mulhu(rd, rs, rt.rm()); @@ -946,7 +945,7 @@ void TurboAssembler::Mulhu(Register rd, Register rs, const Operand& rt, } } -void TurboAssembler::Div(Register res, Register rs, const Operand& rt) { +void MacroAssembler::Div(Register res, Register rs, const Operand& rt) { if (rt.is_reg()) { div(res, rs, rt.rm()); } else { @@ -958,7 +957,7 @@ void TurboAssembler::Div(Register res, Register rs, const Operand& rt) { } } -void TurboAssembler::Mod(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { rem(rd, rs, rt.rm()); } else { @@ -970,7 +969,7 @@ void TurboAssembler::Mod(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Modu(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { remu(rd, rs, rt.rm()); } else { @@ -982,7 +981,7 @@ void TurboAssembler::Modu(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Divu(Register res, Register rs, const Operand& rt) { +void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) { if (rt.is_reg()) { divu(res, rs, rt.rm()); } else { @@ -996,7 +995,7 @@ void TurboAssembler::Divu(Register res, Register rs, const Operand& rt) { #endif -void TurboAssembler::And(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::And(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) && ((rd.code() & 0b11000) == 0b01000) && @@ -1022,7 +1021,7 @@ void TurboAssembler::And(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Or(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) && ((rd.code() & 0b11000) == 0b01000) && @@ -1044,7 +1043,7 @@ void TurboAssembler::Or(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Xor(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) && ((rd.code() & 0b11000) == 0b01000) && @@ -1066,7 +1065,7 @@ void TurboAssembler::Xor(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Nor(Register rd, Register rs, const Operand& 
rt) { +void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { or_(rd, rs, rt.rm()); not_(rd, rd); @@ -1076,12 +1075,12 @@ void TurboAssembler::Nor(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Neg(Register rs, const Operand& rt) { +void MacroAssembler::Neg(Register rs, const Operand& rt) { DCHECK(rt.is_reg()); neg(rs, rt.rm()); } -void TurboAssembler::Seqz(Register rd, const Operand& rt) { +void MacroAssembler::Seqz(Register rd, const Operand& rt) { if (rt.is_reg()) { seqz(rd, rt.rm()); } else { @@ -1089,7 +1088,7 @@ void TurboAssembler::Seqz(Register rd, const Operand& rt) { } } -void TurboAssembler::Snez(Register rd, const Operand& rt) { +void MacroAssembler::Snez(Register rd, const Operand& rt) { if (rt.is_reg()) { snez(rd, rt.rm()); } else { @@ -1097,7 +1096,7 @@ void TurboAssembler::Snez(Register rd, const Operand& rt) { } } -void TurboAssembler::Seq(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Seq(Register rd, Register rs, const Operand& rt) { if (rs == zero_reg) { Seqz(rd, rt); } else if (IsZero(rt)) { @@ -1108,7 +1107,7 @@ void TurboAssembler::Seq(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Sne(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sne(Register rd, Register rs, const Operand& rt) { if (rs == zero_reg) { Snez(rd, rt); } else if (IsZero(rt)) { @@ -1119,7 +1118,7 @@ void TurboAssembler::Sne(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Slt(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { slt(rd, rs, rt.rm()); } else { @@ -1136,7 +1135,7 @@ void TurboAssembler::Slt(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { sltu(rd, rs, rt.rm()); } else { @@ -1153,7 +1152,7 @@ void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Sle(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sle(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { slt(rd, rt.rm(), rs); } else { @@ -1167,7 +1166,7 @@ void TurboAssembler::Sle(Register rd, Register rs, const Operand& rt) { xori(rd, rd, 1); } -void TurboAssembler::Sleu(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sleu(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { sltu(rd, rt.rm(), rs); } else { @@ -1181,17 +1180,17 @@ void TurboAssembler::Sleu(Register rd, Register rs, const Operand& rt) { xori(rd, rd, 1); } -void TurboAssembler::Sge(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sge(Register rd, Register rs, const Operand& rt) { Slt(rd, rs, rt); xori(rd, rd, 1); } -void TurboAssembler::Sgeu(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sgeu(Register rd, Register rs, const Operand& rt) { Sltu(rd, rs, rt); xori(rd, rd, 1); } -void TurboAssembler::Sgt(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sgt(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { slt(rd, rt.rm(), rs); } else { @@ -1204,7 +1203,7 @@ void TurboAssembler::Sgt(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Sgtu(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sgtu(Register rd, Register rs, const Operand& rt) { if 
(rt.is_reg()) { sltu(rd, rt.rm(), rs); } else { @@ -1218,7 +1217,7 @@ void TurboAssembler::Sgtu(Register rd, Register rs, const Operand& rt) { } #if V8_TARGET_ARCH_RISCV64 -void TurboAssembler::Sll32(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sll32(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { sllw(rd, rs, rt.rm()); } else { @@ -1227,7 +1226,7 @@ void TurboAssembler::Sll32(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Sra32(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sra32(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { sraw(rd, rs, rt.rm()); } else { @@ -1236,7 +1235,7 @@ void TurboAssembler::Sra32(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Srl32(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Srl32(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { srlw(rd, rs, rt.rm()); } else { @@ -1245,11 +1244,11 @@ void TurboAssembler::Srl32(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::SraWord(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::SraWord(Register rd, Register rs, const Operand& rt) { Sra64(rd, rs, rt); } -void TurboAssembler::Sra64(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sra64(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { sra(rd, rs, rt.rm()); } else if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) && @@ -1262,11 +1261,11 @@ void TurboAssembler::Sra64(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::SrlWord(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::SrlWord(Register rd, Register rs, const Operand& rt) { Srl64(rd, rs, rt); } -void TurboAssembler::Srl64(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Srl64(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { srl(rd, rs, rt.rm()); } else if (v8_flags.riscv_c_extension && (rd.code() == rs.code()) && @@ -1279,11 +1278,11 @@ void TurboAssembler::Srl64(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::SllWord(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::SllWord(Register rd, Register rs, const Operand& rt) { Sll64(rd, rs, rt); } -void TurboAssembler::Sll64(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sll64(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { sll(rd, rs, rt.rm()); } else { @@ -1297,7 +1296,7 @@ void TurboAssembler::Sll64(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); BlockTrampolinePoolScope block_trampoline_pool(this); @@ -1322,7 +1321,7 @@ void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Dror(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); BlockTrampolinePoolScope block_trampoline_pool(this); @@ -1345,11 +1344,11 @@ void TurboAssembler::Dror(Register rd, Register rs, const Operand& rt) { } } #elif V8_TARGET_ARCH_RISCV32 -void TurboAssembler::SllWord(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::SllWord(Register rd, Register rs, const Operand& rt) 
{ Sll32(rd, rs, rt); } -void TurboAssembler::Sll32(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sll32(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { sll(rd, rs, rt.rm()); } else { @@ -1358,11 +1357,11 @@ void TurboAssembler::Sll32(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::SraWord(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::SraWord(Register rd, Register rs, const Operand& rt) { Sra32(rd, rs, rt); } -void TurboAssembler::Sra32(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Sra32(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { sra(rd, rs, rt.rm()); } else { @@ -1371,11 +1370,11 @@ void TurboAssembler::Sra32(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::SrlWord(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::SrlWord(Register rd, Register rs, const Operand& rt) { Srl32(rd, rs, rt); } -void TurboAssembler::Srl32(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Srl32(Register rd, Register rs, const Operand& rt) { if (rt.is_reg()) { srl(rd, rs, rt.rm()); } else { @@ -1384,7 +1383,7 @@ void TurboAssembler::Srl32(Register rd, Register rs, const Operand& rt) { } } -void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) { +void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); BlockTrampolinePoolScope block_trampoline_pool(this); @@ -1408,7 +1407,7 @@ void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) { } #endif -void TurboAssembler::Li(Register rd, intptr_t imm) { +void MacroAssembler::Li(Register rd, intptr_t imm) { if (v8_flags.riscv_c_extension && (rd != zero_reg) && is_int6(imm)) { c_li(rd, imm); } else { @@ -1416,7 +1415,7 @@ void TurboAssembler::Li(Register rd, intptr_t imm) { } } -void TurboAssembler::Mv(Register rd, const Operand& rt) { +void MacroAssembler::Mv(Register rd, const Operand& rt) { if (v8_flags.riscv_c_extension && (rd != zero_reg) && (rt.rm() != zero_reg)) { c_mv(rd, rt.rm()); } else { @@ -1424,7 +1423,7 @@ void TurboAssembler::Mv(Register rd, const Operand& rt) { } } -void TurboAssembler::CalcScaledAddress(Register rd, Register rt, Register rs, +void MacroAssembler::CalcScaledAddress(Register rd, Register rt, Register rs, uint8_t sa) { DCHECK(sa >= 1 && sa <= 31); UseScratchRegisterScope temps(this); @@ -1437,7 +1436,7 @@ void TurboAssembler::CalcScaledAddress(Register rd, Register rt, Register rs, // ------------Pseudo-instructions------------- // Change endianness #if V8_TARGET_ARCH_RISCV64 -void TurboAssembler::ByteSwap(Register rd, Register rs, int operand_size, +void MacroAssembler::ByteSwap(Register rd, Register rs, int operand_size, Register scratch) { DCHECK_NE(scratch, rs); DCHECK_NE(scratch, rd); @@ -1495,7 +1494,7 @@ void TurboAssembler::ByteSwap(Register rd, Register rs, int operand_size, } #elif V8_TARGET_ARCH_RISCV32 -void TurboAssembler::ByteSwap(Register rd, Register rs, int operand_size, +void MacroAssembler::ByteSwap(Register rd, Register rs, int operand_size, Register scratch) { DCHECK_NE(scratch, rs); DCHECK_NE(scratch, rd); @@ -1522,7 +1521,7 @@ void TurboAssembler::ByteSwap(Register rd, Register rs, int operand_size, #endif template -void TurboAssembler::LoadNBytes(Register rd, const MemOperand& rs, +void MacroAssembler::LoadNBytes(Register rd, const MemOperand& rs, Register scratch) { DCHECK(rd != rs.rm() && rd != scratch); 
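
The compressed-instruction (RVC) fast paths that recur throughout the helpers above gate on two operand checks: the three-bit register forms (c_and, c_xor, c_sub, c_lw, c_sw, ...) only accept x8 through x15, which is what the (code & 0b11000) == 0b01000 test encodes, while forms such as c_li only need a non-zero destination and a 6-bit signed immediate. Below is a standalone C++ sketch of those checks, kept outside the patch; the helper names are assumptions made for illustration only.

// --- Illustrative sketch, not part of the patch ---
#include <cassert>
#include <cstdint>

// Compressed forms with 3-bit register fields can only name x8..x15
// (s0/s1/a0..a5); those are exactly the codes with bit 3 set and bit 4 clear.
bool IsCompressible3BitReg(int code) { return (code & 0b11000) == 0b01000; }

// c_li takes any destination except x0 plus a 6-bit signed immediate,
// matching the (rd != zero_reg) && is_int6(imm) condition in Li above.
bool FitsCLi(int rd_code, int64_t imm) {
  return rd_code != 0 && imm >= -32 && imm <= 31;
}

int main() {
  assert(IsCompressible3BitReg(8) && IsCompressible3BitReg(15));
  assert(!IsCompressible3BitReg(5) && !IsCompressible3BitReg(16));
  assert(FitsCLi(10, 31) && !FitsCLi(10, 32) && !FitsCLi(0, 1));
  return 0;
}
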
DCHECK_LE(NBYTES, 8); @@ -1544,7 +1543,7 @@ void TurboAssembler::LoadNBytes(Register rd, const MemOperand& rs, } template -void TurboAssembler::LoadNBytesOverwritingBaseReg(const MemOperand& rs, +void MacroAssembler::LoadNBytesOverwritingBaseReg(const MemOperand& rs, Register scratch0, Register scratch1) { // This function loads nbytes from memory specified by rs and into rs.rm() @@ -1573,7 +1572,7 @@ void TurboAssembler::LoadNBytesOverwritingBaseReg(const MemOperand& rs, } template -void TurboAssembler::UnalignedLoadHelper(Register rd, const MemOperand& rs) { +void MacroAssembler::UnalignedLoadHelper(Register rd, const MemOperand& rs) { BlockTrampolinePoolScope block_trampoline_pool(this); UseScratchRegisterScope temps(this); @@ -1604,7 +1603,7 @@ void TurboAssembler::UnalignedLoadHelper(Register rd, const MemOperand& rs) { #if V8_TARGET_ARCH_RISCV64 template -void TurboAssembler::UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs, +void MacroAssembler::UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs, Register scratch_base) { DCHECK(NBYTES == 4 || NBYTES == 8); DCHECK_NE(scratch_base, rs.rm()); @@ -1629,7 +1628,7 @@ void TurboAssembler::UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs, } #elif V8_TARGET_ARCH_RISCV32 template -void TurboAssembler::UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs, +void MacroAssembler::UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs, Register scratch_base) { DCHECK_EQ(NBYTES, 4); DCHECK_NE(scratch_base, rs.rm()); @@ -1650,7 +1649,7 @@ void TurboAssembler::UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs, fmv_w_x(frd, scratch); } -void TurboAssembler::UnalignedDoubleHelper(FPURegister frd, +void MacroAssembler::UnalignedDoubleHelper(FPURegister frd, const MemOperand& rs, Register scratch_base) { DCHECK_NE(scratch_base, rs.rm()); @@ -1679,7 +1678,7 @@ void TurboAssembler::UnalignedDoubleHelper(FPURegister frd, #endif template -void TurboAssembler::UnalignedStoreHelper(Register rd, const MemOperand& rs, +void MacroAssembler::UnalignedStoreHelper(Register rd, const MemOperand& rs, Register scratch_other) { DCHECK(scratch_other != rs.rm()); DCHECK_LE(NBYTES, 8); @@ -1718,7 +1717,7 @@ void TurboAssembler::UnalignedStoreHelper(Register rd, const MemOperand& rs, #if V8_TARGET_ARCH_RISCV64 template -void TurboAssembler::UnalignedFStoreHelper(FPURegister frd, +void MacroAssembler::UnalignedFStoreHelper(FPURegister frd, const MemOperand& rs, Register scratch) { DCHECK(NBYTES == 8 || NBYTES == 4); @@ -1732,7 +1731,7 @@ void TurboAssembler::UnalignedFStoreHelper(FPURegister frd, } #elif V8_TARGET_ARCH_RISCV32 template -void TurboAssembler::UnalignedFStoreHelper(FPURegister frd, +void MacroAssembler::UnalignedFStoreHelper(FPURegister frd, const MemOperand& rs, Register scratch) { DCHECK_EQ(NBYTES, 4); @@ -1740,7 +1739,7 @@ void TurboAssembler::UnalignedFStoreHelper(FPURegister frd, fmv_x_w(scratch, frd); UnalignedStoreHelper(scratch, rs); } -void TurboAssembler::UnalignedDStoreHelper(FPURegister frd, +void MacroAssembler::UnalignedDStoreHelper(FPURegister frd, const MemOperand& rs, Register scratch) { DCHECK_NE(scratch, rs.rm()); @@ -1757,7 +1756,7 @@ void TurboAssembler::UnalignedDStoreHelper(FPURegister frd, #endif template -void TurboAssembler::AlignedLoadHelper(Reg_T target, const MemOperand& rs, +void MacroAssembler::AlignedLoadHelper(Reg_T target, const MemOperand& rs, Func generator) { MemOperand source = rs; UseScratchRegisterScope temps(this); @@ -1771,7 +1770,7 @@ void 
TurboAssembler::AlignedLoadHelper(Reg_T target, const MemOperand& rs, } template -void TurboAssembler::AlignedStoreHelper(Reg_T value, const MemOperand& rs, +void MacroAssembler::AlignedStoreHelper(Reg_T value, const MemOperand& rs, Func generator) { MemOperand source = rs; UseScratchRegisterScope temps(this); @@ -1787,32 +1786,32 @@ void TurboAssembler::AlignedStoreHelper(Reg_T value, const MemOperand& rs, generator(value, source); } -void TurboAssembler::Ulw(Register rd, const MemOperand& rs) { +void MacroAssembler::Ulw(Register rd, const MemOperand& rs) { UnalignedLoadHelper<4, true>(rd, rs); } #if V8_TARGET_ARCH_RISCV64 -void TurboAssembler::Ulwu(Register rd, const MemOperand& rs) { +void MacroAssembler::Ulwu(Register rd, const MemOperand& rs) { UnalignedLoadHelper<4, false>(rd, rs); } #endif -void TurboAssembler::Usw(Register rd, const MemOperand& rs) { +void MacroAssembler::Usw(Register rd, const MemOperand& rs) { UnalignedStoreHelper<4>(rd, rs); } -void TurboAssembler::Ulh(Register rd, const MemOperand& rs) { +void MacroAssembler::Ulh(Register rd, const MemOperand& rs) { UnalignedLoadHelper<2, true>(rd, rs); } -void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) { +void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) { UnalignedLoadHelper<2, false>(rd, rs); } -void TurboAssembler::Ush(Register rd, const MemOperand& rs) { +void MacroAssembler::Ush(Register rd, const MemOperand& rs) { UnalignedStoreHelper<2>(rd, rs); } -void TurboAssembler::Uld(Register rd, const MemOperand& rs) { +void MacroAssembler::Uld(Register rd, const MemOperand& rs) { UnalignedLoadHelper<8, true>(rd, rs); } #if V8_TARGET_ARCH_RISCV64 @@ -1838,23 +1837,23 @@ void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs) { } #endif -void TurboAssembler::Usd(Register rd, const MemOperand& rs) { +void MacroAssembler::Usd(Register rd, const MemOperand& rs) { UnalignedStoreHelper<8>(rd, rs); } -void TurboAssembler::ULoadFloat(FPURegister fd, const MemOperand& rs, +void MacroAssembler::ULoadFloat(FPURegister fd, const MemOperand& rs, Register scratch) { DCHECK_NE(scratch, rs.rm()); UnalignedFLoadHelper<4>(fd, rs, scratch); } -void TurboAssembler::UStoreFloat(FPURegister fd, const MemOperand& rs, +void MacroAssembler::UStoreFloat(FPURegister fd, const MemOperand& rs, Register scratch) { DCHECK_NE(scratch, rs.rm()); UnalignedFStoreHelper<4>(fd, rs, scratch); } -void TurboAssembler::ULoadDouble(FPURegister fd, const MemOperand& rs, +void MacroAssembler::ULoadDouble(FPURegister fd, const MemOperand& rs, Register scratch) { DCHECK_NE(scratch, rs.rm()); #if V8_TARGET_ARCH_RISCV64 @@ -1864,7 +1863,7 @@ void TurboAssembler::ULoadDouble(FPURegister fd, const MemOperand& rs, #endif } -void TurboAssembler::UStoreDouble(FPURegister fd, const MemOperand& rs, +void MacroAssembler::UStoreDouble(FPURegister fd, const MemOperand& rs, Register scratch) { DCHECK_NE(scratch, rs.rm()); #if V8_TARGET_ARCH_RISCV64 @@ -1874,49 +1873,49 @@ void TurboAssembler::UStoreDouble(FPURegister fd, const MemOperand& rs, #endif } -void TurboAssembler::Lb(Register rd, const MemOperand& rs) { +void MacroAssembler::Lb(Register rd, const MemOperand& rs) { auto fn = [this](Register target, const MemOperand& source) { this->lb(target, source.rm(), source.offset()); }; AlignedLoadHelper(rd, rs, fn); } -void TurboAssembler::Lbu(Register rd, const MemOperand& rs) { +void MacroAssembler::Lbu(Register rd, const MemOperand& rs) { auto fn = [this](Register target, const MemOperand& source) { this->lbu(target, source.rm(), source.offset()); 
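
The unaligned-access helpers above (LoadNBytes, UnalignedLoadHelper and the FP variants) exist so that the generated code never issues a potentially trapping unaligned load: the value is assembled from individual byte loads instead. Their bodies are largely elided in this hunk, so the following standalone C++ sketch shows only one plausible little-endian assembly consistent with the signed/unsigned wrappers (Ulh vs. Ulhu); the byte ordering and names here are assumptions, not the V8 implementation.

// --- Illustrative sketch, not part of the patch ---
#include <cassert>
#include <cstdint>

template <int NBYTES, bool LOAD_SIGNED>
int64_t LoadNBytesModel(const uint8_t* p) {
  static_assert(NBYTES >= 1 && NBYTES <= 8, "matches DCHECK_LE(NBYTES, 8)");
  // Start from the most significant byte, sign- or zero-extended (lb vs. lbu).
  int64_t result = LOAD_SIGNED
                       ? static_cast<int64_t>(static_cast<int8_t>(p[NBYTES - 1]))
                       : static_cast<int64_t>(p[NBYTES - 1]);
  for (int i = NBYTES - 2; i >= 0; --i) {
    // One byte load plus a shift-and-or per remaining byte.
    result = static_cast<int64_t>((static_cast<uint64_t>(result) << 8) | p[i]);
  }
  return result;
}

int main() {
  const uint8_t buf[5] = {0, 0x78, 0x56, 0x34, 0x12};  // starts at an odd address
  assert((LoadNBytesModel<4, true>(buf + 1)) == 0x12345678);
  const uint8_t neg[2] = {0xFE, 0xFF};                 // -2 as a signed half-word
  assert((LoadNBytesModel<2, true>(neg)) == -2);
  return 0;
}
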
}; AlignedLoadHelper(rd, rs, fn); } -void TurboAssembler::Sb(Register rd, const MemOperand& rs) { +void MacroAssembler::Sb(Register rd, const MemOperand& rs) { auto fn = [this](Register value, const MemOperand& source) { this->sb(value, source.rm(), source.offset()); }; AlignedStoreHelper(rd, rs, fn); } -void TurboAssembler::Lh(Register rd, const MemOperand& rs) { +void MacroAssembler::Lh(Register rd, const MemOperand& rs) { auto fn = [this](Register target, const MemOperand& source) { this->lh(target, source.rm(), source.offset()); }; AlignedLoadHelper(rd, rs, fn); } -void TurboAssembler::Lhu(Register rd, const MemOperand& rs) { +void MacroAssembler::Lhu(Register rd, const MemOperand& rs) { auto fn = [this](Register target, const MemOperand& source) { this->lhu(target, source.rm(), source.offset()); }; AlignedLoadHelper(rd, rs, fn); } -void TurboAssembler::Sh(Register rd, const MemOperand& rs) { +void MacroAssembler::Sh(Register rd, const MemOperand& rs) { auto fn = [this](Register value, const MemOperand& source) { this->sh(value, source.rm(), source.offset()); }; AlignedStoreHelper(rd, rs, fn); } -void TurboAssembler::Lw(Register rd, const MemOperand& rs) { +void MacroAssembler::Lw(Register rd, const MemOperand& rs) { auto fn = [this](Register target, const MemOperand& source) { if (v8_flags.riscv_c_extension && ((target.code() & 0b11000) == 0b01000) && ((source.rm().code() & 0b11000) == 0b01000) && @@ -1934,14 +1933,14 @@ void TurboAssembler::Lw(Register rd, const MemOperand& rs) { } #if V8_TARGET_ARCH_RISCV64 -void TurboAssembler::Lwu(Register rd, const MemOperand& rs) { +void MacroAssembler::Lwu(Register rd, const MemOperand& rs) { auto fn = [this](Register target, const MemOperand& source) { this->lwu(target, source.rm(), source.offset()); }; AlignedLoadHelper(rd, rs, fn); } #endif -void TurboAssembler::Sw(Register rd, const MemOperand& rs) { +void MacroAssembler::Sw(Register rd, const MemOperand& rs) { auto fn = [this](Register value, const MemOperand& source) { if (v8_flags.riscv_c_extension && ((value.code() & 0b11000) == 0b01000) && ((source.rm().code() & 0b11000) == 0b01000) && @@ -1958,7 +1957,7 @@ void TurboAssembler::Sw(Register rd, const MemOperand& rs) { } #if V8_TARGET_ARCH_RISCV64 -void TurboAssembler::Ld(Register rd, const MemOperand& rs) { +void MacroAssembler::Ld(Register rd, const MemOperand& rs) { auto fn = [this](Register target, const MemOperand& source) { if (v8_flags.riscv_c_extension && ((target.code() & 0b11000) == 0b01000) && ((source.rm().code() & 0b11000) == 0b01000) && @@ -1975,7 +1974,7 @@ void TurboAssembler::Ld(Register rd, const MemOperand& rs) { AlignedLoadHelper(rd, rs, fn); } -void TurboAssembler::Sd(Register rd, const MemOperand& rs) { +void MacroAssembler::Sd(Register rd, const MemOperand& rs) { auto fn = [this](Register value, const MemOperand& source) { if (v8_flags.riscv_c_extension && ((value.code() & 0b11000) == 0b01000) && ((source.rm().code() & 0b11000) == 0b01000) && @@ -1991,21 +1990,21 @@ void TurboAssembler::Sd(Register rd, const MemOperand& rs) { AlignedStoreHelper(rd, rs, fn); } #endif -void TurboAssembler::LoadFloat(FPURegister fd, const MemOperand& src) { +void MacroAssembler::LoadFloat(FPURegister fd, const MemOperand& src) { auto fn = [this](FPURegister target, const MemOperand& source) { this->flw(target, source.rm(), source.offset()); }; AlignedLoadHelper(fd, src, fn); } -void TurboAssembler::StoreFloat(FPURegister fs, const MemOperand& src) { +void MacroAssembler::StoreFloat(FPURegister fs, const MemOperand& src) { auto fn = 
[this](FPURegister value, const MemOperand& source) { this->fsw(value, source.rm(), source.offset()); }; AlignedStoreHelper(fs, src, fn); } -void TurboAssembler::LoadDouble(FPURegister fd, const MemOperand& src) { +void MacroAssembler::LoadDouble(FPURegister fd, const MemOperand& src) { auto fn = [this](FPURegister target, const MemOperand& source) { if (v8_flags.riscv_c_extension && ((target.code() & 0b11000) == 0b01000) && ((source.rm().code() & 0b11000) == 0b01000) && @@ -2021,7 +2020,7 @@ void TurboAssembler::LoadDouble(FPURegister fd, const MemOperand& src) { AlignedLoadHelper(fd, src, fn); } -void TurboAssembler::StoreDouble(FPURegister fs, const MemOperand& src) { +void MacroAssembler::StoreDouble(FPURegister fs, const MemOperand& src) { auto fn = [this](FPURegister value, const MemOperand& source) { if (v8_flags.riscv_c_extension && ((value.code() & 0b11000) == 0b01000) && ((source.rm().code() & 0b11000) == 0b01000) && @@ -2037,7 +2036,7 @@ void TurboAssembler::StoreDouble(FPURegister fs, const MemOperand& src) { AlignedStoreHelper(fs, src, fn); } -void TurboAssembler::Ll(Register rd, const MemOperand& rs) { +void MacroAssembler::Ll(Register rd, const MemOperand& rs) { bool is_one_instruction = rs.offset() == 0; if (is_one_instruction) { lr_w(false, false, rd, rs.rm()); @@ -2050,7 +2049,7 @@ void TurboAssembler::Ll(Register rd, const MemOperand& rs) { } #if V8_TARGET_ARCH_RISCV64 -void TurboAssembler::Lld(Register rd, const MemOperand& rs) { +void MacroAssembler::Lld(Register rd, const MemOperand& rs) { bool is_one_instruction = rs.offset() == 0; if (is_one_instruction) { lr_d(false, false, rd, rs.rm()); @@ -2062,7 +2061,7 @@ void TurboAssembler::Lld(Register rd, const MemOperand& rs) { } } #endif -void TurboAssembler::Sc(Register rd, const MemOperand& rs) { +void MacroAssembler::Sc(Register rd, const MemOperand& rs) { bool is_one_instruction = rs.offset() == 0; if (is_one_instruction) { sc_w(false, false, rd, rs.rm(), rd); @@ -2074,7 +2073,7 @@ void TurboAssembler::Sc(Register rd, const MemOperand& rs) { } } #if V8_TARGET_ARCH_RISCV64 -void TurboAssembler::Scd(Register rd, const MemOperand& rs) { +void MacroAssembler::Scd(Register rd, const MemOperand& rs) { bool is_one_instruction = rs.offset() == 0; if (is_one_instruction) { sc_d(false, false, rd, rs.rm(), rd); @@ -2086,7 +2085,7 @@ void TurboAssembler::Scd(Register rd, const MemOperand& rs) { } } #endif -void TurboAssembler::li(Register dst, Handle value, +void MacroAssembler::li(Register dst, Handle value, RelocInfo::Mode rmode) { // TODO(jgruber,v8:8887): Also consider a root-relative load when generating // non-isolate-independent code. In many cases it might be cheaper than @@ -2104,7 +2103,7 @@ void TurboAssembler::li(Register dst, Handle value, } } -void TurboAssembler::li(Register dst, ExternalReference value, LiFlags mode) { +void MacroAssembler::li(Register dst, ExternalReference value, LiFlags mode) { // TODO(jgruber,v8:8887): Also consider a root-relative load when generating // non-isolate-independent code. In many cases it might be cheaper than // embedding the relocatable value. 
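
The Ll/Sc (and 64-bit Lld/Scd) wrappers above reflect that lr.w/sc.w take no immediate offset: with a zero MemOperand offset a single instruction is enough, otherwise the effective address has to be formed in a scratch register first. The else branches are elided in this hunk, so the sketch below is only a model of that dispatch; the emit_* stand-ins and register numbers are assumptions for illustration, not V8 APIs.

// --- Illustrative sketch, not part of the patch ---
#include <cstdint>
#include <cstdio>

struct MemOp { int base; int32_t offset; };

void emit_lr_w(int rd, int base)            { std::printf("lr.w x%d, (x%d)\n", rd, base); }
void emit_addi(int rd, int rs, int32_t imm) { std::printf("addi x%d, x%d, %d\n", rd, rs, imm); }

// Mirrors the "is_one_instruction = rs.offset() == 0" split in Ll above.
void EmitLl(int rd, MemOp rs, int scratch) {
  if (rs.offset == 0) {
    emit_lr_w(rd, rs.base);                  // single-instruction form
  } else {
    emit_addi(scratch, rs.base, rs.offset);  // materialize base + offset first
    emit_lr_w(rd, scratch);
  }
}

int main() {
  EmitLl(/*rd=*/10, MemOp{/*base=*/11, /*offset=*/0}, /*scratch=*/6);
  EmitLl(/*rd=*/10, MemOp{/*base=*/11, /*offset=*/16}, /*scratch=*/6);
  return 0;
}
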
@@ -2124,7 +2123,7 @@ static inline int InstrCountForLiLower32Bit(int64_t value) { return 2; } -int TurboAssembler::InstrCountForLi64Bit(int64_t value) { +int MacroAssembler::InstrCountForLi64Bit(int64_t value) { if (is_int32(value + 0x800)) { return InstrCountForLiLower32Bit(value); } else { @@ -2134,14 +2133,14 @@ int TurboAssembler::InstrCountForLi64Bit(int64_t value) { return INT_MAX; } -void TurboAssembler::li_optimized(Register rd, Operand j, LiFlags mode) { +void MacroAssembler::li_optimized(Register rd, Operand j, LiFlags mode) { DCHECK(!j.is_reg()); DCHECK(!MustUseReg(j.rmode())); DCHECK(mode == OPTIMIZE_SIZE); Li(rd, j.immediate()); } -void TurboAssembler::li(Register rd, Operand j, LiFlags mode) { +void MacroAssembler::li(Register rd, Operand j, LiFlags mode) { DCHECK(!j.is_reg()); BlockTrampolinePoolScope block_trampoline_pool(this); if (!MustUseReg(j.rmode()) && mode == OPTIMIZE_SIZE) { @@ -2188,7 +2187,7 @@ static RegList t_regs = {t0, t1, t2, t3, t4, t5, t6}; static RegList a_regs = {a0, a1, a2, a3, a4, a5, a6, a7}; static RegList s_regs = {s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11}; -void TurboAssembler::MultiPush(RegList regs) { +void MacroAssembler::MultiPush(RegList regs) { int16_t num_to_push = regs.Count(); int16_t stack_offset = num_to_push * kSystemPointerSize; @@ -2232,7 +2231,7 @@ void TurboAssembler::MultiPush(RegList regs) { #undef S_REGS } -void TurboAssembler::MultiPop(RegList regs) { +void MacroAssembler::MultiPop(RegList regs) { int16_t stack_offset = 0; #define TEST_AND_POP_REG(reg) \ @@ -2273,7 +2272,7 @@ void TurboAssembler::MultiPop(RegList regs) { #undef A_REGS } -void TurboAssembler::MultiPushFPU(DoubleRegList regs) { +void MacroAssembler::MultiPushFPU(DoubleRegList regs) { int16_t num_to_push = regs.Count(); int16_t stack_offset = num_to_push * kDoubleSize; @@ -2286,7 +2285,7 @@ void TurboAssembler::MultiPushFPU(DoubleRegList regs) { } } -void TurboAssembler::MultiPopFPU(DoubleRegList regs) { +void MacroAssembler::MultiPopFPU(DoubleRegList regs) { int16_t stack_offset = 0; for (int16_t i = 0; i < kNumRegisters; i++) { @@ -2299,7 +2298,7 @@ void TurboAssembler::MultiPopFPU(DoubleRegList regs) { } #if V8_TARGET_ARCH_RISCV32 -void TurboAssembler::AddPair(Register dst_low, Register dst_high, +void MacroAssembler::AddPair(Register dst_low, Register dst_high, Register left_low, Register left_high, Register right_low, Register right_high, Register scratch1, Register scratch2) { @@ -2317,7 +2316,7 @@ void TurboAssembler::AddPair(Register dst_low, Register dst_high, Move(dst_low, scratch1); } -void TurboAssembler::SubPair(Register dst_low, Register dst_high, +void MacroAssembler::SubPair(Register dst_low, Register dst_high, Register left_low, Register left_high, Register right_low, Register right_high, Register scratch1, Register scratch2) { @@ -2335,27 +2334,27 @@ void TurboAssembler::SubPair(Register dst_low, Register dst_high, Move(dst_low, scratch1); } -void TurboAssembler::AndPair(Register dst_low, Register dst_high, +void MacroAssembler::AndPair(Register dst_low, Register dst_high, Register left_low, Register left_high, Register right_low, Register right_high) { And(dst_low, left_low, right_low); And(dst_high, left_high, right_high); } -void TurboAssembler::OrPair(Register dst_low, Register dst_high, +void MacroAssembler::OrPair(Register dst_low, Register dst_high, Register left_low, Register left_high, Register right_low, Register right_high) { Or(dst_low, left_low, right_low); Or(dst_high, left_high, right_high); } -void 
TurboAssembler::XorPair(Register dst_low, Register dst_high, +void MacroAssembler::XorPair(Register dst_low, Register dst_high, Register left_low, Register left_high, Register right_low, Register right_high) { Xor(dst_low, left_low, right_low); Xor(dst_high, left_high, right_high); } -void TurboAssembler::MulPair(Register dst_low, Register dst_high, +void MacroAssembler::MulPair(Register dst_low, Register dst_high, Register left_low, Register left_high, Register right_low, Register right_high, Register scratch1, Register scratch2) { @@ -2381,7 +2380,7 @@ void TurboAssembler::MulPair(Register dst_low, Register dst_high, Add32(dst_high, scratch2, scratch3); } -void TurboAssembler::ShlPair(Register dst_low, Register dst_high, +void MacroAssembler::ShlPair(Register dst_low, Register dst_high, Register src_low, Register src_high, Register shift, Register scratch1, Register scratch2) { @@ -2426,7 +2425,7 @@ void TurboAssembler::ShlPair(Register dst_low, Register dst_high, bind(&done); } -void TurboAssembler::ShlPair(Register dst_low, Register dst_high, +void MacroAssembler::ShlPair(Register dst_low, Register dst_high, Register src_low, Register src_high, int32_t shift, Register scratch1, Register scratch2) { DCHECK_GE(63, shift); @@ -2451,7 +2450,7 @@ void TurboAssembler::ShlPair(Register dst_low, Register dst_high, } } -void TurboAssembler::ShrPair(Register dst_low, Register dst_high, +void MacroAssembler::ShrPair(Register dst_low, Register dst_high, Register src_low, Register src_high, Register shift, Register scratch1, Register scratch2) { @@ -2496,7 +2495,7 @@ void TurboAssembler::ShrPair(Register dst_low, Register dst_high, bind(&done); } -void TurboAssembler::ShrPair(Register dst_low, Register dst_high, +void MacroAssembler::ShrPair(Register dst_low, Register dst_high, Register src_low, Register src_high, int32_t shift, Register scratch1, Register scratch2) { DCHECK_GE(63, shift); @@ -2521,7 +2520,7 @@ void TurboAssembler::ShrPair(Register dst_low, Register dst_high, } } -void TurboAssembler::SarPair(Register dst_low, Register dst_high, +void MacroAssembler::SarPair(Register dst_low, Register dst_high, Register src_low, Register src_high, Register shift, Register scratch1, Register scratch2) { @@ -2564,7 +2563,7 @@ void TurboAssembler::SarPair(Register dst_low, Register dst_high, bind(&done); } -void TurboAssembler::SarPair(Register dst_low, Register dst_high, +void MacroAssembler::SarPair(Register dst_low, Register dst_high, Register src_low, Register src_high, int32_t shift, Register scratch1, Register scratch2) { DCHECK_GE(63, shift); @@ -2589,7 +2588,7 @@ void TurboAssembler::SarPair(Register dst_low, Register dst_high, } #endif -void TurboAssembler::ExtractBits(Register rt, Register rs, uint16_t pos, +void MacroAssembler::ExtractBits(Register rt, Register rs, uint16_t pos, uint16_t size, bool sign_extend) { #if V8_TARGET_ARCH_RISCV64 DCHECK(pos < 64 && 0 < size && size <= 64 && 0 < pos + size && @@ -2615,7 +2614,7 @@ void TurboAssembler::ExtractBits(Register rt, Register rs, uint16_t pos, #endif } -void TurboAssembler::InsertBits(Register dest, Register source, Register pos, +void MacroAssembler::InsertBits(Register dest, Register source, Register pos, int size) { #if V8_TARGET_ARCH_RISCV64 DCHECK_LT(size, 64); @@ -2641,42 +2640,42 @@ void TurboAssembler::InsertBits(Register dest, Register source, Register pos, or_(dest, dest, source_); } -void TurboAssembler::Neg_s(FPURegister fd, FPURegister fs) { fneg_s(fd, fs); } +void MacroAssembler::Neg_s(FPURegister fd, FPURegister fs) { 
fneg_s(fd, fs); } -void TurboAssembler::Neg_d(FPURegister fd, FPURegister fs) { fneg_d(fd, fs); } +void MacroAssembler::Neg_d(FPURegister fd, FPURegister fs) { fneg_d(fd, fs); } -void TurboAssembler::Cvt_d_uw(FPURegister fd, Register rs) { +void MacroAssembler::Cvt_d_uw(FPURegister fd, Register rs) { // Convert rs to a FP value in fd. fcvt_d_wu(fd, rs); } -void TurboAssembler::Cvt_d_w(FPURegister fd, Register rs) { +void MacroAssembler::Cvt_d_w(FPURegister fd, Register rs) { // Convert rs to a FP value in fd. fcvt_d_w(fd, rs); } #if V8_TARGET_ARCH_RISCV64 -void TurboAssembler::Cvt_d_ul(FPURegister fd, Register rs) { +void MacroAssembler::Cvt_d_ul(FPURegister fd, Register rs) { // Convert rs to a FP value in fd. fcvt_d_lu(fd, rs); } #endif -void TurboAssembler::Cvt_s_uw(FPURegister fd, Register rs) { +void MacroAssembler::Cvt_s_uw(FPURegister fd, Register rs) { // Convert rs to a FP value in fd. fcvt_s_wu(fd, rs); } -void TurboAssembler::Cvt_s_w(FPURegister fd, Register rs) { +void MacroAssembler::Cvt_s_w(FPURegister fd, Register rs) { // Convert rs to a FP value in fd. fcvt_s_w(fd, rs); } #if V8_TARGET_ARCH_RISCV64 -void TurboAssembler::Cvt_s_ul(FPURegister fd, Register rs) { +void MacroAssembler::Cvt_s_ul(FPURegister fd, Register rs) { // Convert rs to a FP value in fd. fcvt_s_lu(fd, rs); } #endif template -void TurboAssembler::RoundFloatingPointToInteger(Register rd, FPURegister fs, +void MacroAssembler::RoundFloatingPointToInteger(Register rd, FPURegister fs, Register result, CvtFunc fcvt_generator) { // Save csr_fflags to scratch & clear exception flags @@ -2705,7 +2704,7 @@ void TurboAssembler::RoundFloatingPointToInteger(Register rd, FPURegister fs, } } -void TurboAssembler::Clear_if_nan_d(Register rd, FPURegister fs) { +void MacroAssembler::Clear_if_nan_d(Register rd, FPURegister fs) { Label no_nan; feq_d(kScratchReg, fs, fs); bnez(kScratchReg, &no_nan); @@ -2713,7 +2712,7 @@ void TurboAssembler::Clear_if_nan_d(Register rd, FPURegister fs) { bind(&no_nan); } -void TurboAssembler::Clear_if_nan_s(Register rd, FPURegister fs) { +void MacroAssembler::Clear_if_nan_s(Register rd, FPURegister fs) { Label no_nan; feq_s(kScratchReg, fs, fs); bnez(kScratchReg, &no_nan); @@ -2721,101 +2720,101 @@ void TurboAssembler::Clear_if_nan_s(Register rd, FPURegister fs) { bind(&no_nan); } -void TurboAssembler::Trunc_uw_d(Register rd, FPURegister fs, Register result) { +void MacroAssembler::Trunc_uw_d(Register rd, FPURegister fs, Register result) { RoundFloatingPointToInteger( - rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { - tasm->fcvt_wu_d(dst, src, RTZ); + rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) { + masm->fcvt_wu_d(dst, src, RTZ); }); } -void TurboAssembler::Trunc_w_d(Register rd, FPURegister fs, Register result) { +void MacroAssembler::Trunc_w_d(Register rd, FPURegister fs, Register result) { RoundFloatingPointToInteger( - rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { - tasm->fcvt_w_d(dst, src, RTZ); + rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) { + masm->fcvt_w_d(dst, src, RTZ); }); } -void TurboAssembler::Trunc_uw_s(Register rd, FPURegister fs, Register result) { +void MacroAssembler::Trunc_uw_s(Register rd, FPURegister fs, Register result) { RoundFloatingPointToInteger( - rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { - tasm->fcvt_wu_s(dst, src, RTZ); + rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) { + masm->fcvt_wu_s(dst, src, 
RTZ); }); } -void TurboAssembler::Trunc_w_s(Register rd, FPURegister fs, Register result) { +void MacroAssembler::Trunc_w_s(Register rd, FPURegister fs, Register result) { RoundFloatingPointToInteger( - rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { - tasm->fcvt_w_s(dst, src, RTZ); + rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) { + masm->fcvt_w_s(dst, src, RTZ); }); } #if V8_TARGET_ARCH_RISCV64 -void TurboAssembler::Trunc_ul_d(Register rd, FPURegister fs, Register result) { +void MacroAssembler::Trunc_ul_d(Register rd, FPURegister fs, Register result) { RoundFloatingPointToInteger( - rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { - tasm->fcvt_lu_d(dst, src, RTZ); + rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) { + masm->fcvt_lu_d(dst, src, RTZ); }); } -void TurboAssembler::Trunc_l_d(Register rd, FPURegister fs, Register result) { +void MacroAssembler::Trunc_l_d(Register rd, FPURegister fs, Register result) { RoundFloatingPointToInteger( - rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { - tasm->fcvt_l_d(dst, src, RTZ); + rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) { + masm->fcvt_l_d(dst, src, RTZ); }); } -void TurboAssembler::Trunc_ul_s(Register rd, FPURegister fs, Register result) { +void MacroAssembler::Trunc_ul_s(Register rd, FPURegister fs, Register result) { RoundFloatingPointToInteger( - rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { - tasm->fcvt_lu_s(dst, src, RTZ); + rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) { + masm->fcvt_lu_s(dst, src, RTZ); }); } -void TurboAssembler::Trunc_l_s(Register rd, FPURegister fs, Register result) { +void MacroAssembler::Trunc_l_s(Register rd, FPURegister fs, Register result) { RoundFloatingPointToInteger( - rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { - tasm->fcvt_l_s(dst, src, RTZ); + rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) { + masm->fcvt_l_s(dst, src, RTZ); }); } #endif -void TurboAssembler::Round_w_s(Register rd, FPURegister fs, Register result) { +void MacroAssembler::Round_w_s(Register rd, FPURegister fs, Register result) { RoundFloatingPointToInteger( - rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { - tasm->fcvt_w_s(dst, src, RNE); + rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) { + masm->fcvt_w_s(dst, src, RNE); }); } -void TurboAssembler::Round_w_d(Register rd, FPURegister fs, Register result) { +void MacroAssembler::Round_w_d(Register rd, FPURegister fs, Register result) { RoundFloatingPointToInteger( - rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { - tasm->fcvt_w_d(dst, src, RNE); + rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) { + masm->fcvt_w_d(dst, src, RNE); }); } -void TurboAssembler::Ceil_w_s(Register rd, FPURegister fs, Register result) { +void MacroAssembler::Ceil_w_s(Register rd, FPURegister fs, Register result) { RoundFloatingPointToInteger( - rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { - tasm->fcvt_w_s(dst, src, RUP); + rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) { + masm->fcvt_w_s(dst, src, RUP); }); } -void TurboAssembler::Ceil_w_d(Register rd, FPURegister fs, Register result) { +void MacroAssembler::Ceil_w_d(Register rd, FPURegister fs, Register result) { RoundFloatingPointToInteger( - rd, fs, 
result, [](TurboAssembler* tasm, Register dst, FPURegister src) { - tasm->fcvt_w_d(dst, src, RUP); + rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) { + masm->fcvt_w_d(dst, src, RUP); }); } -void TurboAssembler::Floor_w_s(Register rd, FPURegister fs, Register result) { +void MacroAssembler::Floor_w_s(Register rd, FPURegister fs, Register result) { RoundFloatingPointToInteger( - rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { - tasm->fcvt_w_s(dst, src, RDN); + rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) { + masm->fcvt_w_s(dst, src, RDN); }); } -void TurboAssembler::Floor_w_d(Register rd, FPURegister fs, Register result) { +void MacroAssembler::Floor_w_d(Register rd, FPURegister fs, Register result) { RoundFloatingPointToInteger( - rd, fs, result, [](TurboAssembler* tasm, Register dst, FPURegister src) { - tasm->fcvt_w_d(dst, src, RDN); + rd, fs, result, [](MacroAssembler* masm, Register dst, FPURegister src) { + masm->fcvt_w_d(dst, src, RDN); }); } @@ -2826,7 +2825,7 @@ void TurboAssembler::Floor_w_d(Register rd, FPURegister fs, Register result) { // handling is needed by NaN, +/-Infinity, +/-0 #if V8_TARGET_ARCH_RISCV64 template -void TurboAssembler::RoundHelper(FPURegister dst, FPURegister src, +void MacroAssembler::RoundHelper(FPURegister dst, FPURegister src, FPURegister fpu_scratch, FPURoundingMode frm) { BlockTrampolinePoolScope block_trampoline_pool(this); UseScratchRegisterScope temps(this); @@ -2945,7 +2944,7 @@ void TurboAssembler::RoundHelper(FPURegister dst, FPURegister src, // rounded result; this differs from behavior of RISCV fcvt instructions (which // round out-of-range values to the nearest max or min value), therefore special // handling is needed by NaN, +/-Infinity, +/-0 -void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src, +void MacroAssembler::RoundFloat(FPURegister dst, FPURegister src, FPURegister fpu_scratch, FPURoundingMode frm) { BlockTrampolinePoolScope block_trampoline_pool(this); UseScratchRegisterScope temps(this); @@ -3038,7 +3037,7 @@ void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src, // round out-of-range values to the nearest max or min value), therefore special // handling is needed by NaN, +/-Infinity, +/-0 template -void TurboAssembler::RoundHelper(VRegister dst, VRegister src, Register scratch, +void MacroAssembler::RoundHelper(VRegister dst, VRegister src, Register scratch, VRegister v_scratch, FPURoundingMode frm) { VU.set(scratch, std::is_same::value ? 
E32 : E64, m1); // if src is NaN/+-Infinity/+-Zero or if the exponent is larger than # of bits @@ -3092,69 +3091,69 @@ void TurboAssembler::RoundHelper(VRegister dst, VRegister src, Register scratch, } } -void TurboAssembler::Ceil_f(VRegister vdst, VRegister vsrc, Register scratch, +void MacroAssembler::Ceil_f(VRegister vdst, VRegister vsrc, Register scratch, VRegister v_scratch) { RoundHelper(vdst, vsrc, scratch, v_scratch, RUP); } -void TurboAssembler::Ceil_d(VRegister vdst, VRegister vsrc, Register scratch, +void MacroAssembler::Ceil_d(VRegister vdst, VRegister vsrc, Register scratch, VRegister v_scratch) { RoundHelper(vdst, vsrc, scratch, v_scratch, RUP); } -void TurboAssembler::Floor_f(VRegister vdst, VRegister vsrc, Register scratch, +void MacroAssembler::Floor_f(VRegister vdst, VRegister vsrc, Register scratch, VRegister v_scratch) { RoundHelper(vdst, vsrc, scratch, v_scratch, RDN); } -void TurboAssembler::Floor_d(VRegister vdst, VRegister vsrc, Register scratch, +void MacroAssembler::Floor_d(VRegister vdst, VRegister vsrc, Register scratch, VRegister v_scratch) { RoundHelper(vdst, vsrc, scratch, v_scratch, RDN); } -void TurboAssembler::Trunc_d(VRegister vdst, VRegister vsrc, Register scratch, +void MacroAssembler::Trunc_d(VRegister vdst, VRegister vsrc, Register scratch, VRegister v_scratch) { RoundHelper(vdst, vsrc, scratch, v_scratch, RTZ); } -void TurboAssembler::Trunc_f(VRegister vdst, VRegister vsrc, Register scratch, +void MacroAssembler::Trunc_f(VRegister vdst, VRegister vsrc, Register scratch, VRegister v_scratch) { RoundHelper(vdst, vsrc, scratch, v_scratch, RTZ); } -void TurboAssembler::Round_f(VRegister vdst, VRegister vsrc, Register scratch, +void MacroAssembler::Round_f(VRegister vdst, VRegister vsrc, Register scratch, VRegister v_scratch) { RoundHelper(vdst, vsrc, scratch, v_scratch, RNE); } -void TurboAssembler::Round_d(VRegister vdst, VRegister vsrc, Register scratch, +void MacroAssembler::Round_d(VRegister vdst, VRegister vsrc, Register scratch, VRegister v_scratch) { RoundHelper(vdst, vsrc, scratch, v_scratch, RNE); } #if V8_TARGET_ARCH_RISCV64 -void TurboAssembler::Floor_d_d(FPURegister dst, FPURegister src, +void MacroAssembler::Floor_d_d(FPURegister dst, FPURegister src, FPURegister fpu_scratch) { RoundHelper(dst, src, fpu_scratch, RDN); } -void TurboAssembler::Ceil_d_d(FPURegister dst, FPURegister src, +void MacroAssembler::Ceil_d_d(FPURegister dst, FPURegister src, FPURegister fpu_scratch) { RoundHelper(dst, src, fpu_scratch, RUP); } -void TurboAssembler::Trunc_d_d(FPURegister dst, FPURegister src, +void MacroAssembler::Trunc_d_d(FPURegister dst, FPURegister src, FPURegister fpu_scratch) { RoundHelper(dst, src, fpu_scratch, RTZ); } -void TurboAssembler::Round_d_d(FPURegister dst, FPURegister src, +void MacroAssembler::Round_d_d(FPURegister dst, FPURegister src, FPURegister fpu_scratch) { RoundHelper(dst, src, fpu_scratch, RNE); } #endif -void TurboAssembler::Floor_s_s(FPURegister dst, FPURegister src, +void MacroAssembler::Floor_s_s(FPURegister dst, FPURegister src, FPURegister fpu_scratch) { #if V8_TARGET_ARCH_RISCV64 RoundHelper(dst, src, fpu_scratch, RDN); @@ -3163,7 +3162,7 @@ void TurboAssembler::Floor_s_s(FPURegister dst, FPURegister src, #endif } -void TurboAssembler::Ceil_s_s(FPURegister dst, FPURegister src, +void MacroAssembler::Ceil_s_s(FPURegister dst, FPURegister src, FPURegister fpu_scratch) { #if V8_TARGET_ARCH_RISCV64 RoundHelper(dst, src, fpu_scratch, RUP); @@ -3172,7 +3171,7 @@ void TurboAssembler::Ceil_s_s(FPURegister dst, FPURegister src, 
#endif } -void TurboAssembler::Trunc_s_s(FPURegister dst, FPURegister src, +void MacroAssembler::Trunc_s_s(FPURegister dst, FPURegister src, FPURegister fpu_scratch) { #if V8_TARGET_ARCH_RISCV64 RoundHelper(dst, src, fpu_scratch, RTZ); @@ -3181,7 +3180,7 @@ void TurboAssembler::Trunc_s_s(FPURegister dst, FPURegister src, #endif } -void TurboAssembler::Round_s_s(FPURegister dst, FPURegister src, +void MacroAssembler::Round_s_s(FPURegister dst, FPURegister src, FPURegister fpu_scratch) { #if V8_TARGET_ARCH_RISCV64 RoundHelper(dst, src, fpu_scratch, RNE); @@ -3210,7 +3209,7 @@ void MacroAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, fmsub_d(fd, fs, ft, fr); } -void TurboAssembler::CompareF32(Register rd, FPUCondition cc, FPURegister cmp1, +void MacroAssembler::CompareF32(Register rd, FPUCondition cc, FPURegister cmp1, FPURegister cmp2) { switch (cc) { case EQ: @@ -3237,7 +3236,7 @@ void TurboAssembler::CompareF32(Register rd, FPUCondition cc, FPURegister cmp1, } } -void TurboAssembler::CompareF64(Register rd, FPUCondition cc, FPURegister cmp1, +void MacroAssembler::CompareF64(Register rd, FPUCondition cc, FPURegister cmp1, FPURegister cmp2) { switch (cc) { case EQ: @@ -3264,7 +3263,7 @@ void TurboAssembler::CompareF64(Register rd, FPUCondition cc, FPURegister cmp1, } } -void TurboAssembler::CompareIsNotNanF32(Register rd, FPURegister cmp1, +void MacroAssembler::CompareIsNotNanF32(Register rd, FPURegister cmp1, FPURegister cmp2) { UseScratchRegisterScope temps(this); BlockTrampolinePoolScope block_trampoline_pool(this); @@ -3275,7 +3274,7 @@ void TurboAssembler::CompareIsNotNanF32(Register rd, FPURegister cmp1, And(rd, rd, scratch); // rd <- !isNan(cmp1) && !isNan(cmp2) } -void TurboAssembler::CompareIsNotNanF64(Register rd, FPURegister cmp1, +void MacroAssembler::CompareIsNotNanF64(Register rd, FPURegister cmp1, FPURegister cmp2) { UseScratchRegisterScope temps(this); BlockTrampolinePoolScope block_trampoline_pool(this); @@ -3286,27 +3285,27 @@ void TurboAssembler::CompareIsNotNanF64(Register rd, FPURegister cmp1, And(rd, rd, scratch); // rd <- !isNan(cmp1) && !isNan(cmp2) } -void TurboAssembler::CompareIsNanF32(Register rd, FPURegister cmp1, +void MacroAssembler::CompareIsNanF32(Register rd, FPURegister cmp1, FPURegister cmp2) { CompareIsNotNanF32(rd, cmp1, cmp2); // rd <- !isNan(cmp1) && !isNan(cmp2) Xor(rd, rd, 1); // rd <- isNan(cmp1) || isNan(cmp2) } -void TurboAssembler::CompareIsNanF64(Register rd, FPURegister cmp1, +void MacroAssembler::CompareIsNanF64(Register rd, FPURegister cmp1, FPURegister cmp2) { CompareIsNotNanF64(rd, cmp1, cmp2); // rd <- !isNan(cmp1) && !isNan(cmp2) Xor(rd, rd, 1); // rd <- isNan(cmp1) || isNan(cmp2) } -void TurboAssembler::BranchTrueShortF(Register rs, Label* target) { +void MacroAssembler::BranchTrueShortF(Register rs, Label* target) { Branch(target, not_equal, rs, Operand(zero_reg)); } -void TurboAssembler::BranchFalseShortF(Register rs, Label* target) { +void MacroAssembler::BranchFalseShortF(Register rs, Label* target) { Branch(target, equal, rs, Operand(zero_reg)); } -void TurboAssembler::BranchTrueF(Register rs, Label* target) { +void MacroAssembler::BranchTrueF(Register rs, Label* target) { bool long_branch = target->is_bound() ? 
!is_near(target) : is_trampoline_emitted(); if (long_branch) { @@ -3319,7 +3318,7 @@ void TurboAssembler::BranchTrueF(Register rs, Label* target) { } } -void TurboAssembler::BranchFalseF(Register rs, Label* target) { +void MacroAssembler::BranchFalseF(Register rs, Label* target) { bool long_branch = target->is_bound() ? !is_near(target) : is_trampoline_emitted(); if (long_branch) { @@ -3332,7 +3331,7 @@ void TurboAssembler::BranchFalseF(Register rs, Label* target) { } } -void TurboAssembler::InsertHighWordF64(FPURegister dst, Register src_high) { +void MacroAssembler::InsertHighWordF64(FPURegister dst, Register src_high) { #if V8_TARGET_ARCH_RISCV64 UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -3357,7 +3356,7 @@ void TurboAssembler::InsertHighWordF64(FPURegister dst, Register src_high) { #endif } -void TurboAssembler::InsertLowWordF64(FPURegister dst, Register src_low) { +void MacroAssembler::InsertLowWordF64(FPURegister dst, Register src_low) { #if V8_TARGET_ARCH_RISCV64 UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -3382,7 +3381,7 @@ void TurboAssembler::InsertLowWordF64(FPURegister dst, Register src_low) { #endif } -void TurboAssembler::LoadFPRImmediate(FPURegister dst, uint32_t src) { +void MacroAssembler::LoadFPRImmediate(FPURegister dst, uint32_t src) { ASM_CODE_COMMENT(this); // Handle special values first. if (src == base::bit_cast(0.0f) && has_single_zero_reg_set_) { @@ -3408,7 +3407,7 @@ void TurboAssembler::LoadFPRImmediate(FPURegister dst, uint32_t src) { } } -void TurboAssembler::LoadFPRImmediate(FPURegister dst, uint64_t src) { +void MacroAssembler::LoadFPRImmediate(FPURegister dst, uint64_t src) { ASM_CODE_COMMENT(this); // Handle special values first. if (src == base::bit_cast(0.0) && has_double_zero_reg_set_) { @@ -3459,7 +3458,7 @@ void TurboAssembler::LoadFPRImmediate(FPURegister dst, uint64_t src) { } } -void TurboAssembler::CompareI(Register rd, Register rs, const Operand& rt, +void MacroAssembler::CompareI(Register rd, Register rs, const Operand& rt, Condition cond) { switch (cond) { case eq: @@ -3504,7 +3503,7 @@ void TurboAssembler::CompareI(Register rd, Register rs, const Operand& rt, } // dest <- (condition != 0 ? zero : dest) -void TurboAssembler::LoadZeroIfConditionNotZero(Register dest, +void MacroAssembler::LoadZeroIfConditionNotZero(Register dest, Register condition) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -3515,7 +3514,7 @@ void TurboAssembler::LoadZeroIfConditionNotZero(Register dest, } // dest <- (condition == 0 ? 0 : dest) -void TurboAssembler::LoadZeroIfConditionZero(Register dest, +void MacroAssembler::LoadZeroIfConditionZero(Register dest, Register condition) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -3525,7 +3524,7 @@ void TurboAssembler::LoadZeroIfConditionZero(Register dest, and_(dest, dest, scratch); } -void TurboAssembler::Clz32(Register rd, Register xx) { +void MacroAssembler::Clz32(Register rd, Register xx) { // 32 bit unsigned in lower word: count number of leading zeros. // int n = 32; // unsigned y; @@ -3602,7 +3601,7 @@ void TurboAssembler::Clz32(Register rd, Register xx) { } #if V8_TARGET_ARCH_RISCV64 -void TurboAssembler::Clz64(Register rd, Register xx) { +void MacroAssembler::Clz64(Register rd, Register xx) { // 64 bit: count number of leading zeros. 
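
CompareIsNotNanF32/F64 above rely on the IEEE-754 rule that a NaN never compares equal to anything, itself included: feq of a register with itself produces 0 exactly when that value is NaN, and ANDing the two self-comparisons yields "neither operand is NaN". CompareIsNanF* is then just the complement, the Xor with 1. A plain C++ restatement of the trick, for illustration only:

// --- Illustrative sketch, not part of the patch ---
#include <cassert>
#include <cmath>

bool NeitherIsNan(double a, double b) {
  bool a_ok = (a == a);   // feq of cmp1 with itself -> 0 iff cmp1 is NaN
  bool b_ok = (b == b);   // feq of cmp2 with itself
  return a_ok && b_ok;    // the And(rd, rd, scratch) above
}

bool EitherIsNan(double a, double b) {
  return !NeitherIsNan(a, b);   // the Xor(rd, rd, 1) in CompareIsNanF32/F64
}

int main() {
  assert(NeitherIsNan(1.0, 2.0));
  assert(EitherIsNan(std::nan(""), 2.0));
  assert(EitherIsNan(1.0, std::nan("")));
  return 0;
}
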
// int n = 64; // unsigned y; @@ -3656,7 +3655,7 @@ void TurboAssembler::Clz64(Register rd, Register xx) { bind(&L5); } #endif -void TurboAssembler::Ctz32(Register rd, Register rs) { +void MacroAssembler::Ctz32(Register rd, Register rs) { // Convert trailing zeroes to trailing ones, and bits to their left // to zeroes. @@ -3680,7 +3679,7 @@ void TurboAssembler::Ctz32(Register rd, Register rs) { } } #if V8_TARGET_ARCH_RISCV64 -void TurboAssembler::Ctz64(Register rd, Register rs) { +void MacroAssembler::Ctz64(Register rd, Register rs) { // Convert trailing zeroes to trailing ones, and bits to their left // to zeroes. BlockTrampolinePoolScope block_trampoline_pool(this); @@ -3703,7 +3702,7 @@ void TurboAssembler::Ctz64(Register rd, Register rs) { } } #endif -void TurboAssembler::Popcnt32(Register rd, Register rs, Register scratch) { +void MacroAssembler::Popcnt32(Register rd, Register rs, Register scratch) { DCHECK_NE(scratch, rs); DCHECK_NE(scratch, rd); // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel @@ -3754,7 +3753,7 @@ void TurboAssembler::Popcnt32(Register rd, Register rs, Register scratch) { } #if V8_TARGET_ARCH_RISCV64 -void TurboAssembler::Popcnt64(Register rd, Register rs, Register scratch) { +void MacroAssembler::Popcnt64(Register rd, Register rs, Register scratch) { DCHECK_NE(scratch, rs); DCHECK_NE(scratch, rd); // uint64_t B0 = 0x5555555555555555l; // (T)~(T)0/3 @@ -3790,7 +3789,7 @@ void TurboAssembler::Popcnt64(Register rd, Register rs, Register scratch) { srli(rd, rd, 32 + shift); } #endif -void TurboAssembler::TryInlineTruncateDoubleToI(Register result, +void MacroAssembler::TryInlineTruncateDoubleToI(Register result, DoubleRegister double_input, Label* done) { UseScratchRegisterScope temps(this); @@ -3801,7 +3800,7 @@ void TurboAssembler::TryInlineTruncateDoubleToI(Register result, Branch(done, eq, scratch, Operand(1)); } -void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, +void MacroAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result, DoubleRegister double_input, StubCallMode stub_mode) { @@ -3837,19 +3836,19 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, DCHECK((cond == cc_always && rs == zero_reg && rt.rm() == zero_reg) || \ (cond != cc_always && (rs != zero_reg || rt.rm() != zero_reg))) -void TurboAssembler::Branch(int32_t offset) { +void MacroAssembler::Branch(int32_t offset) { DCHECK(is_int21(offset)); BranchShort(offset); } -void TurboAssembler::Branch(int32_t offset, Condition cond, Register rs, +void MacroAssembler::Branch(int32_t offset, Condition cond, Register rs, const Operand& rt, Label::Distance near_jump) { bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt); DCHECK(is_near); USE(is_near); } -void TurboAssembler::Branch(Label* L) { +void MacroAssembler::Branch(Label* L) { if (L->is_bound()) { if (is_near(L)) { BranchShort(L); @@ -3865,7 +3864,7 @@ void TurboAssembler::Branch(Label* L) { } } -void TurboAssembler::Branch(Label* L, Condition cond, Register rs, +void MacroAssembler::Branch(Label* L, Condition cond, Register rs, const Operand& rt, Label::Distance near_jump) { if (L->is_bound()) { if (!BranchShortCheck(0, L, cond, rs, rt)) { @@ -3898,7 +3897,7 @@ void TurboAssembler::Branch(Label* L, Condition cond, Register rs, } } -void TurboAssembler::Branch(Label* L, Condition cond, Register rs, +void MacroAssembler::Branch(Label* L, Condition cond, Register rs, RootIndex index) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ 
-3906,20 +3905,20 @@ void TurboAssembler::Branch(Label* L, Condition cond, Register rs, Branch(L, cond, rs, Operand(scratch)); } -void TurboAssembler::BranchShortHelper(int32_t offset, Label* L) { +void MacroAssembler::BranchShortHelper(int32_t offset, Label* L) { DCHECK(L == nullptr || offset == 0); offset = GetOffset(offset, L, OffsetSize::kOffset21); j(offset); } -void TurboAssembler::BranchShort(int32_t offset) { +void MacroAssembler::BranchShort(int32_t offset) { DCHECK(is_int21(offset)); BranchShortHelper(offset, nullptr); } -void TurboAssembler::BranchShort(Label* L) { BranchShortHelper(0, L); } +void MacroAssembler::BranchShort(Label* L) { BranchShortHelper(0, L); } -int32_t TurboAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) { +int32_t MacroAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) { if (L) { offset = branch_offset_helper(L, bits); } else { @@ -3928,7 +3927,7 @@ int32_t TurboAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) { return offset; } -Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt, +Register MacroAssembler::GetRtAsRegisterHelper(const Operand& rt, Register scratch) { Register r2 = no_reg; if (rt.is_reg()) { @@ -3941,14 +3940,14 @@ Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt, return r2; } -bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, +bool MacroAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits) { if (!is_near(L, bits)) return false; *offset = GetOffset(*offset, L, bits); return true; } -bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits, +bool MacroAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits, Register* scratch, const Operand& rt) { if (!is_near(L, bits)) return false; *scratch = GetRtAsRegisterHelper(rt, *scratch); @@ -3956,7 +3955,7 @@ bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits, return true; } -bool TurboAssembler::BranchShortHelper(int32_t offset, Label* L, Condition cond, +bool MacroAssembler::BranchShortHelper(int32_t offset, Label* L, Condition cond, Register rs, const Operand& rt) { DCHECK(L == nullptr || offset == 0); UseScratchRegisterScope temps(this); @@ -4084,7 +4083,7 @@ bool TurboAssembler::BranchShortHelper(int32_t offset, Label* L, Condition cond, return true; } -bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond, +bool MacroAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs, const Operand& rt) { BRANCH_ARGS_CHECK(cond, rs, rt); @@ -4097,28 +4096,28 @@ bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond, } } -void TurboAssembler::BranchShort(int32_t offset, Condition cond, Register rs, +void MacroAssembler::BranchShort(int32_t offset, Condition cond, Register rs, const Operand& rt) { BranchShortCheck(offset, nullptr, cond, rs, rt); } -void TurboAssembler::BranchShort(Label* L, Condition cond, Register rs, +void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs, const Operand& rt) { BranchShortCheck(0, L, cond, rs, rt); } -void TurboAssembler::BranchAndLink(int32_t offset) { +void MacroAssembler::BranchAndLink(int32_t offset) { BranchAndLinkShort(offset); } -void TurboAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs, +void MacroAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs, const Operand& rt) { bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt); DCHECK(is_near); USE(is_near); } 
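
Popcnt32/Popcnt64 above open-code the parallel bit-count from the Stanford bit-hacks page cited in their comment, since the base ISA without Zbb has no cpop instruction. The same arithmetic in plain C++, so the masks in the generated sequence are easier to follow; this is the textbook routine, not a copy of the emitted code.

// --- Illustrative sketch, not part of the patch ---
#include <cassert>
#include <cstdint>

uint32_t Popcount32(uint32_t v) {
  v = v - ((v >> 1) & 0x55555555u);                  // count bits in each pair
  v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u);  // sum pairs into nibbles
  v = (v + (v >> 4)) & 0x0F0F0F0Fu;                  // sum nibbles into bytes
  return (v * 0x01010101u) >> 24;                    // add the four byte counts
}

int main() {
  assert(Popcount32(0) == 0);
  assert(Popcount32(0xFFFFFFFFu) == 32);
  assert(Popcount32(0x12345678u) == 13);
  return 0;
}
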
-void TurboAssembler::BranchAndLink(Label* L) { +void MacroAssembler::BranchAndLink(Label* L) { if (L->is_bound()) { if (is_near(L)) { BranchAndLinkShort(L); @@ -4134,7 +4133,7 @@ void TurboAssembler::BranchAndLink(Label* L) { } } -void TurboAssembler::BranchAndLink(Label* L, Condition cond, Register rs, +void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs, const Operand& rt) { if (L->is_bound()) { if (!BranchAndLinkShortCheck(0, L, cond, rs, rt)) { @@ -4157,25 +4156,25 @@ void TurboAssembler::BranchAndLink(Label* L, Condition cond, Register rs, } } -void TurboAssembler::BranchAndLinkShortHelper(int32_t offset, Label* L) { +void MacroAssembler::BranchAndLinkShortHelper(int32_t offset, Label* L) { DCHECK(L == nullptr || offset == 0); offset = GetOffset(offset, L, OffsetSize::kOffset21); jal(offset); } -void TurboAssembler::BranchAndLinkShort(int32_t offset) { +void MacroAssembler::BranchAndLinkShort(int32_t offset) { DCHECK(is_int21(offset)); BranchAndLinkShortHelper(offset, nullptr); } -void TurboAssembler::BranchAndLinkShort(Label* L) { +void MacroAssembler::BranchAndLinkShort(Label* L) { BranchAndLinkShortHelper(0, L); } // Pre r6 we need to use a bgezal or bltzal, but they can't be used directly // with the slt instructions. We could use sub or add instead but we would miss // overflow cases, so we keep slt and add an intermediate third instruction. -bool TurboAssembler::BranchAndLinkShortHelper(int32_t offset, Label* L, +bool MacroAssembler::BranchAndLinkShortHelper(int32_t offset, Label* L, Condition cond, Register rs, const Operand& rt) { DCHECK(L == nullptr || offset == 0); @@ -4198,7 +4197,7 @@ bool TurboAssembler::BranchAndLinkShortHelper(int32_t offset, Label* L, return true; } -bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L, +bool MacroAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond, Register rs, const Operand& rt) { BRANCH_ARGS_CHECK(cond, rs, rt); @@ -4212,20 +4211,20 @@ bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L, } } -void TurboAssembler::LoadFromConstantsTable(Register destination, +void MacroAssembler::LoadFromConstantsTable(Register destination, int constant_index) { DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable)); LoadRoot(destination, RootIndex::kBuiltinsConstantsTable); - LoadTaggedPointerField( - destination, FieldMemOperand(destination, FixedArray::OffsetOfElementAt( - constant_index))); + LoadTaggedField(destination, + FieldMemOperand(destination, FixedArray::OffsetOfElementAt( + constant_index))); } -void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) { +void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) { LoadWord(destination, MemOperand(kRootRegister, offset)); } -void TurboAssembler::LoadRootRegisterOffset(Register destination, +void MacroAssembler::LoadRootRegisterOffset(Register destination, intptr_t offset) { if (offset == 0) { Move(destination, kRootRegister); @@ -4234,7 +4233,7 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination, } } -void TurboAssembler::Jump(Register target, Condition cond, Register rs, +void MacroAssembler::Jump(Register target, Condition cond, Register rs, const Operand& rt) { BlockTrampolinePoolScope block_trampoline_pool(this); if (cond == cc_always) { @@ -4247,7 +4246,7 @@ void TurboAssembler::Jump(Register target, Condition cond, Register rs, } } -void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, +void 
MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond, Register rs, const Operand& rt) { Label skip; if (cond != cc_always) { @@ -4262,13 +4261,13 @@ void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, } } -void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond, +void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond, Register rs, const Operand& rt) { DCHECK(!RelocInfo::IsCodeTarget(rmode)); Jump(static_cast(target), rmode, cond, rs, rt); } -void TurboAssembler::Jump(Handle code, RelocInfo::Mode rmode, +void MacroAssembler::Jump(Handle code, RelocInfo::Mode rmode, Condition cond, Register rs, const Operand& rt) { DCHECK(RelocInfo::IsCodeTarget(rmode)); DCHECK_IMPLIES(options().isolate_independent_code, @@ -4301,13 +4300,13 @@ void TurboAssembler::Jump(Handle code, RelocInfo::Mode rmode, Jump(static_cast(target_index), rmode, cond, rs, rt); } -void TurboAssembler::Jump(const ExternalReference& reference) { +void MacroAssembler::Jump(const ExternalReference& reference) { li(t6, reference); Jump(t6); } // Note: To call gcc-compiled C code on riscv64, you must call through t6. -void TurboAssembler::Call(Register target, Condition cond, Register rs, +void MacroAssembler::Call(Register target, Condition cond, Register rs, const Operand& rt) { BlockTrampolinePoolScope block_trampoline_pool(this); if (cond == cc_always) { @@ -4334,13 +4333,13 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit, } } -void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond, +void MacroAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond, Register rs, const Operand& rt) { li(t6, Operand(static_cast(target), rmode), ADDRESS_LOAD); Call(t6, cond, rs, rt); } -void TurboAssembler::Call(Handle code, RelocInfo::Mode rmode, +void MacroAssembler::Call(Handle code, RelocInfo::Mode rmode, Condition cond, Register rs, const Operand& rt) { BlockTrampolinePoolScope block_trampoline_pool(this); DCHECK(RelocInfo::IsCodeTarget(rmode)); @@ -4374,7 +4373,7 @@ void TurboAssembler::Call(Handle code, RelocInfo::Mode rmode, // Call(static_cast
(target_index), rmode, cond, rs, rt); } -void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin) { +void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin) { #if V8_TARGET_ARCH_RISCV64 static_assert(kSystemPointerSize == 8); #elif V8_TARGET_ARCH_RISCV32 @@ -4390,12 +4389,12 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin) { MemOperand(builtin, IsolateData::builtin_entry_table_offset())); } -void TurboAssembler::CallBuiltinByIndex(Register builtin) { +void MacroAssembler::CallBuiltinByIndex(Register builtin) { LoadEntryFromBuiltinIndex(builtin); Call(builtin); } -void TurboAssembler::CallBuiltin(Builtin builtin) { +void MacroAssembler::CallBuiltin(Builtin builtin) { ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin)); switch (options().builtin_call_jump_mode) { case BuiltinCallJumpMode::kAbsolute: { @@ -4428,7 +4427,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin) { } } -void TurboAssembler::TailCallBuiltin(Builtin builtin) { +void MacroAssembler::TailCallBuiltin(Builtin builtin) { ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("tail call", builtin)); switch (options().builtin_call_jump_mode) { @@ -4462,18 +4461,18 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) { } } -void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin, +void MacroAssembler::LoadEntryFromBuiltin(Builtin builtin, Register destination) { LoadWord(destination, EntryFromBuiltinAsOperand(builtin)); } -MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { +MemOperand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { DCHECK(root_array_available()); return MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(builtin)); } -void TurboAssembler::PatchAndJump(Address target) { +void MacroAssembler::PatchAndJump(Address target) { BlockTrampolinePoolScope block_trampoline_pool(this); UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -4490,7 +4489,7 @@ void TurboAssembler::PatchAndJump(Address target) { pc_ += sizeof(uintptr_t); } -void TurboAssembler::StoreReturnAddressAndCall(Register target) { +void MacroAssembler::StoreReturnAddressAndCall(Register target) { // This generates the final instruction sequence for calls to C functions // once an exit frame has been constructed. // @@ -4528,14 +4527,14 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) { DCHECK_EQ(kNumInstructionsToJump, InstructionsGeneratedSince(&find_ra)); } -void TurboAssembler::Ret(Condition cond, Register rs, const Operand& rt) { +void MacroAssembler::Ret(Condition cond, Register rs, const Operand& rt) { Jump(ra, cond, rs, rt); if (cond == al) { ForceConstantPoolEmissionWithoutJump(); } } -void TurboAssembler::BranchLong(Label* L) { +void MacroAssembler::BranchLong(Label* L) { // Generate position independent long branch. BlockTrampolinePoolScope block_trampoline_pool(this); int32_t imm; @@ -4544,7 +4543,7 @@ void TurboAssembler::BranchLong(Label* L) { EmitConstPoolWithJumpIfNeeded(); } -void TurboAssembler::BranchAndLinkLong(Label* L) { +void MacroAssembler::BranchAndLinkLong(Label* L) { // Generate position independent long branch and link. 
BlockTrampolinePoolScope block_trampoline_pool(this); int32_t imm; @@ -4552,12 +4551,12 @@ void TurboAssembler::BranchAndLinkLong(Label* L) { GenPCRelativeJumpAndLink(t6, imm); } -void TurboAssembler::DropAndRet(int drop) { +void MacroAssembler::DropAndRet(int drop) { AddWord(sp, sp, drop * kSystemPointerSize); Ret(); } -void TurboAssembler::DropAndRet(int drop, Condition cond, Register r1, +void MacroAssembler::DropAndRet(int drop, Condition cond, Register r1, const Operand& r2) { // Both Drop and Ret need to be conditional. Label skip; @@ -4573,7 +4572,7 @@ void TurboAssembler::DropAndRet(int drop, Condition cond, Register r1, } } -void TurboAssembler::Drop(int count, Condition cond, Register reg, +void MacroAssembler::Drop(int count, Condition cond, Register reg, const Operand& op) { if (count <= 0) { return; @@ -4604,9 +4603,9 @@ void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) { } } -void TurboAssembler::Call(Label* target) { BranchAndLink(target); } +void MacroAssembler::Call(Label* target) { BranchAndLink(target); } -void TurboAssembler::LoadAddress(Register dst, Label* target, +void MacroAssembler::LoadAddress(Register dst, Label* target, RelocInfo::Mode rmode) { int32_t offset; if (CalculateOffset(target, &offset, OffsetSize::kOffset32)) { @@ -4622,14 +4621,14 @@ void TurboAssembler::LoadAddress(Register dst, Label* target, } } -void TurboAssembler::Push(Smi smi) { +void MacroAssembler::Push(Smi smi) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); li(scratch, Operand(smi)); push(scratch); } -void TurboAssembler::PushArray(Register array, Register size, +void MacroAssembler::PushArray(Register array, Register size, PushArrayOrder order) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -4658,7 +4657,7 @@ void TurboAssembler::PushArray(Register array, Register size, } } -void TurboAssembler::Push(Handle handle) { +void MacroAssembler::Push(Handle handle) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); li(scratch, Operand(handle)); @@ -4701,7 +4700,7 @@ void MacroAssembler::PopStackHandler() { StoreWord(a1, MemOperand(scratch)); } -void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst, +void MacroAssembler::FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src) { // Subtracting 0.0 preserves all inputs except for signalling NaNs, which // become quiet NaNs. We use fsub rather than fadd because fsub preserves -0.0 @@ -4712,19 +4711,19 @@ void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst, fsub_d(dst, src, kDoubleRegZero); } -void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) { +void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) { Move(dst, fa0); // Reg fa0 is FP return value. } -void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) { +void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) { Move(dst, fa0); // Reg fa0 is FP first argument value. 
} -void TurboAssembler::MovToFloatParameter(DoubleRegister src) { Move(fa0, src); } +void MacroAssembler::MovToFloatParameter(DoubleRegister src) { Move(fa0, src); } -void TurboAssembler::MovToFloatResult(DoubleRegister src) { Move(fa0, src); } +void MacroAssembler::MovToFloatResult(DoubleRegister src) { Move(fa0, src); } -void TurboAssembler::MovToFloatParameters(DoubleRegister src1, +void MacroAssembler::MovToFloatParameters(DoubleRegister src1, DoubleRegister src2) { const DoubleRegister fparg2 = fa1; if (src2 == fa0) { @@ -4747,10 +4746,10 @@ void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) { kind == StackLimitKind::kRealStackLimit ? ExternalReference::address_of_real_jslimit(isolate) : ExternalReference::address_of_jslimit(isolate); - DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit)); + DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit)); intptr_t offset = - TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit); + MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit); CHECK(is_int32(offset)); LoadWord(destination, MemOperand(kRootRegister, static_cast(offset))); @@ -4916,8 +4915,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, // allow recompilation to take effect without changing any of the // call sites. Register code = kJavaScriptCallCodeStartRegister; - LoadTaggedPointerField(code, - FieldMemOperand(function, JSFunction::kCodeOffset)); + LoadTaggedField(code, FieldMemOperand(function, JSFunction::kCodeOffset)); switch (type) { case InvokeType::kCall: CallCodeObject(code); @@ -4944,11 +4942,10 @@ void MacroAssembler::InvokeFunctionWithNewTarget( { UseScratchRegisterScope temps(this); Register temp_reg = temps.Acquire(); - LoadTaggedPointerField( + LoadTaggedField( temp_reg, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); - LoadTaggedPointerField( - cp, FieldMemOperand(function, JSFunction::kContextOffset)); + LoadTaggedField(cp, FieldMemOperand(function, JSFunction::kContextOffset)); // The argument count is stored as uint16_t Lhu(expected_parameter_count, FieldMemOperand(temp_reg, @@ -4969,7 +4966,7 @@ void MacroAssembler::InvokeFunction(Register function, DCHECK_EQ(function, a1); // Get the function and setup the context. 
- LoadTaggedPointerField(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); + LoadTaggedField(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); InvokeFunctionCode(a1, no_reg, expected_parameter_count, actual_parameter_count, type); @@ -4992,7 +4989,7 @@ void MacroAssembler::GetInstanceTypeRange(Register map, Register type_reg, } //------------------------------------------------------------------------------ // Wasm -void TurboAssembler::WasmRvvEq(VRegister dst, VRegister lhs, VRegister rhs, +void MacroAssembler::WasmRvvEq(VRegister dst, VRegister lhs, VRegister rhs, VSew sew, Vlmul lmul) { VU.set(kScratchReg, sew, lmul); vmseq_vv(v0, lhs, rhs); @@ -5001,7 +4998,7 @@ void TurboAssembler::WasmRvvEq(VRegister dst, VRegister lhs, VRegister rhs, vmerge_vx(dst, kScratchReg, dst); } -void TurboAssembler::WasmRvvNe(VRegister dst, VRegister lhs, VRegister rhs, +void MacroAssembler::WasmRvvNe(VRegister dst, VRegister lhs, VRegister rhs, VSew sew, Vlmul lmul) { VU.set(kScratchReg, sew, lmul); vmsne_vv(v0, lhs, rhs); @@ -5010,7 +5007,7 @@ void TurboAssembler::WasmRvvNe(VRegister dst, VRegister lhs, VRegister rhs, vmerge_vx(dst, kScratchReg, dst); } -void TurboAssembler::WasmRvvGeS(VRegister dst, VRegister lhs, VRegister rhs, +void MacroAssembler::WasmRvvGeS(VRegister dst, VRegister lhs, VRegister rhs, VSew sew, Vlmul lmul) { VU.set(kScratchReg, sew, lmul); vmsle_vv(v0, rhs, lhs); @@ -5019,7 +5016,7 @@ void TurboAssembler::WasmRvvGeS(VRegister dst, VRegister lhs, VRegister rhs, vmerge_vx(dst, kScratchReg, dst); } -void TurboAssembler::WasmRvvGeU(VRegister dst, VRegister lhs, VRegister rhs, +void MacroAssembler::WasmRvvGeU(VRegister dst, VRegister lhs, VRegister rhs, VSew sew, Vlmul lmul) { VU.set(kScratchReg, sew, lmul); vmsleu_vv(v0, rhs, lhs); @@ -5028,7 +5025,7 @@ void TurboAssembler::WasmRvvGeU(VRegister dst, VRegister lhs, VRegister rhs, vmerge_vx(dst, kScratchReg, dst); } -void TurboAssembler::WasmRvvGtS(VRegister dst, VRegister lhs, VRegister rhs, +void MacroAssembler::WasmRvvGtS(VRegister dst, VRegister lhs, VRegister rhs, VSew sew, Vlmul lmul) { VU.set(kScratchReg, sew, lmul); vmslt_vv(v0, rhs, lhs); @@ -5037,7 +5034,7 @@ void TurboAssembler::WasmRvvGtS(VRegister dst, VRegister lhs, VRegister rhs, vmerge_vx(dst, kScratchReg, dst); } -void TurboAssembler::WasmRvvGtU(VRegister dst, VRegister lhs, VRegister rhs, +void MacroAssembler::WasmRvvGtU(VRegister dst, VRegister lhs, VRegister rhs, VSew sew, Vlmul lmul) { VU.set(kScratchReg, sew, lmul); vmsltu_vv(v0, rhs, lhs); @@ -5046,7 +5043,7 @@ void TurboAssembler::WasmRvvGtU(VRegister dst, VRegister lhs, VRegister rhs, vmerge_vx(dst, kScratchReg, dst); } -void TurboAssembler::WasmRvvS128const(VRegister dst, const uint8_t imms[16]) { +void MacroAssembler::WasmRvvS128const(VRegister dst, const uint8_t imms[16]) { uint64_t imm1 = *(reinterpret_cast(imms)); uint64_t imm2 = *((reinterpret_cast(imms)) + 1); VU.set(kScratchReg, VSew::E64, Vlmul::m1); @@ -5059,7 +5056,7 @@ void TurboAssembler::WasmRvvS128const(VRegister dst, const uint8_t imms[16]) { vmerge_vx(dst, kScratchReg, dst); } -void TurboAssembler::LoadLane(int ts, VRegister dst, uint8_t laneidx, +void MacroAssembler::LoadLane(int ts, VRegister dst, uint8_t laneidx, MemOperand src) { if (ts == 8) { Lbu(kScratchReg2, src); @@ -5091,7 +5088,7 @@ void TurboAssembler::LoadLane(int ts, VRegister dst, uint8_t laneidx, } } -void TurboAssembler::StoreLane(int sz, VRegister src, uint8_t laneidx, +void MacroAssembler::StoreLane(int sz, VRegister src, uint8_t laneidx, MemOperand dst) { if (sz == 8) { 
VU.set(kScratchReg, E8, m1); @@ -5119,7 +5116,7 @@ void TurboAssembler::StoreLane(int sz, VRegister src, uint8_t laneidx, // ----------------------------------------------------------------------------- // Runtime calls. #if V8_TARGET_ARCH_RISCV64 -void TurboAssembler::AddOverflow64(Register dst, Register left, +void MacroAssembler::AddOverflow64(Register dst, Register left, const Operand& right, Register overflow) { UseScratchRegisterScope temps(this); BlockTrampolinePoolScope block_trampoline_pool(this); @@ -5149,7 +5146,7 @@ void TurboAssembler::AddOverflow64(Register dst, Register left, } } -void TurboAssembler::SubOverflow64(Register dst, Register left, +void MacroAssembler::SubOverflow64(Register dst, Register left, const Operand& right, Register overflow) { UseScratchRegisterScope temps(this); BlockTrampolinePoolScope block_trampoline_pool(this); @@ -5181,7 +5178,7 @@ void TurboAssembler::SubOverflow64(Register dst, Register left, } } -void TurboAssembler::MulOverflow32(Register dst, Register left, +void MacroAssembler::MulOverflow32(Register dst, Register left, const Operand& right, Register overflow) { ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); @@ -5207,7 +5204,7 @@ void TurboAssembler::MulOverflow32(Register dst, Register left, xor_(overflow, overflow, dst); } -void TurboAssembler::MulOverflow64(Register dst, Register left, +void MacroAssembler::MulOverflow64(Register dst, Register left, const Operand& right, Register overflow) { ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); @@ -5238,7 +5235,7 @@ void TurboAssembler::MulOverflow64(Register dst, Register left, } #elif V8_TARGET_ARCH_RISCV32 -void TurboAssembler::AddOverflow(Register dst, Register left, +void MacroAssembler::AddOverflow(Register dst, Register left, const Operand& right, Register overflow) { UseScratchRegisterScope temps(this); BlockTrampolinePoolScope block_trampoline_pool(this); @@ -5268,7 +5265,7 @@ void TurboAssembler::AddOverflow(Register dst, Register left, } } -void TurboAssembler::SubOverflow(Register dst, Register left, +void MacroAssembler::SubOverflow(Register dst, Register left, const Operand& right, Register overflow) { UseScratchRegisterScope temps(this); BlockTrampolinePoolScope block_trampoline_pool(this); @@ -5300,7 +5297,7 @@ void TurboAssembler::SubOverflow(Register dst, Register left, } } -void TurboAssembler::MulOverflow32(Register dst, Register left, +void MacroAssembler::MulOverflow32(Register dst, Register left, const Operand& right, Register overflow) { ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); @@ -5422,15 +5419,15 @@ void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value, // ----------------------------------------------------------------------------- // Debugging. 
-void TurboAssembler::Trap() { stop(); } -void TurboAssembler::DebugBreak() { stop(); } +void MacroAssembler::Trap() { stop(); } +void MacroAssembler::DebugBreak() { stop(); } -void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs, +void MacroAssembler::Assert(Condition cc, AbortReason reason, Register rs, Operand rt) { if (v8_flags.debug_code) Check(cc, reason, rs, rt); } -void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs, +void MacroAssembler::Check(Condition cc, AbortReason reason, Register rs, Operand rt) { Label L; BranchShort(&L, cc, rs, rt); @@ -5439,7 +5436,7 @@ void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs, bind(&L); } -void TurboAssembler::Abort(AbortReason reason) { +void MacroAssembler::Abort(AbortReason reason) { Label abort_start; bind(&abort_start); if (v8_flags.code_comments) { @@ -5496,22 +5493,21 @@ void TurboAssembler::Abort(AbortReason reason) { } } -void TurboAssembler::LoadMap(Register destination, Register object) { +void MacroAssembler::LoadMap(Register destination, Register object) { ASM_CODE_COMMENT(this); - LoadTaggedPointerField(destination, - FieldMemOperand(object, HeapObject::kMapOffset)); + LoadTaggedField(destination, FieldMemOperand(object, HeapObject::kMapOffset)); } void MacroAssembler::LoadNativeContextSlot(Register dst, int index) { ASM_CODE_COMMENT(this); LoadMap(dst, cp); - LoadTaggedPointerField( + LoadTaggedField( dst, FieldMemOperand( dst, Map::kConstructorOrBackPointerOrNativeContextOffset)); - LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index))); + LoadTaggedField(dst, MemOperand(dst, Context::SlotOffset(index))); } -void TurboAssembler::StubPrologue(StackFrame::Type type) { +void MacroAssembler::StubPrologue(StackFrame::Type type) { ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -5519,9 +5515,9 @@ void TurboAssembler::StubPrologue(StackFrame::Type type) { PushCommonFrame(scratch); } -void TurboAssembler::Prologue() { PushStandardFrame(a1); } +void MacroAssembler::Prologue() { PushStandardFrame(a1); } -void TurboAssembler::EnterFrame(StackFrame::Type type) { +void MacroAssembler::EnterFrame(StackFrame::Type type) { ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -5538,7 +5534,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) { #endif // V8_ENABLE_WEBASSEMBLY } -void TurboAssembler::LeaveFrame(StackFrame::Type type) { +void MacroAssembler::LeaveFrame(StackFrame::Type type) { ASM_CODE_COMMENT(this); addi(sp, fp, 2 * kSystemPointerSize); LoadWord(ra, MemOperand(fp, 1 * kSystemPointerSize)); @@ -5661,7 +5657,7 @@ void MacroAssembler::LeaveExitFrame(Register argument_count, bool do_return, } } -int TurboAssembler::ActivationFrameAlignment() { +int MacroAssembler::ActivationFrameAlignment() { #if V8_HOST_ARCH_RISCV32 || V8_HOST_ARCH_RISCV64 // Running on the real platform. Use the alignment as mandated by the local // environment. 
@@ -5699,7 +5695,7 @@ void MacroAssembler::AssertStackIsAligned() { } } -void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) { +void MacroAssembler::SmiUntag(Register dst, const MemOperand& src) { ASM_CODE_COMMENT(this); if (SmiValuesAre32Bits()) { Lw(dst, MemOperand(src.rm(), SmiWordOffset(src.offset()))); @@ -5714,7 +5710,7 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) { } } -void TurboAssembler::SmiToInt32(Register smi) { +void MacroAssembler::SmiToInt32(Register smi) { ASM_CODE_COMMENT(this); if (v8_flags.enable_slow_asserts) { AssertSmi(smi); @@ -5723,7 +5719,7 @@ void TurboAssembler::SmiToInt32(Register smi) { SmiUntag(smi); } -void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) { +void MacroAssembler::JumpIfSmi(Register value, Label* smi_label) { ASM_CODE_COMMENT(this); DCHECK_EQ(0, kSmiTag); UseScratchRegisterScope temps(this); @@ -5754,7 +5750,7 @@ void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) { Branch(not_smi_label, ne, scratch, Operand(zero_reg)); } -void TurboAssembler::AssertNotSmi(Register object, AbortReason reason) { +void MacroAssembler::AssertNotSmi(Register object, AbortReason reason) { if (v8_flags.debug_code) { ASM_CODE_COMMENT(this); static_assert(kSmiTag == 0); @@ -5764,7 +5760,7 @@ void TurboAssembler::AssertNotSmi(Register object, AbortReason reason) { } } -void TurboAssembler::AssertSmi(Register object, AbortReason reason) { +void MacroAssembler::AssertSmi(Register object, AbortReason reason) { if (v8_flags.debug_code) { ASM_CODE_COMMENT(this); static_assert(kSmiTag == 0); @@ -5877,7 +5873,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, } template -void TurboAssembler::FloatMinMaxHelper(FPURegister dst, FPURegister src1, +void MacroAssembler::FloatMinMaxHelper(FPURegister dst, FPURegister src1, FPURegister src2, MaxMinKind kind) { DCHECK((std::is_same::value) || (std::is_same::value)); @@ -5932,25 +5928,25 @@ void TurboAssembler::FloatMinMaxHelper(FPURegister dst, FPURegister src1, bind(&done); } -void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1, +void MacroAssembler::Float32Max(FPURegister dst, FPURegister src1, FPURegister src2) { ASM_CODE_COMMENT(this); FloatMinMaxHelper(dst, src1, src2, MaxMinKind::kMax); } -void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1, +void MacroAssembler::Float32Min(FPURegister dst, FPURegister src1, FPURegister src2) { ASM_CODE_COMMENT(this); FloatMinMaxHelper(dst, src1, src2, MaxMinKind::kMin); } -void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1, +void MacroAssembler::Float64Max(FPURegister dst, FPURegister src1, FPURegister src2) { ASM_CODE_COMMENT(this); FloatMinMaxHelper(dst, src1, src2, MaxMinKind::kMax); } -void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1, +void MacroAssembler::Float64Min(FPURegister dst, FPURegister src1, FPURegister src2) { ASM_CODE_COMMENT(this); FloatMinMaxHelper(dst, src1, src2, MaxMinKind::kMin); @@ -5958,7 +5954,7 @@ void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1, static const int kRegisterPassedArguments = 8; -int TurboAssembler::CalculateStackPassedDWords(int num_gp_arguments, +int MacroAssembler::CalculateStackPassedDWords(int num_gp_arguments, int num_fp_arguments) { int stack_passed_dwords = 0; @@ -5974,7 +5970,7 @@ int TurboAssembler::CalculateStackPassedDWords(int num_gp_arguments, return stack_passed_dwords; } -void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, +void 
MacroAssembler::PrepareCallCFunction(int num_reg_arguments, int num_double_arguments, Register scratch) { ASM_CODE_COMMENT(this); @@ -5999,12 +5995,12 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, } } -void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, +void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, Register scratch) { PrepareCallCFunction(num_reg_arguments, 0, scratch); } -void TurboAssembler::CallCFunction(ExternalReference function, +void MacroAssembler::CallCFunction(ExternalReference function, int num_reg_arguments, int num_double_arguments) { BlockTrampolinePoolScope block_trampoline_pool(this); @@ -6012,21 +6008,21 @@ void TurboAssembler::CallCFunction(ExternalReference function, CallCFunctionHelper(t6, num_reg_arguments, num_double_arguments); } -void TurboAssembler::CallCFunction(Register function, int num_reg_arguments, +void MacroAssembler::CallCFunction(Register function, int num_reg_arguments, int num_double_arguments) { CallCFunctionHelper(function, num_reg_arguments, num_double_arguments); } -void TurboAssembler::CallCFunction(ExternalReference function, +void MacroAssembler::CallCFunction(ExternalReference function, int num_arguments) { CallCFunction(function, num_arguments, 0); } -void TurboAssembler::CallCFunction(Register function, int num_arguments) { +void MacroAssembler::CallCFunction(Register function, int num_arguments) { CallCFunction(function, num_arguments, 0); } -void TurboAssembler::CallCFunctionHelper(Register function, +void MacroAssembler::CallCFunctionHelper(Register function, int num_reg_arguments, int num_double_arguments) { DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters); @@ -6114,7 +6110,7 @@ void TurboAssembler::CallCFunctionHelper(Register function, #undef BRANCH_ARGS_CHECK -void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask, +void MacroAssembler::CheckPageFlag(Register object, Register scratch, int mask, Condition cc, Label* condition_met) { And(scratch, object, Operand(~kPageAlignmentMask)); LoadWord(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset)); @@ -6137,7 +6133,7 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3, UNREACHABLE(); } -void TurboAssembler::ComputeCodeStartAddress(Register dst) { +void MacroAssembler::ComputeCodeStartAddress(Register dst) { auto pc = -pc_offset(); auipc(dst, 0); if (pc != 0) { @@ -6145,7 +6141,7 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) { } } -void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, +void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit, DeoptimizeKind kind, Label* ret, Label*) { ASM_CODE_COMMENT(this); @@ -6158,12 +6154,12 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, : Deoptimizer::kEagerDeoptExitSize); } -void TurboAssembler::LoadCodeEntry(Register destination, Register code) { +void MacroAssembler::LoadCodeEntry(Register destination, Register code) { ASM_CODE_COMMENT(this); LoadWord(destination, FieldMemOperand(code, Code::kCodeEntryPointOffset)); } -void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, +void MacroAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, Register code) { ASM_CODE_COMMENT(this); // Compute the InstructionStream object pointer from the code entry point. 
@@ -6172,13 +6168,13 @@ void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, Operand(InstructionStream::kHeaderSize - kHeapObjectTag)); } -void TurboAssembler::CallCodeObject(Register code) { +void MacroAssembler::CallCodeObject(Register code) { ASM_CODE_COMMENT(this); LoadCodeEntry(code, code); Call(code); } -void TurboAssembler::JumpCodeObject(Register code, JumpMode jump_mode) { +void MacroAssembler::JumpCodeObject(Register code, JumpMode jump_mode) { ASM_CODE_COMMENT(this); DCHECK_EQ(JumpMode::kJump, jump_mode); LoadCodeEntry(code, code); @@ -6186,25 +6182,16 @@ void TurboAssembler::JumpCodeObject(Register code, JumpMode jump_mode) { } #if V8_TARGET_ARCH_RISCV64 -void TurboAssembler::LoadTaggedPointerField(const Register& destination, - const MemOperand& field_operand) { +void MacroAssembler::LoadTaggedField(const Register& destination, + const MemOperand& field_operand) { if (COMPRESS_POINTERS_BOOL) { - DecompressTaggedPointer(destination, field_operand); + DecompressTagged(destination, field_operand); } else { Ld(destination, field_operand); } } -void TurboAssembler::LoadAnyTaggedField(const Register& destination, - const MemOperand& field_operand) { - if (COMPRESS_POINTERS_BOOL) { - DecompressAnyTagged(destination, field_operand); - } else { - Ld(destination, field_operand); - } -} - -void TurboAssembler::LoadTaggedSignedField(const Register& destination, +void MacroAssembler::LoadTaggedSignedField(const Register& destination, const MemOperand& field_operand) { if (COMPRESS_POINTERS_BOOL) { DecompressTaggedSigned(destination, field_operand); @@ -6213,11 +6200,11 @@ void TurboAssembler::LoadTaggedSignedField(const Register& destination, } } -void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src) { +void MacroAssembler::SmiUntagField(Register dst, const MemOperand& src) { SmiUntag(dst, src); } -void TurboAssembler::StoreTaggedField(const Register& value, +void MacroAssembler::StoreTaggedField(const Register& value, const MemOperand& dst_field_operand) { if (COMPRESS_POINTERS_BOOL) { Sw(value, dst_field_operand); @@ -6226,7 +6213,7 @@ void TurboAssembler::StoreTaggedField(const Register& value, } } -void TurboAssembler::DecompressTaggedSigned(const Register& destination, +void MacroAssembler::DecompressTaggedSigned(const Register& destination, const MemOperand& field_operand) { ASM_CODE_COMMENT(this); Lwu(destination, field_operand); @@ -6237,26 +6224,19 @@ void TurboAssembler::DecompressTaggedSigned(const Register& destination, } } -void TurboAssembler::DecompressTaggedPointer(const Register& destination, - const MemOperand& field_operand) { +void MacroAssembler::DecompressTagged(const Register& destination, + const MemOperand& field_operand) { ASM_CODE_COMMENT(this); Lwu(destination, field_operand); AddWord(destination, kPtrComprCageBaseRegister, destination); } -void TurboAssembler::DecompressTaggedPointer(const Register& destination, - const Register& source) { +void MacroAssembler::DecompressTagged(const Register& destination, + const Register& source) { ASM_CODE_COMMENT(this); And(destination, source, Operand(0xFFFFFFFF)); AddWord(destination, kPtrComprCageBaseRegister, Operand(destination)); } - -void TurboAssembler::DecompressAnyTagged(const Register& destination, - const MemOperand& field_operand) { - ASM_CODE_COMMENT(this); - Lwu(destination, field_operand); - AddWord(destination, kPtrComprCageBaseRegister, destination); -} #endif void MacroAssembler::DropArguments(Register count, ArgumentsCountType type, ArgumentsCountMode mode, 
Register scratch) { diff --git a/src/codegen/riscv/macro-assembler-riscv.h b/src/codegen/riscv/macro-assembler-riscv.h index 22858331fa..931f2d9e5c 100644 --- a/src/codegen/riscv/macro-assembler-riscv.h +++ b/src/codegen/riscv/macro-assembler-riscv.h @@ -90,9 +90,9 @@ inline MemOperand CFunctionArgumentOperand(int index) { return MemOperand(sp, offset); } -class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { +class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { public: - using TurboAssemblerBase::TurboAssemblerBase; + using MacroAssemblerBase::MacroAssemblerBase; // Activation support. void EnterFrame(StackFrame::Type type); @@ -1072,14 +1072,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { // --------------------------------------------------------------------------- // Pointer compression Support - // Loads a field containing a HeapObject and decompresses it if pointer - // compression is enabled. - void LoadTaggedPointerField(const Register& destination, - const MemOperand& field_operand); - // Loads a field containing any tagged value and decompresses it if necessary. - void LoadAnyTaggedField(const Register& destination, - const MemOperand& field_operand); + void LoadTaggedField(const Register& destination, + const MemOperand& field_operand); // Loads a field containing a tagged signed value and decompresses it if // necessary. @@ -1095,12 +1090,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void DecompressTaggedSigned(const Register& destination, const MemOperand& field_operand); - void DecompressTaggedPointer(const Register& destination, - const MemOperand& field_operand); - void DecompressTaggedPointer(const Register& destination, - const Register& source); - void DecompressAnyTagged(const Register& destination, - const MemOperand& field_operand); + void DecompressTagged(const Register& destination, + const MemOperand& field_operand); + void DecompressTagged(const Register& destination, const Register& source); void CmpTagged(const Register& rd, const Register& rs1, const Register& rs2) { if (COMPRESS_POINTERS_BOOL) { Sub32(rd, rs1, rs2); @@ -1113,12 +1105,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { // Pointer compression Support // rv32 don't support Pointer compression. Defines these functions for // simplify builtins. - inline void LoadTaggedPointerField(const Register& destination, - const MemOperand& field_operand) { - Lw(destination, field_operand); - } - inline void LoadAnyTaggedField(const Register& destination, - const MemOperand& field_operand) { + inline void LoadTaggedField(const Register& destination, + const MemOperand& field_operand) { Lw(destination, field_operand); } inline void LoadTaggedSignedField(const Register& destination, @@ -1174,71 +1162,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void LoadLane(int sz, VRegister dst, uint8_t laneidx, MemOperand src); void StoreLane(int sz, VRegister src, uint8_t laneidx, MemOperand dst); - protected: - inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch); - inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits); - - private: - bool has_double_zero_reg_set_ = false; - bool has_single_zero_reg_set_ = false; - - // Performs a truncating conversion of a floating point number as used by - // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it - // succeeds, otherwise falls through if result is saturated. 
On return - // 'result' either holds answer, or is clobbered on fall through. - void TryInlineTruncateDoubleToI(Register result, DoubleRegister input, - Label* done); - - void CallCFunctionHelper(Register function, int num_reg_arguments, - int num_double_arguments); - - // TODO(RISCV) Reorder parameters so out parameters come last. - bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits); - bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits, - Register* scratch, const Operand& rt); - - void BranchShortHelper(int32_t offset, Label* L); - bool BranchShortHelper(int32_t offset, Label* L, Condition cond, Register rs, - const Operand& rt); - bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs, - const Operand& rt); - - void BranchAndLinkShortHelper(int32_t offset, Label* L); - void BranchAndLinkShort(int32_t offset); - void BranchAndLinkShort(Label* L); - bool BranchAndLinkShortHelper(int32_t offset, Label* L, Condition cond, - Register rs, const Operand& rt); - bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond, - Register rs, const Operand& rt); - void BranchAndLinkLong(Label* L); -#if V8_TARGET_ARCH_RISCV64 - template - void RoundHelper(FPURegister dst, FPURegister src, FPURegister fpu_scratch, - FPURoundingMode mode); -#elif V8_TARGET_ARCH_RISCV32 - void RoundDouble(FPURegister dst, FPURegister src, FPURegister fpu_scratch, - FPURoundingMode mode); - - void RoundFloat(FPURegister dst, FPURegister src, FPURegister fpu_scratch, - FPURoundingMode mode); -#endif - template - void RoundHelper(VRegister dst, VRegister src, Register scratch, - VRegister v_scratch, FPURoundingMode frm); - - template - void RoundFloatingPointToInteger(Register rd, FPURegister fs, Register result, - TruncFunc trunc); - - // Push a fixed frame, consisting of ra, fp. - void PushCommonFrame(Register marker_reg = no_reg); -}; - -// MacroAssembler implements a collection of frequently used macros. -class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { - public: - using TurboAssembler::TurboAssembler; - // It assumes that the arguments are located below the stack pointer. // argc is the number of arguments not including the receiver. // TODO(victorgomes): Remove this function once we stick with the reversed @@ -1521,7 +1444,65 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { DecodeField(reg, reg); } + protected: + inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch); + inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits); + private: + bool has_double_zero_reg_set_ = false; + bool has_single_zero_reg_set_ = false; + + // Performs a truncating conversion of a floating point number as used by + // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it + // succeeds, otherwise falls through if result is saturated. On return + // 'result' either holds answer, or is clobbered on fall through. + void TryInlineTruncateDoubleToI(Register result, DoubleRegister input, + Label* done); + + void CallCFunctionHelper(Register function, int num_reg_arguments, + int num_double_arguments); + + // TODO(RISCV) Reorder parameters so out parameters come last. 
+ bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits); + bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits, + Register* scratch, const Operand& rt); + + void BranchShortHelper(int32_t offset, Label* L); + bool BranchShortHelper(int32_t offset, Label* L, Condition cond, Register rs, + const Operand& rt); + bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs, + const Operand& rt); + + void BranchAndLinkShortHelper(int32_t offset, Label* L); + void BranchAndLinkShort(int32_t offset); + void BranchAndLinkShort(Label* L); + bool BranchAndLinkShortHelper(int32_t offset, Label* L, Condition cond, + Register rs, const Operand& rt); + bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond, + Register rs, const Operand& rt); + void BranchAndLinkLong(Label* L); +#if V8_TARGET_ARCH_RISCV64 + template + void RoundHelper(FPURegister dst, FPURegister src, FPURegister fpu_scratch, + FPURoundingMode mode); +#elif V8_TARGET_ARCH_RISCV32 + void RoundDouble(FPURegister dst, FPURegister src, FPURegister fpu_scratch, + FPURoundingMode mode); + + void RoundFloat(FPURegister dst, FPURegister src, FPURegister fpu_scratch, + FPURoundingMode mode); +#endif + template + void RoundHelper(VRegister dst, VRegister src, Register scratch, + VRegister v_scratch, FPURoundingMode frm); + + template + void RoundFloatingPointToInteger(Register rd, FPURegister fs, Register result, + TruncFunc trunc); + + // Push a fixed frame, consisting of ra, fp. + void PushCommonFrame(Register marker_reg = no_reg); + // Helper functions for generating invokes. void InvokePrologue(Register expected_parameter_count, Register actual_parameter_count, Label* done, @@ -1538,7 +1519,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { }; template -void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count, +void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count, Func GetLabelFunction) { // Ensure that dd-ed labels following this instruction use 8 bytes aligned // addresses. 
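The RISC-V hunks above (and the s390 hunks that follow) replace the LoadTaggedPointerField/LoadAnyTaggedField pair with a single LoadTaggedField, and the DecompressTaggedPointer/DecompressAnyTagged pair with DecompressTagged; the deleted bodies were already identical (load the low 32 bits of the field, then add the pointer-compression cage base), which is why the two entry points collapse into one. The standalone C++ sketch below is illustrative only and not part of the patch: it models that decompression arithmetic, and the cage base and compressed value it uses are made-up inputs.

    #include <cstdint>
    #include <cstdio>

    // Illustrative model of the DecompressTagged helper consolidated by this
    // patch: zero-extend the 32-bit compressed tagged value and rebase it onto
    // the pointer-compression cage base. The same sequence previously appeared
    // twice, as DecompressTaggedPointer and DecompressAnyTagged.
    uint64_t DecompressTagged(uint64_t cage_base, uint32_t compressed) {
      return cage_base + static_cast<uint64_t>(compressed);
    }

    int main() {
      const uint64_t cage_base = 0x0000200000000000ULL;  // assumed cage base
      const uint32_t compressed = 0x42A5C;               // assumed compressed field value
      std::printf("decompressed: 0x%llx\n",
                  static_cast<unsigned long long>(
                      DecompressTagged(cage_base, compressed)));
      return 0;
    }

Collapsing the two entry points means callers no longer need to know whether a tagged field may hold a Smi, which is why the call sites in these hunks switch wholesale to LoadTaggedField.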
diff --git a/src/codegen/s390/assembler-s390-inl.h b/src/codegen/s390/assembler-s390-inl.h index 22d9d77b50..422b92455b 100644 --- a/src/codegen/s390/assembler-s390-inl.h +++ b/src/codegen/s390/assembler-s390-inl.h @@ -142,7 +142,7 @@ Handle Assembler::code_target_object_handle_at(Address pc) { HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) { DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_)); if (IsCompressedEmbeddedObject(rmode_)) { - return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTaggedAny( + return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTagged( cage_base, Assembler::target_compressed_address_at(pc_, constant_pool_)))); } else { diff --git a/src/codegen/s390/assembler-s390.h b/src/codegen/s390/assembler-s390.h index 511e8c2489..9b0c1cedf2 100644 --- a/src/codegen/s390/assembler-s390.h +++ b/src/codegen/s390/assembler-s390.h @@ -1494,7 +1494,7 @@ class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope { private: friend class Assembler; - friend class TurboAssembler; + friend class MacroAssembler; Assembler* assembler_; RegList old_available_; diff --git a/src/codegen/s390/constants-s390.h b/src/codegen/s390/constants-s390.h index 8bade7b2ab..c8d1121a91 100644 --- a/src/codegen/s390/constants-s390.h +++ b/src/codegen/s390/constants-s390.h @@ -123,7 +123,7 @@ enum Condition { kNotZero = 21, }; -inline Condition check_condition(Condition cond) { +inline Condition to_condition(Condition cond) { switch (cond) { case kUnsignedLessThan: return lt; @@ -143,6 +143,31 @@ inline Condition check_condition(Condition cond) { return cond; } +inline bool is_signed(Condition cond) { + switch (cond) { + case kEqual: + case kNotEqual: + case kLessThan: + case kGreaterThan: + case kLessThanEqual: + case kGreaterThanEqual: + case kOverflow: + case kNoOverflow: + case kZero: + case kNotZero: + return true; + + case kUnsignedLessThan: + case kUnsignedGreaterThan: + case kUnsignedLessThanEqual: + case kUnsignedGreaterThanEqual: + return false; + + default: + UNREACHABLE(); + } +} + inline Condition NegateCondition(Condition cond) { DCHECK(cond != al); switch (cond) { diff --git a/src/codegen/s390/macro-assembler-s390.cc b/src/codegen/s390/macro-assembler-s390.cc index 817ab84aed..700ebe58bb 100644 --- a/src/codegen/s390/macro-assembler-s390.cc +++ b/src/codegen/s390/macro-assembler-s390.cc @@ -55,7 +55,7 @@ constexpr int kStackSavedSavedFPSizeInBytes = } // namespace -void TurboAssembler::DoubleMax(DoubleRegister result_reg, +void MacroAssembler::DoubleMax(DoubleRegister result_reg, DoubleRegister left_reg, DoubleRegister right_reg) { if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) { @@ -101,7 +101,7 @@ void TurboAssembler::DoubleMax(DoubleRegister result_reg, bind(&done); } -void TurboAssembler::DoubleMin(DoubleRegister result_reg, +void MacroAssembler::DoubleMin(DoubleRegister result_reg, DoubleRegister left_reg, DoubleRegister right_reg) { if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) { @@ -152,7 +152,7 @@ void TurboAssembler::DoubleMin(DoubleRegister result_reg, bind(&done); } -void TurboAssembler::FloatMax(DoubleRegister result_reg, +void MacroAssembler::FloatMax(DoubleRegister result_reg, DoubleRegister left_reg, DoubleRegister right_reg) { if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) { @@ -197,7 +197,7 @@ void TurboAssembler::FloatMax(DoubleRegister result_reg, bind(&done); } -void TurboAssembler::FloatMin(DoubleRegister result_reg, +void MacroAssembler::FloatMin(DoubleRegister result_reg, 
DoubleRegister left_reg, DoubleRegister right_reg) { if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_1)) { @@ -249,39 +249,39 @@ void TurboAssembler::FloatMin(DoubleRegister result_reg, bind(&done); } -void TurboAssembler::CeilF32(DoubleRegister dst, DoubleRegister src) { +void MacroAssembler::CeilF32(DoubleRegister dst, DoubleRegister src) { fiebra(ROUND_TOWARD_POS_INF, dst, src); } -void TurboAssembler::CeilF64(DoubleRegister dst, DoubleRegister src) { +void MacroAssembler::CeilF64(DoubleRegister dst, DoubleRegister src) { fidbra(ROUND_TOWARD_POS_INF, dst, src); } -void TurboAssembler::FloorF32(DoubleRegister dst, DoubleRegister src) { +void MacroAssembler::FloorF32(DoubleRegister dst, DoubleRegister src) { fiebra(ROUND_TOWARD_NEG_INF, dst, src); } -void TurboAssembler::FloorF64(DoubleRegister dst, DoubleRegister src) { +void MacroAssembler::FloorF64(DoubleRegister dst, DoubleRegister src) { fidbra(ROUND_TOWARD_NEG_INF, dst, src); } -void TurboAssembler::TruncF32(DoubleRegister dst, DoubleRegister src) { +void MacroAssembler::TruncF32(DoubleRegister dst, DoubleRegister src) { fiebra(ROUND_TOWARD_0, dst, src); } -void TurboAssembler::TruncF64(DoubleRegister dst, DoubleRegister src) { +void MacroAssembler::TruncF64(DoubleRegister dst, DoubleRegister src) { fidbra(ROUND_TOWARD_0, dst, src); } -void TurboAssembler::NearestIntF32(DoubleRegister dst, DoubleRegister src) { +void MacroAssembler::NearestIntF32(DoubleRegister dst, DoubleRegister src) { fiebra(ROUND_TO_NEAREST_TO_EVEN, dst, src); } -void TurboAssembler::NearestIntF64(DoubleRegister dst, DoubleRegister src) { +void MacroAssembler::NearestIntF64(DoubleRegister dst, DoubleRegister src) { fidbra(ROUND_TO_NEAREST_TO_EVEN, dst, src); } -int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, +int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, Register exclusion2, Register exclusion3) const { @@ -298,7 +298,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, return bytes; } -int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register scratch, +int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register scratch, Register exclusion1, Register exclusion2, Register exclusion3) { int bytes = 0; @@ -316,7 +316,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register scratch, return bytes; } -int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register scratch, +int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register scratch, Register exclusion1, Register exclusion2, Register exclusion3) { int bytes = 0; @@ -333,7 +333,7 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register scratch, return bytes; } -void TurboAssembler::LoadFromConstantsTable(Register destination, +void MacroAssembler::LoadFromConstantsTable(Register destination, int constant_index) { DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable)); @@ -343,18 +343,17 @@ void TurboAssembler::LoadFromConstantsTable(Register destination, CHECK(is_uint19(offset)); DCHECK_NE(destination, r0); LoadRoot(destination, RootIndex::kBuiltinsConstantsTable); - LoadTaggedPointerField( - destination, - FieldMemOperand(destination, - FixedArray::OffsetOfElementAt(constant_index)), - r1); + LoadTaggedField(destination, + FieldMemOperand(destination, FixedArray::OffsetOfElementAt( + constant_index)), + r1); } -void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) { +void 
MacroAssembler::LoadRootRelative(Register destination, int32_t offset) { LoadU64(destination, MemOperand(kRootRegister, offset)); } -void TurboAssembler::LoadRootRegisterOffset(Register destination, +void MacroAssembler::LoadRootRegisterOffset(Register destination, intptr_t offset) { if (offset == 0) { mov(destination, kRootRegister); @@ -366,7 +365,7 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination, } } -MemOperand TurboAssembler::ExternalReferenceAsOperand( +MemOperand MacroAssembler::ExternalReferenceAsOperand( ExternalReference reference, Register scratch) { if (root_array_available_ && options().enable_root_relative_access) { int64_t offset = @@ -396,9 +395,9 @@ MemOperand TurboAssembler::ExternalReferenceAsOperand( return MemOperand(scratch, 0); } -void TurboAssembler::Jump(Register target, Condition cond) { b(cond, target); } +void MacroAssembler::Jump(Register target, Condition cond) { b(cond, target); } -void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, +void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond) { Label skip; @@ -410,13 +409,13 @@ void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, bind(&skip); } -void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, +void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond) { DCHECK(!RelocInfo::IsCodeTarget(rmode)); Jump(static_cast(target), rmode, cond); } -void TurboAssembler::Jump(Handle code, RelocInfo::Mode rmode, +void MacroAssembler::Jump(Handle code, RelocInfo::Mode rmode, Condition cond) { DCHECK(RelocInfo::IsCodeTarget(rmode)); DCHECK_IMPLIES(options().isolate_independent_code, @@ -431,14 +430,14 @@ void TurboAssembler::Jump(Handle code, RelocInfo::Mode rmode, jump(code, RelocInfo::RELATIVE_CODE_TARGET, cond); } -void TurboAssembler::Jump(const ExternalReference& reference) { +void MacroAssembler::Jump(const ExternalReference& reference) { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); Move(scratch, reference); Jump(scratch); } -void TurboAssembler::Call(Register target) { +void MacroAssembler::Call(Register target) { // Branch to target via indirect branch basr(r14, target); } @@ -461,7 +460,7 @@ int MacroAssembler::CallSizeNotPredictableCodeSize(Address target, return size; } -void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, +void MacroAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond) { DCHECK(cond == al); @@ -469,7 +468,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, basr(r14, ip); } -void TurboAssembler::Call(Handle code, RelocInfo::Mode rmode, +void MacroAssembler::Call(Handle code, RelocInfo::Mode rmode, Condition cond) { DCHECK(RelocInfo::IsCodeTarget(rmode) && cond == al); @@ -485,7 +484,7 @@ void TurboAssembler::Call(Handle code, RelocInfo::Mode rmode, call(code, rmode); } -void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) { +void MacroAssembler::CallBuiltin(Builtin builtin, Condition cond) { ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin)); // Use ip directly instead of using UseScratchRegisterScope, as we do not // preserve scratch registers across calls. 
@@ -509,7 +508,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin, Condition cond) { } } -void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond) { +void MacroAssembler::TailCallBuiltin(Builtin builtin, Condition cond) { ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("tail call", builtin)); // Use ip directly instead of using UseScratchRegisterScope, as we do not @@ -539,7 +538,7 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cond) { } } -void TurboAssembler::Drop(int count) { +void MacroAssembler::Drop(int count) { if (count > 0) { int total = count * kSystemPointerSize; if (is_uint12(total)) { @@ -552,7 +551,7 @@ void TurboAssembler::Drop(int count) { } } -void TurboAssembler::Drop(Register count, Register scratch) { +void MacroAssembler::Drop(Register count, Register scratch) { ShiftLeftU64(scratch, count, Operand(kSystemPointerSizeLog2)); AddS64(sp, sp, scratch); } @@ -568,19 +567,19 @@ Operand MacroAssembler::ClearedValue() const { static_cast(HeapObjectReference::ClearedValue(isolate()).ptr())); } -void TurboAssembler::Call(Label* target) { b(r14, target); } +void MacroAssembler::Call(Label* target) { b(r14, target); } -void TurboAssembler::Push(Handle handle) { +void MacroAssembler::Push(Handle handle) { mov(r0, Operand(handle)); push(r0); } -void TurboAssembler::Push(Smi smi) { +void MacroAssembler::Push(Smi smi) { mov(r0, Operand(smi)); push(r0); } -void TurboAssembler::Move(Register dst, Handle value, +void MacroAssembler::Move(Register dst, Handle value, RelocInfo::Mode rmode) { // TODO(jgruber,v8:8887): Also consider a root-relative load when generating // non-isolate-independent code. In many cases it might be cheaper than @@ -598,7 +597,7 @@ void TurboAssembler::Move(Register dst, Handle value, } } -void TurboAssembler::Move(Register dst, ExternalReference reference) { +void MacroAssembler::Move(Register dst, ExternalReference reference) { // TODO(jgruber,v8:8887): Also consider a root-relative load when generating // non-isolate-independent code. In many cases it might be cheaper than // embedding the relocatable value. 
@@ -609,7 +608,7 @@ void TurboAssembler::Move(Register dst, ExternalReference reference) { mov(dst, Operand(reference)); } -void TurboAssembler::Move(Register dst, Register src, Condition cond) { +void MacroAssembler::Move(Register dst, Register src, Condition cond) { if (dst != src) { if (cond == al) { mov(dst, src); @@ -619,38 +618,38 @@ void TurboAssembler::Move(Register dst, Register src, Condition cond) { } } -void TurboAssembler::Move(DoubleRegister dst, DoubleRegister src) { +void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) { if (dst != src) { ldr(dst, src); } } -void TurboAssembler::Move(Register dst, const MemOperand& src) { +void MacroAssembler::Move(Register dst, const MemOperand& src) { LoadU64(dst, src); } // Wrapper around Assembler::mvc (SS-a format) -void TurboAssembler::MoveChar(const MemOperand& opnd1, const MemOperand& opnd2, +void MacroAssembler::MoveChar(const MemOperand& opnd1, const MemOperand& opnd2, const Operand& length) { mvc(opnd1, opnd2, Operand(static_cast(length.immediate() - 1))); } // Wrapper around Assembler::clc (SS-a format) -void TurboAssembler::CompareLogicalChar(const MemOperand& opnd1, +void MacroAssembler::CompareLogicalChar(const MemOperand& opnd1, const MemOperand& opnd2, const Operand& length) { clc(opnd1, opnd2, Operand(static_cast(length.immediate() - 1))); } // Wrapper around Assembler::xc (SS-a format) -void TurboAssembler::ExclusiveOrChar(const MemOperand& opnd1, +void MacroAssembler::ExclusiveOrChar(const MemOperand& opnd1, const MemOperand& opnd2, const Operand& length) { xc(opnd1, opnd2, Operand(static_cast(length.immediate() - 1))); } // Wrapper around Assembler::risbg(n) (RIE-f) -void TurboAssembler::RotateInsertSelectBits(Register dst, Register src, +void MacroAssembler::RotateInsertSelectBits(Register dst, Register src, const Operand& startBit, const Operand& endBit, const Operand& shiftAmt, @@ -663,7 +662,7 @@ void TurboAssembler::RotateInsertSelectBits(Register dst, Register src, risbg(dst, src, startBit, endBit, shiftAmt); } -void TurboAssembler::BranchRelativeOnIdxHighP(Register dst, Register inc, +void MacroAssembler::BranchRelativeOnIdxHighP(Register dst, Register inc, Label* L) { #if V8_TARGET_ARCH_S390X brxhg(dst, inc, L); @@ -672,7 +671,7 @@ void TurboAssembler::BranchRelativeOnIdxHighP(Register dst, Register inc, #endif // V8_TARGET_ARCH_S390X } -void TurboAssembler::PushArray(Register array, Register size, Register scratch, +void MacroAssembler::PushArray(Register array, Register size, Register scratch, Register scratch2, PushArrayOrder order) { Label loop, done; @@ -703,7 +702,7 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch, } } -void TurboAssembler::MultiPush(RegList regs, Register location) { +void MacroAssembler::MultiPush(RegList regs, Register location) { int16_t num_to_push = regs.Count(); int16_t stack_offset = num_to_push * kSystemPointerSize; @@ -716,7 +715,7 @@ void TurboAssembler::MultiPush(RegList regs, Register location) { } } -void TurboAssembler::MultiPop(RegList regs, Register location) { +void MacroAssembler::MultiPop(RegList regs, Register location) { int16_t stack_offset = 0; for (int16_t i = 0; i < Register::kNumRegisters; i++) { @@ -728,7 +727,7 @@ void TurboAssembler::MultiPop(RegList regs, Register location) { AddS64(location, location, Operand(stack_offset)); } -void TurboAssembler::MultiPushDoubles(DoubleRegList dregs, Register location) { +void MacroAssembler::MultiPushDoubles(DoubleRegList dregs, Register location) { int16_t num_to_push = 
dregs.Count(); int16_t stack_offset = num_to_push * kDoubleSize; @@ -742,7 +741,7 @@ void TurboAssembler::MultiPushDoubles(DoubleRegList dregs, Register location) { } } -void TurboAssembler::MultiPushV128(DoubleRegList dregs, Register scratch, +void MacroAssembler::MultiPushV128(DoubleRegList dregs, Register scratch, Register location) { int16_t num_to_push = dregs.Count(); int16_t stack_offset = num_to_push * kSimd128Size; @@ -757,7 +756,7 @@ void TurboAssembler::MultiPushV128(DoubleRegList dregs, Register scratch, } } -void TurboAssembler::MultiPopDoubles(DoubleRegList dregs, Register location) { +void MacroAssembler::MultiPopDoubles(DoubleRegList dregs, Register location) { int16_t stack_offset = 0; for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) { @@ -770,7 +769,7 @@ void TurboAssembler::MultiPopDoubles(DoubleRegList dregs, Register location) { AddS64(location, location, Operand(stack_offset)); } -void TurboAssembler::MultiPopV128(DoubleRegList dregs, Register scratch, +void MacroAssembler::MultiPopV128(DoubleRegList dregs, Register scratch, Register location) { int16_t stack_offset = 0; @@ -784,7 +783,7 @@ void TurboAssembler::MultiPopV128(DoubleRegList dregs, Register scratch, AddS64(location, location, Operand(stack_offset)); } -void TurboAssembler::MultiPushF64OrV128(DoubleRegList dregs, Register scratch, +void MacroAssembler::MultiPushF64OrV128(DoubleRegList dregs, Register scratch, Register location) { #if V8_ENABLE_WEBASSEMBLY bool generating_bultins = @@ -818,7 +817,7 @@ void TurboAssembler::MultiPushF64OrV128(DoubleRegList dregs, Register scratch, #endif } -void TurboAssembler::MultiPopF64OrV128(DoubleRegList dregs, Register scratch, +void MacroAssembler::MultiPopF64OrV128(DoubleRegList dregs, Register scratch, Register location) { #if V8_ENABLE_WEBASSEMBLY bool generating_bultins = @@ -850,7 +849,7 @@ void TurboAssembler::MultiPopF64OrV128(DoubleRegList dregs, Register scratch, #endif } -void TurboAssembler::LoadTaggedRoot(Register destination, RootIndex index) { +void MacroAssembler::LoadTaggedRoot(Register destination, RootIndex index) { ASM_CODE_COMMENT(this); if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index)) { mov(destination, Operand(ReadOnlyRootPtr(index), RelocInfo::Mode::NO_INFO)); @@ -859,37 +858,27 @@ void TurboAssembler::LoadTaggedRoot(Register destination, RootIndex index) { LoadRoot(destination, index); } -void TurboAssembler::LoadRoot(Register destination, RootIndex index, +void MacroAssembler::LoadRoot(Register destination, RootIndex index, Condition) { if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index)) { - DecompressTaggedPointer(destination, ReadOnlyRootPtr(index)); + DecompressTagged(destination, ReadOnlyRootPtr(index)); return; } LoadU64(destination, MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), r0); } -void TurboAssembler::LoadTaggedPointerField(const Register& destination, - const MemOperand& field_operand, - const Register& scratch) { +void MacroAssembler::LoadTaggedField(const Register& destination, + const MemOperand& field_operand, + const Register& scratch) { if (COMPRESS_POINTERS_BOOL) { - DecompressTaggedPointer(destination, field_operand); + DecompressTagged(destination, field_operand); } else { LoadU64(destination, field_operand, scratch); } } -void TurboAssembler::LoadAnyTaggedField(const Register& destination, - const MemOperand& field_operand, - const Register& scratch) { - if (COMPRESS_POINTERS_BOOL) { - DecompressAnyTagged(destination, field_operand); - } else { - LoadU64(destination, 
field_operand, scratch); - } -} - -void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) { +void MacroAssembler::SmiUntag(Register dst, const MemOperand& src) { if (SmiValuesAre31Bits()) { LoadS32(dst, src); } else { @@ -898,11 +887,11 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) { SmiUntag(dst); } -void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src) { +void MacroAssembler::SmiUntagField(Register dst, const MemOperand& src) { SmiUntag(dst, src); } -void TurboAssembler::StoreTaggedField(const Register& value, +void MacroAssembler::StoreTaggedField(const Register& value, const MemOperand& dst_field_operand, const Register& scratch) { if (COMPRESS_POINTERS_BOOL) { @@ -914,60 +903,43 @@ void TurboAssembler::StoreTaggedField(const Register& value, } } -void TurboAssembler::DecompressTaggedSigned(Register destination, +void MacroAssembler::DecompressTaggedSigned(Register destination, Register src) { RecordComment("[ DecompressTaggedSigned"); llgfr(destination, src); RecordComment("]"); } -void TurboAssembler::DecompressTaggedSigned(Register destination, +void MacroAssembler::DecompressTaggedSigned(Register destination, MemOperand field_operand) { RecordComment("[ DecompressTaggedSigned"); llgf(destination, field_operand); RecordComment("]"); } -void TurboAssembler::DecompressTaggedPointer(Register destination, - Register source) { - RecordComment("[ DecompressTaggedPointer"); +void MacroAssembler::DecompressTagged(Register destination, Register source) { + RecordComment("[ DecompressTagged"); llgfr(destination, source); agr(destination, kRootRegister); RecordComment("]"); } -void TurboAssembler::DecompressTaggedPointer(Register destination, - MemOperand field_operand) { - RecordComment("[ DecompressTaggedPointer"); +void MacroAssembler::DecompressTagged(Register destination, + MemOperand field_operand) { + RecordComment("[ DecompressTagged"); llgf(destination, field_operand); agr(destination, kRootRegister); RecordComment("]"); } -void TurboAssembler::DecompressTaggedPointer(const Register& destination, - Tagged_t immediate) { +void MacroAssembler::DecompressTagged(const Register& destination, + Tagged_t immediate) { ASM_CODE_COMMENT(this); mov(destination, Operand(immediate, RelocInfo::NO_INFO)); agr(destination, kRootRegister); } -void TurboAssembler::DecompressAnyTagged(Register destination, - MemOperand field_operand) { - RecordComment("[ DecompressAnyTagged"); - llgf(destination, field_operand); - agr(destination, kRootRegister); - RecordComment("]"); -} - -void TurboAssembler::DecompressAnyTagged(Register destination, - Register source) { - RecordComment("[ DecompressAnyTagged"); - llgfr(destination, source); - agr(destination, kRootRegister); - RecordComment("]"); -} - -void TurboAssembler::LoadTaggedSignedField(Register destination, +void MacroAssembler::LoadTaggedSignedField(Register destination, MemOperand field_operand) { if (COMPRESS_POINTERS_BOOL) { DecompressTaggedSigned(destination, field_operand); @@ -1015,17 +987,17 @@ void MacroAssembler::RecordWriteField(Register object, int offset, } } -void TurboAssembler::MaybeSaveRegisters(RegList registers) { +void MacroAssembler::MaybeSaveRegisters(RegList registers) { if (registers.is_empty()) return; MultiPush(registers); } -void TurboAssembler::MaybeRestoreRegisters(RegList registers) { +void MacroAssembler::MaybeRestoreRegisters(RegList registers) { if (registers.is_empty()) return; MultiPop(registers); } -void TurboAssembler::CallEphemeronKeyBarrier(Register object, +void 
MacroAssembler::CallEphemeronKeyBarrier(Register object, Register slot_address, SaveFPRegsMode fp_mode) { DCHECK(!AreAliased(object, slot_address)); @@ -1048,7 +1020,7 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object, MaybeRestoreRegisters(registers); } -void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object, +void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode) { @@ -1071,7 +1043,7 @@ void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object, MaybeRestoreRegisters(registers); } -void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address, +void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode) { // Use CallRecordWriteStubSaveRegisters if the object and slot registers @@ -1099,7 +1071,7 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address, SaveFPRegsMode fp_mode, SmiCheck smi_check) { DCHECK(!AreAliased(object, slot_address, value)); if (v8_flags.debug_code) { - LoadTaggedPointerField(r0, MemOperand(slot_address)); + LoadTaggedField(r0, MemOperand(slot_address)); CmpS64(value, r0); Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite); } @@ -1144,7 +1116,7 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address, } } -void TurboAssembler::PushCommonFrame(Register marker_reg) { +void MacroAssembler::PushCommonFrame(Register marker_reg) { ASM_CODE_COMMENT(this); int fp_delta = 0; CleanseP(r14); @@ -1158,7 +1130,7 @@ void TurboAssembler::PushCommonFrame(Register marker_reg) { la(fp, MemOperand(sp, fp_delta * kSystemPointerSize)); } -void TurboAssembler::PopCommonFrame(Register marker_reg) { +void MacroAssembler::PopCommonFrame(Register marker_reg) { if (marker_reg.is_valid()) { Pop(r14, fp, marker_reg); } else { @@ -1166,7 +1138,7 @@ void TurboAssembler::PopCommonFrame(Register marker_reg) { } } -void TurboAssembler::PushStandardFrame(Register function_reg) { +void MacroAssembler::PushStandardFrame(Register function_reg) { int fp_delta = 0; CleanseP(r14); if (function_reg.is_valid()) { @@ -1180,7 +1152,7 @@ void TurboAssembler::PushStandardFrame(Register function_reg) { Push(kJavaScriptCallArgCountRegister); } -void TurboAssembler::RestoreFrameStateForTailCall() { +void MacroAssembler::RestoreFrameStateForTailCall() { // if (V8_EMBEDDED_CONSTANT_POOL_BOOL) { // LoadU64(kConstantPoolRegister, // MemOperand(fp, StandardFrameConstants::kConstantPoolOffset)); @@ -1191,7 +1163,7 @@ void TurboAssembler::RestoreFrameStateForTailCall() { LoadU64(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); } -void TurboAssembler::CanonicalizeNaN(const DoubleRegister dst, +void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src) { // Turn potential sNaN into qNaN if (dst != src) ldr(dst, src); @@ -1199,11 +1171,11 @@ void TurboAssembler::CanonicalizeNaN(const DoubleRegister dst, sdbr(dst, kDoubleRegZero); } -void TurboAssembler::ConvertIntToDouble(DoubleRegister dst, Register src) { +void MacroAssembler::ConvertIntToDouble(DoubleRegister dst, Register src) { cdfbr(dst, src); } -void TurboAssembler::ConvertUnsignedIntToDouble(DoubleRegister dst, +void MacroAssembler::ConvertUnsignedIntToDouble(DoubleRegister dst, Register src) { if (CpuFeatures::IsSupported(FLOATING_POINT_EXT)) { cdlfbr(Condition(5), Condition(0), dst, src); @@ -1215,36 +1187,36 @@ void TurboAssembler::ConvertUnsignedIntToDouble(DoubleRegister 
dst, } } -void TurboAssembler::ConvertIntToFloat(DoubleRegister dst, Register src) { +void MacroAssembler::ConvertIntToFloat(DoubleRegister dst, Register src) { cefbra(Condition(4), dst, src); } -void TurboAssembler::ConvertUnsignedIntToFloat(DoubleRegister dst, +void MacroAssembler::ConvertUnsignedIntToFloat(DoubleRegister dst, Register src) { celfbr(Condition(4), Condition(0), dst, src); } -void TurboAssembler::ConvertInt64ToFloat(DoubleRegister double_dst, +void MacroAssembler::ConvertInt64ToFloat(DoubleRegister double_dst, Register src) { cegbr(double_dst, src); } -void TurboAssembler::ConvertInt64ToDouble(DoubleRegister double_dst, +void MacroAssembler::ConvertInt64ToDouble(DoubleRegister double_dst, Register src) { cdgbr(double_dst, src); } -void TurboAssembler::ConvertUnsignedInt64ToFloat(DoubleRegister double_dst, +void MacroAssembler::ConvertUnsignedInt64ToFloat(DoubleRegister double_dst, Register src) { celgbr(Condition(0), Condition(0), double_dst, src); } -void TurboAssembler::ConvertUnsignedInt64ToDouble(DoubleRegister double_dst, +void MacroAssembler::ConvertUnsignedInt64ToDouble(DoubleRegister double_dst, Register src) { cdlgbr(Condition(0), Condition(0), double_dst, src); } -void TurboAssembler::ConvertFloat32ToInt64(const Register dst, +void MacroAssembler::ConvertFloat32ToInt64(const Register dst, const DoubleRegister double_input, FPRoundingMode rounding_mode) { Condition m = Condition(0); @@ -1266,7 +1238,7 @@ void TurboAssembler::ConvertFloat32ToInt64(const Register dst, cgebr(m, dst, double_input); } -void TurboAssembler::ConvertDoubleToInt64(const Register dst, +void MacroAssembler::ConvertDoubleToInt64(const Register dst, const DoubleRegister double_input, FPRoundingMode rounding_mode) { Condition m = Condition(0); @@ -1288,7 +1260,7 @@ void TurboAssembler::ConvertDoubleToInt64(const Register dst, cgdbr(m, dst, double_input); } -void TurboAssembler::ConvertDoubleToInt32(const Register dst, +void MacroAssembler::ConvertDoubleToInt32(const Register dst, const DoubleRegister double_input, FPRoundingMode rounding_mode) { Condition m = Condition(0); @@ -1314,7 +1286,7 @@ void TurboAssembler::ConvertDoubleToInt32(const Register dst, cfdbr(m, dst, double_input); } -void TurboAssembler::ConvertFloat32ToInt32(const Register result, +void MacroAssembler::ConvertFloat32ToInt32(const Register result, const DoubleRegister double_input, FPRoundingMode rounding_mode) { Condition m = Condition(0); @@ -1340,7 +1312,7 @@ void TurboAssembler::ConvertFloat32ToInt32(const Register result, cfebr(m, result, double_input); } -void TurboAssembler::ConvertFloat32ToUnsignedInt32( +void MacroAssembler::ConvertFloat32ToUnsignedInt32( const Register result, const DoubleRegister double_input, FPRoundingMode rounding_mode) { Condition m = Condition(0); @@ -1365,7 +1337,7 @@ void TurboAssembler::ConvertFloat32ToUnsignedInt32( clfebr(m, Condition(0), result, double_input); } -void TurboAssembler::ConvertFloat32ToUnsignedInt64( +void MacroAssembler::ConvertFloat32ToUnsignedInt64( const Register result, const DoubleRegister double_input, FPRoundingMode rounding_mode) { Condition m = Condition(0); @@ -1387,7 +1359,7 @@ void TurboAssembler::ConvertFloat32ToUnsignedInt64( clgebr(m, Condition(0), result, double_input); } -void TurboAssembler::ConvertDoubleToUnsignedInt64( +void MacroAssembler::ConvertDoubleToUnsignedInt64( const Register dst, const DoubleRegister double_input, FPRoundingMode rounding_mode) { Condition m = Condition(0); @@ -1409,7 +1381,7 @@ void 
TurboAssembler::ConvertDoubleToUnsignedInt64( clgdbr(m, Condition(0), dst, double_input); } -void TurboAssembler::ConvertDoubleToUnsignedInt32( +void MacroAssembler::ConvertDoubleToUnsignedInt32( const Register dst, const DoubleRegister double_input, FPRoundingMode rounding_mode) { Condition m = Condition(0); @@ -1434,15 +1406,15 @@ void TurboAssembler::ConvertDoubleToUnsignedInt32( clfdbr(m, Condition(0), dst, double_input); } -void TurboAssembler::MovDoubleToInt64(Register dst, DoubleRegister src) { +void MacroAssembler::MovDoubleToInt64(Register dst, DoubleRegister src) { lgdr(dst, src); } -void TurboAssembler::MovInt64ToDouble(DoubleRegister dst, Register src) { +void MacroAssembler::MovInt64ToDouble(DoubleRegister dst, Register src) { ldgr(dst, src); } -void TurboAssembler::StubPrologue(StackFrame::Type type, Register base, +void MacroAssembler::StubPrologue(StackFrame::Type type, Register base, int prologue_offset) { { ConstantPoolUnavailableScope constant_pool_unavailable(this); @@ -1451,12 +1423,12 @@ void TurboAssembler::StubPrologue(StackFrame::Type type, Register base, } } -void TurboAssembler::Prologue(Register base, int prologue_offset) { +void MacroAssembler::Prologue(Register base, int prologue_offset) { DCHECK(base != no_reg); PushStandardFrame(r3); } -void TurboAssembler::DropArguments(Register count, ArgumentsCountType type, +void MacroAssembler::DropArguments(Register count, ArgumentsCountType type, ArgumentsCountMode mode) { int receiver_bytes = (mode == kCountExcludesReceiver) ? kSystemPointerSize : 0; @@ -1482,7 +1454,7 @@ void TurboAssembler::DropArguments(Register count, ArgumentsCountType type, } } -void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, +void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc, Register receiver, ArgumentsCountType type, ArgumentsCountMode mode) { @@ -1497,7 +1469,7 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, } } -void TurboAssembler::EnterFrame(StackFrame::Type type, +void MacroAssembler::EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) { ASM_CODE_COMMENT(this); // We create a stack frame with: @@ -1518,7 +1490,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type, #endif // V8_ENABLE_WEBASSEMBLY } -int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) { +int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) { ASM_CODE_COMMENT(this); // Drop the execution stack down to the frame pointer and restore // the caller frame pointer, return address and constant pool pointer. @@ -1594,7 +1566,7 @@ void MacroAssembler::EnterExitFrame(int stack_space, // Allocate and align the frame preparing for calling the runtime // function. - const int frame_alignment = TurboAssembler::ActivationFrameAlignment(); + const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); if (frame_alignment > 0) { DCHECK_EQ(frame_alignment, 8); ClearRightImm(sp, sp, Operand(3)); // equivalent to &= -8 @@ -1608,7 +1580,7 @@ void MacroAssembler::EnterExitFrame(int stack_space, StoreU64(r1, MemOperand(fp, ExitFrameConstants::kSPOffset)); } -int TurboAssembler::ActivationFrameAlignment() { +int MacroAssembler::ActivationFrameAlignment() { #if !defined(USE_SIMULATOR) // Running on the real platform. Use the alignment as mandated by the local // environment. 
@@ -1655,11 +1627,11 @@ void MacroAssembler::LeaveExitFrame(Register argument_count, } } -void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) { +void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) { Move(dst, d0); } -void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) { +void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) { Move(dst, d0); } @@ -1670,10 +1642,10 @@ MemOperand MacroAssembler::StackLimitAsMemOperand(StackLimitKind kind) { kind == StackLimitKind::kRealStackLimit ? ExternalReference::address_of_real_jslimit(isolate) : ExternalReference::address_of_jslimit(isolate); - DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit)); + DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit)); intptr_t offset = - TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit); + MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit); CHECK(is_int32(offset)); return MemOperand(kRootRegister, offset); } @@ -1827,8 +1799,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, // allow recompilation to take effect without changing any of the // call sites. Register code = kJavaScriptCallCodeStartRegister; - LoadTaggedPointerField(code, - FieldMemOperand(function, JSFunction::kCodeOffset)); + LoadTaggedField(code, FieldMemOperand(function, JSFunction::kCodeOffset)); switch (type) { case InvokeType::kCall: CallCodeObject(code); @@ -1853,9 +1824,9 @@ void MacroAssembler::InvokeFunctionWithNewTarget( Register expected_reg = r4; Register temp_reg = r6; - LoadTaggedPointerField(cp, FieldMemOperand(fun, JSFunction::kContextOffset)); - LoadTaggedPointerField( - temp_reg, FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset)); + LoadTaggedField(cp, FieldMemOperand(fun, JSFunction::kContextOffset)); + LoadTaggedField(temp_reg, + FieldMemOperand(fun, JSFunction::kSharedFunctionInfoOffset)); LoadU16( expected_reg, FieldMemOperand(temp_reg, @@ -1876,8 +1847,7 @@ void MacroAssembler::InvokeFunction(Register function, DCHECK_EQ(function, r3); // Get the function and setup the context. 
- LoadTaggedPointerField(cp, - FieldMemOperand(function, JSFunction::kContextOffset)); + LoadTaggedField(cp, FieldMemOperand(function, JSFunction::kContextOffset)); InvokeFunctionCode(r3, no_reg, expected_parameter_count, actual_parameter_count, type); @@ -1977,7 +1947,7 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit, ble(on_in_range); } -void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, +void MacroAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result, DoubleRegister double_input, StubCallMode stub_mode) { @@ -2009,7 +1979,7 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, bind(&done); } -void TurboAssembler::TryInlineTruncateDoubleToI(Register result, +void MacroAssembler::TryInlineTruncateDoubleToI(Register result, DoubleRegister double_input, Label* done) { ConvertDoubleToInt64(result, double_input); @@ -2158,10 +2128,9 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot( bind(&maybe_has_optimized_code); Register optimized_code_entry = flags; - LoadAnyTaggedField( - optimized_code_entry, - FieldMemOperand(feedback_vector, - FeedbackVector::kMaybeOptimizedCodeOffset)); + LoadTaggedField(optimized_code_entry, + FieldMemOperand(feedback_vector, + FeedbackVector::kMaybeOptimizedCodeOffset)); TailCallOptimizedCodeSlot(this, optimized_code_entry, r8); } @@ -2245,7 +2214,7 @@ void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value, } } -void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) { +void MacroAssembler::Check(Condition cond, AbortReason reason, CRegister cr) { Label L; b(cond, &L); Abort(reason); @@ -2253,7 +2222,7 @@ void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) { bind(&L); } -void TurboAssembler::Abort(AbortReason reason) { +void MacroAssembler::Abort(AbortReason reason) { Label abort_start; bind(&abort_start); if (v8_flags.code_comments) { @@ -2300,29 +2269,28 @@ void TurboAssembler::Abort(AbortReason reason) { // will not return here } -void TurboAssembler::LoadMap(Register destination, Register object) { - LoadTaggedPointerField(destination, - FieldMemOperand(object, HeapObject::kMapOffset)); +void MacroAssembler::LoadMap(Register destination, Register object) { + LoadTaggedField(destination, FieldMemOperand(object, HeapObject::kMapOffset)); } void MacroAssembler::LoadNativeContextSlot(Register dst, int index) { LoadMap(dst, cp); - LoadTaggedPointerField( + LoadTaggedField( dst, FieldMemOperand( dst, Map::kConstructorOrBackPointerOrNativeContextOffset)); - LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index))); + LoadTaggedField(dst, MemOperand(dst, Context::SlotOffset(index))); } #ifdef V8_ENABLE_DEBUG_CODE -void TurboAssembler::Assert(Condition cond, AbortReason reason, CRegister cr) { +void MacroAssembler::Assert(Condition cond, AbortReason reason, CRegister cr) { if (v8_flags.debug_code) Check(cond, reason, cr); } -void TurboAssembler::AssertUnreachable(AbortReason reason) { +void MacroAssembler::AssertUnreachable(AbortReason reason) { if (v8_flags.debug_code) Abort(reason); } -void TurboAssembler::AssertNotSmi(Register object) { +void MacroAssembler::AssertNotSmi(Register object) { if (v8_flags.debug_code) { static_assert(kSmiTag == 0); TestIfSmi(object); @@ -2330,7 +2298,7 @@ void TurboAssembler::AssertNotSmi(Register object) { } } -void TurboAssembler::AssertSmi(Register object) { +void MacroAssembler::AssertSmi(Register object) { if (v8_flags.debug_code) { static_assert(kSmiTag 
== 0); TestIfSmi(object); @@ -2426,7 +2394,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, static const int kRegisterPassedArguments = 5; -int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments, +int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments, int num_double_arguments) { int stack_passed_words = 0; if (num_double_arguments > DoubleRegister::kNumRegisters) { @@ -2440,7 +2408,7 @@ int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments, return stack_passed_words; } -void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, +void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, int num_double_arguments, Register scratch) { int frame_alignment = ActivationFrameAlignment(); @@ -2463,16 +2431,16 @@ void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, lay(sp, MemOperand(sp, (-stack_space) * kSystemPointerSize)); } -void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, +void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, Register scratch) { PrepareCallCFunction(num_reg_arguments, 0, scratch); } -void TurboAssembler::MovToFloatParameter(DoubleRegister src) { Move(d0, src); } +void MacroAssembler::MovToFloatParameter(DoubleRegister src) { Move(d0, src); } -void TurboAssembler::MovToFloatResult(DoubleRegister src) { Move(d0, src); } +void MacroAssembler::MovToFloatResult(DoubleRegister src) { Move(d0, src); } -void TurboAssembler::MovToFloatParameters(DoubleRegister src1, +void MacroAssembler::MovToFloatParameters(DoubleRegister src1, DoubleRegister src2) { if (src2 == d0) { DCHECK(src1 != d2); @@ -2484,28 +2452,28 @@ void TurboAssembler::MovToFloatParameters(DoubleRegister src1, } } -void TurboAssembler::CallCFunction(ExternalReference function, +void MacroAssembler::CallCFunction(ExternalReference function, int num_reg_arguments, int num_double_arguments) { Move(ip, function); CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments); } -void TurboAssembler::CallCFunction(Register function, int num_reg_arguments, +void MacroAssembler::CallCFunction(Register function, int num_reg_arguments, int num_double_arguments) { CallCFunctionHelper(function, num_reg_arguments, num_double_arguments); } -void TurboAssembler::CallCFunction(ExternalReference function, +void MacroAssembler::CallCFunction(ExternalReference function, int num_arguments) { CallCFunction(function, num_arguments, 0); } -void TurboAssembler::CallCFunction(Register function, int num_arguments) { +void MacroAssembler::CallCFunction(Register function, int num_arguments) { CallCFunction(function, num_arguments, 0); } -void TurboAssembler::CallCFunctionHelper(Register function, +void MacroAssembler::CallCFunctionHelper(Register function, int num_reg_arguments, int num_double_arguments) { DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters); @@ -2570,7 +2538,7 @@ void TurboAssembler::CallCFunctionHelper(Register function, } } -void TurboAssembler::CheckPageFlag( +void MacroAssembler::CheckPageFlag( Register object, Register scratch, // scratch may be same register as object int mask, Condition cc, Label* condition_met) { @@ -2630,9 +2598,9 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3, UNREACHABLE(); } -void TurboAssembler::mov(Register dst, Register src) { lgr(dst, src); } +void MacroAssembler::mov(Register dst, Register src) { lgr(dst, src); } -void TurboAssembler::mov(Register dst, const Operand& src) { +void MacroAssembler::mov(Register dst, const Operand& 
src) { int64_t value = 0; if (src.is_heap_number_request()) { @@ -2677,7 +2645,7 @@ void TurboAssembler::mov(Register dst, const Operand& src) { iilf(dst, Operand(lo_32)); } -void TurboAssembler::MulS32(Register dst, const MemOperand& src1) { +void MacroAssembler::MulS32(Register dst, const MemOperand& src1) { if (is_uint12(src1.offset())) { ms(dst, src1); } else if (is_int20(src1.offset())) { @@ -2687,9 +2655,9 @@ void TurboAssembler::MulS32(Register dst, const MemOperand& src1) { } } -void TurboAssembler::MulS32(Register dst, Register src1) { msr(dst, src1); } +void MacroAssembler::MulS32(Register dst, Register src1) { msr(dst, src1); } -void TurboAssembler::MulS32(Register dst, const Operand& src1) { +void MacroAssembler::MulS32(Register dst, const Operand& src1) { msfi(dst, src1); } @@ -2700,19 +2668,19 @@ void TurboAssembler::MulS32(Register dst, const Operand& src1) { srlg(dst, dst, Operand(32)); \ } -void TurboAssembler::MulHighS32(Register dst, Register src1, +void MacroAssembler::MulHighS32(Register dst, Register src1, const MemOperand& src2) { Generate_MulHigh32(msgf); } -void TurboAssembler::MulHighS32(Register dst, Register src1, Register src2) { +void MacroAssembler::MulHighS32(Register dst, Register src1, Register src2) { if (dst == src2) { std::swap(src1, src2); } Generate_MulHigh32(msgfr); } -void TurboAssembler::MulHighS32(Register dst, Register src1, +void MacroAssembler::MulHighS32(Register dst, Register src1, const Operand& src2) { Generate_MulHigh32(msgfi); } @@ -2726,16 +2694,16 @@ void TurboAssembler::MulHighS32(Register dst, Register src1, LoadU32(dst, r0); \ } -void TurboAssembler::MulHighU32(Register dst, Register src1, +void MacroAssembler::MulHighU32(Register dst, Register src1, const MemOperand& src2) { Generate_MulHighU32(ml); } -void TurboAssembler::MulHighU32(Register dst, Register src1, Register src2) { +void MacroAssembler::MulHighU32(Register dst, Register src1, Register src2) { Generate_MulHighU32(mlr); } -void TurboAssembler::MulHighU32(Register dst, Register src1, +void MacroAssembler::MulHighU32(Register dst, Register src1, const Operand& src2) { USE(dst); USE(src1); @@ -2752,7 +2720,7 @@ void TurboAssembler::MulHighU32(Register dst, Register src1, cgfr(dst, dst); \ } -void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1, +void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1, const MemOperand& src2) { Register result = dst; if (src2.rx() == dst || src2.rb() == dst) dst = r0; @@ -2760,7 +2728,7 @@ void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1, if (result != dst) llgfr(result, dst); } -void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1, +void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1, Register src2) { if (dst == src2) { std::swap(src1, src2); @@ -2768,7 +2736,7 @@ void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1, Generate_Mul32WithOverflowIfCCUnequal(msgfr); } -void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1, +void MacroAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1, const Operand& src2) { Generate_Mul32WithOverflowIfCCUnequal(msgfi); } @@ -2782,12 +2750,12 @@ void TurboAssembler::Mul32WithOverflowIfCCUnequal(Register dst, Register src1, LoadU32(dst, r1); \ } -void TurboAssembler::DivS32(Register dst, Register src1, +void MacroAssembler::DivS32(Register dst, Register src1, const MemOperand& src2) { Generate_Div32(dsgf); } -void 
TurboAssembler::DivS32(Register dst, Register src1, Register src2) { +void MacroAssembler::DivS32(Register dst, Register src1, Register src2) { Generate_Div32(dsgfr); } @@ -2801,12 +2769,12 @@ void TurboAssembler::DivS32(Register dst, Register src1, Register src2) { LoadU32(dst, r1); \ } -void TurboAssembler::DivU32(Register dst, Register src1, +void MacroAssembler::DivU32(Register dst, Register src1, const MemOperand& src2) { Generate_DivU32(dl); } -void TurboAssembler::DivU32(Register dst, Register src1, Register src2) { +void MacroAssembler::DivU32(Register dst, Register src1, Register src2) { Generate_DivU32(dlr); } @@ -2819,12 +2787,12 @@ void TurboAssembler::DivU32(Register dst, Register src1, Register src2) { lgr(dst, r1); \ } -void TurboAssembler::DivS64(Register dst, Register src1, +void MacroAssembler::DivS64(Register dst, Register src1, const MemOperand& src2) { Generate_Div64(dsg); } -void TurboAssembler::DivS64(Register dst, Register src1, Register src2) { +void MacroAssembler::DivS64(Register dst, Register src1, Register src2) { Generate_Div64(dsgr); } @@ -2838,12 +2806,12 @@ void TurboAssembler::DivS64(Register dst, Register src1, Register src2) { lgr(dst, r1); \ } -void TurboAssembler::DivU64(Register dst, Register src1, +void MacroAssembler::DivU64(Register dst, Register src1, const MemOperand& src2) { Generate_DivU64(dlg); } -void TurboAssembler::DivU64(Register dst, Register src1, Register src2) { +void MacroAssembler::DivU64(Register dst, Register src1, Register src2) { Generate_DivU64(dlgr); } @@ -2856,12 +2824,12 @@ void TurboAssembler::DivU64(Register dst, Register src1, Register src2) { LoadU32(dst, r0); \ } -void TurboAssembler::ModS32(Register dst, Register src1, +void MacroAssembler::ModS32(Register dst, Register src1, const MemOperand& src2) { Generate_Mod32(dsgf); } -void TurboAssembler::ModS32(Register dst, Register src1, Register src2) { +void MacroAssembler::ModS32(Register dst, Register src1, Register src2) { Generate_Mod32(dsgfr); } @@ -2875,12 +2843,12 @@ void TurboAssembler::ModS32(Register dst, Register src1, Register src2) { LoadU32(dst, r0); \ } -void TurboAssembler::ModU32(Register dst, Register src1, +void MacroAssembler::ModU32(Register dst, Register src1, const MemOperand& src2) { Generate_ModU32(dl); } -void TurboAssembler::ModU32(Register dst, Register src1, Register src2) { +void MacroAssembler::ModU32(Register dst, Register src1, Register src2) { Generate_ModU32(dlr); } @@ -2893,12 +2861,12 @@ void TurboAssembler::ModU32(Register dst, Register src1, Register src2) { lgr(dst, r0); \ } -void TurboAssembler::ModS64(Register dst, Register src1, +void MacroAssembler::ModS64(Register dst, Register src1, const MemOperand& src2) { Generate_Mod64(dsg); } -void TurboAssembler::ModS64(Register dst, Register src1, Register src2) { +void MacroAssembler::ModS64(Register dst, Register src1, Register src2) { Generate_Mod64(dsgr); } @@ -2912,54 +2880,54 @@ void TurboAssembler::ModS64(Register dst, Register src1, Register src2) { lgr(dst, r0); \ } -void TurboAssembler::ModU64(Register dst, Register src1, +void MacroAssembler::ModU64(Register dst, Register src1, const MemOperand& src2) { Generate_ModU64(dlg); } -void TurboAssembler::ModU64(Register dst, Register src1, Register src2) { +void MacroAssembler::ModU64(Register dst, Register src1, Register src2) { Generate_ModU64(dlgr); } #undef Generate_ModU64 -void TurboAssembler::MulS64(Register dst, const Operand& opnd) { +void MacroAssembler::MulS64(Register dst, const Operand& opnd) { msgfi(dst, opnd); } -void 
TurboAssembler::MulS64(Register dst, Register src) { msgr(dst, src); } +void MacroAssembler::MulS64(Register dst, Register src) { msgr(dst, src); } -void TurboAssembler::MulS64(Register dst, const MemOperand& opnd) { +void MacroAssembler::MulS64(Register dst, const MemOperand& opnd) { msg(dst, opnd); } -void TurboAssembler::MulHighS64(Register dst, Register src1, Register src2) { +void MacroAssembler::MulHighS64(Register dst, Register src1, Register src2) { mgrk(r0, src1, src2); lgr(dst, r0); } -void TurboAssembler::MulHighS64(Register dst, Register src1, +void MacroAssembler::MulHighS64(Register dst, Register src1, const MemOperand& src2) { // TODO(v8): implement this. UNIMPLEMENTED(); } -void TurboAssembler::MulHighU64(Register dst, Register src1, Register src2) { +void MacroAssembler::MulHighU64(Register dst, Register src1, Register src2) { lgr(r1, src1); mlgr(r0, src2); lgr(dst, r0); } -void TurboAssembler::MulHighU64(Register dst, Register src1, +void MacroAssembler::MulHighU64(Register dst, Register src1, const MemOperand& src2) { // TODO(v8): implement this. UNIMPLEMENTED(); } -void TurboAssembler::Sqrt(DoubleRegister result, DoubleRegister input) { +void MacroAssembler::Sqrt(DoubleRegister result, DoubleRegister input) { sqdbr(result, input); } -void TurboAssembler::Sqrt(DoubleRegister result, const MemOperand& input) { +void MacroAssembler::Sqrt(DoubleRegister result, const MemOperand& input) { if (is_uint12(input.offset())) { sqdb(result, input); } else { @@ -2972,7 +2940,7 @@ void TurboAssembler::Sqrt(DoubleRegister result, const MemOperand& input) { //---------------------------------------------------------------------------- // Add 32-bit (Register dst = Register dst + Immediate opnd) -void TurboAssembler::AddS32(Register dst, const Operand& opnd) { +void MacroAssembler::AddS32(Register dst, const Operand& opnd) { if (is_int16(opnd.immediate())) ahi(dst, opnd); else @@ -2980,19 +2948,19 @@ void TurboAssembler::AddS32(Register dst, const Operand& opnd) { } // Add Pointer Size (Register dst = Register dst + Immediate opnd) -void TurboAssembler::AddS64(Register dst, const Operand& opnd) { +void MacroAssembler::AddS64(Register dst, const Operand& opnd) { if (is_int16(opnd.immediate())) aghi(dst, opnd); else agfi(dst, opnd); } -void TurboAssembler::AddS32(Register dst, Register src, int32_t opnd) { +void MacroAssembler::AddS32(Register dst, Register src, int32_t opnd) { AddS32(dst, src, Operand(opnd)); } // Add 32-bit (Register dst = Register src + Immediate opnd) -void TurboAssembler::AddS32(Register dst, Register src, const Operand& opnd) { +void MacroAssembler::AddS32(Register dst, Register src, const Operand& opnd) { if (dst != src) { if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) { ahik(dst, src, opnd); @@ -3003,12 +2971,12 @@ void TurboAssembler::AddS32(Register dst, Register src, const Operand& opnd) { AddS32(dst, opnd); } -void TurboAssembler::AddS64(Register dst, Register src, int32_t opnd) { +void MacroAssembler::AddS64(Register dst, Register src, int32_t opnd) { AddS64(dst, src, Operand(opnd)); } // Add Pointer Size (Register dst = Register src + Immediate opnd) -void TurboAssembler::AddS64(Register dst, Register src, const Operand& opnd) { +void MacroAssembler::AddS64(Register dst, Register src, const Operand& opnd) { if (dst != src) { if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) { aghik(dst, src, opnd); @@ -3020,13 +2988,13 @@ void TurboAssembler::AddS64(Register dst, Register src, const Operand& opnd) { } // Add 
32-bit (Register dst = Register dst + Register src) -void TurboAssembler::AddS32(Register dst, Register src) { ar(dst, src); } +void MacroAssembler::AddS32(Register dst, Register src) { ar(dst, src); } // Add Pointer Size (Register dst = Register dst + Register src) -void TurboAssembler::AddS64(Register dst, Register src) { agr(dst, src); } +void MacroAssembler::AddS64(Register dst, Register src) { agr(dst, src); } // Add 32-bit (Register dst = Register src1 + Register src2) -void TurboAssembler::AddS32(Register dst, Register src1, Register src2) { +void MacroAssembler::AddS32(Register dst, Register src1, Register src2) { if (dst != src1 && dst != src2) { // We prefer to generate AR/AGR, over the non clobbering ARK/AGRK // as AR is a smaller instruction @@ -3043,7 +3011,7 @@ void TurboAssembler::AddS32(Register dst, Register src1, Register src2) { } // Add Pointer Size (Register dst = Register src1 + Register src2) -void TurboAssembler::AddS64(Register dst, Register src1, Register src2) { +void MacroAssembler::AddS64(Register dst, Register src1, Register src2) { if (dst != src1 && dst != src2) { // We prefer to generate AR/AGR, over the non clobbering ARK/AGRK // as AR is a smaller instruction @@ -3060,7 +3028,7 @@ void TurboAssembler::AddS64(Register dst, Register src1, Register src2) { } // Add 32-bit (Register-Memory) -void TurboAssembler::AddS32(Register dst, const MemOperand& opnd) { +void MacroAssembler::AddS32(Register dst, const MemOperand& opnd) { DCHECK(is_int20(opnd.offset())); if (is_uint12(opnd.offset())) a(dst, opnd); @@ -3069,13 +3037,13 @@ void TurboAssembler::AddS32(Register dst, const MemOperand& opnd) { } // Add Pointer Size (Register-Memory) -void TurboAssembler::AddS64(Register dst, const MemOperand& opnd) { +void MacroAssembler::AddS64(Register dst, const MemOperand& opnd) { DCHECK(is_int20(opnd.offset())); ag(dst, opnd); } // Add 32-bit (Memory - Immediate) -void TurboAssembler::AddS32(const MemOperand& opnd, const Operand& imm) { +void MacroAssembler::AddS32(const MemOperand& opnd, const Operand& imm) { DCHECK(is_int8(imm.immediate())); DCHECK(is_int20(opnd.offset())); DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT)); @@ -3083,7 +3051,7 @@ void TurboAssembler::AddS32(const MemOperand& opnd, const Operand& imm) { } // Add Pointer-sized (Memory - Immediate) -void TurboAssembler::AddS64(const MemOperand& opnd, const Operand& imm) { +void MacroAssembler::AddS64(const MemOperand& opnd, const Operand& imm) { DCHECK(is_int8(imm.immediate())); DCHECK(is_int20(opnd.offset())); DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT)); @@ -3095,7 +3063,7 @@ void TurboAssembler::AddS64(const MemOperand& opnd, const Operand& imm) { //---------------------------------------------------------------------------- // Add Logical 32-bit (Register dst = Register src1 + Register src2) -void TurboAssembler::AddU32(Register dst, Register src1, Register src2) { +void MacroAssembler::AddU32(Register dst, Register src1, Register src2) { if (dst != src2 && dst != src1) { lr(dst, src1); alr(dst, src2); @@ -3111,16 +3079,16 @@ void TurboAssembler::AddU32(Register dst, Register src1, Register src2) { } // Add Logical 32-bit (Register dst = Register dst + Immediate opnd) -void TurboAssembler::AddU32(Register dst, const Operand& imm) { +void MacroAssembler::AddU32(Register dst, const Operand& imm) { alfi(dst, imm); } // Add Logical Pointer Size (Register dst = Register dst + Immediate opnd) -void TurboAssembler::AddU64(Register dst, const Operand& imm) { +void MacroAssembler::AddU64(Register dst, 
const Operand& imm) { algfi(dst, imm); } -void TurboAssembler::AddU64(Register dst, Register src1, Register src2) { +void MacroAssembler::AddU64(Register dst, Register src1, Register src2) { if (dst != src2 && dst != src1) { if (CpuFeatures::IsSupported(DISTINCT_OPS)) { algrk(dst, src1, src2); @@ -3140,7 +3108,7 @@ void TurboAssembler::AddU64(Register dst, Register src1, Register src2) { } // Add Logical 32-bit (Register-Memory) -void TurboAssembler::AddU32(Register dst, const MemOperand& opnd) { +void MacroAssembler::AddU32(Register dst, const MemOperand& opnd) { DCHECK(is_int20(opnd.offset())); if (is_uint12(opnd.offset())) al_z(dst, opnd); @@ -3149,7 +3117,7 @@ void TurboAssembler::AddU32(Register dst, const MemOperand& opnd) { } // Add Logical Pointer Size (Register-Memory) -void TurboAssembler::AddU64(Register dst, const MemOperand& opnd) { +void MacroAssembler::AddU64(Register dst, const MemOperand& opnd) { DCHECK(is_int20(opnd.offset())); alg(dst, opnd); } @@ -3159,7 +3127,7 @@ void TurboAssembler::AddU64(Register dst, const MemOperand& opnd) { //---------------------------------------------------------------------------- // Subtract Logical 32-bit (Register dst = Register src1 - Register src2) -void TurboAssembler::SubU32(Register dst, Register src1, Register src2) { +void MacroAssembler::SubU32(Register dst, Register src1, Register src2) { if (dst != src2 && dst != src1) { lr(dst, src1); slr(dst, src2); @@ -3176,41 +3144,41 @@ void TurboAssembler::SubU32(Register dst, Register src1, Register src2) { } // Subtract 32-bit (Register dst = Register dst - Immediate opnd) -void TurboAssembler::SubS32(Register dst, const Operand& imm) { +void MacroAssembler::SubS32(Register dst, const Operand& imm) { AddS32(dst, Operand(-(imm.immediate()))); } // Subtract Pointer Size (Register dst = Register dst - Immediate opnd) -void TurboAssembler::SubS64(Register dst, const Operand& imm) { +void MacroAssembler::SubS64(Register dst, const Operand& imm) { AddS64(dst, Operand(-(imm.immediate()))); } -void TurboAssembler::SubS32(Register dst, Register src, int32_t imm) { +void MacroAssembler::SubS32(Register dst, Register src, int32_t imm) { SubS32(dst, src, Operand(imm)); } // Subtract 32-bit (Register dst = Register src - Immediate opnd) -void TurboAssembler::SubS32(Register dst, Register src, const Operand& imm) { +void MacroAssembler::SubS32(Register dst, Register src, const Operand& imm) { AddS32(dst, src, Operand(-(imm.immediate()))); } -void TurboAssembler::SubS64(Register dst, Register src, int32_t imm) { +void MacroAssembler::SubS64(Register dst, Register src, int32_t imm) { SubS64(dst, src, Operand(imm)); } // Subtract Pointer Sized (Register dst = Register src - Immediate opnd) -void TurboAssembler::SubS64(Register dst, Register src, const Operand& imm) { +void MacroAssembler::SubS64(Register dst, Register src, const Operand& imm) { AddS64(dst, src, Operand(-(imm.immediate()))); } // Subtract 32-bit (Register dst = Register dst - Register src) -void TurboAssembler::SubS32(Register dst, Register src) { sr(dst, src); } +void MacroAssembler::SubS32(Register dst, Register src) { sr(dst, src); } // Subtract Pointer Size (Register dst = Register dst - Register src) -void TurboAssembler::SubS64(Register dst, Register src) { sgr(dst, src); } +void MacroAssembler::SubS64(Register dst, Register src) { sgr(dst, src); } // Subtract 32-bit (Register = Register - Register) -void TurboAssembler::SubS32(Register dst, Register src1, Register src2) { +void MacroAssembler::SubS32(Register dst, Register src1, 
Register src2) { // Use non-clobbering version if possible if (CpuFeatures::IsSupported(DISTINCT_OPS)) { srk(dst, src1, src2); @@ -3230,7 +3198,7 @@ void TurboAssembler::SubS32(Register dst, Register src1, Register src2) { } // Subtract Pointer Sized (Register = Register - Register) -void TurboAssembler::SubS64(Register dst, Register src1, Register src2) { +void MacroAssembler::SubS64(Register dst, Register src1, Register src2) { // Use non-clobbering version if possible if (CpuFeatures::IsSupported(DISTINCT_OPS)) { sgrk(dst, src1, src2); @@ -3250,7 +3218,7 @@ void TurboAssembler::SubS64(Register dst, Register src1, Register src2) { } // Subtract 32-bit (Register-Memory) -void TurboAssembler::SubS32(Register dst, const MemOperand& opnd) { +void MacroAssembler::SubS32(Register dst, const MemOperand& opnd) { DCHECK(is_int20(opnd.offset())); if (is_uint12(opnd.offset())) s(dst, opnd); @@ -3259,7 +3227,7 @@ void TurboAssembler::SubS32(Register dst, const MemOperand& opnd) { } // Subtract Pointer Sized (Register - Memory) -void TurboAssembler::SubS64(Register dst, const MemOperand& opnd) { +void MacroAssembler::SubS64(Register dst, const MemOperand& opnd) { #if V8_TARGET_ARCH_S390X sg(dst, opnd); #else @@ -3267,24 +3235,24 @@ void TurboAssembler::SubS64(Register dst, const MemOperand& opnd) { #endif } -void TurboAssembler::MovIntToFloat(DoubleRegister dst, Register src) { +void MacroAssembler::MovIntToFloat(DoubleRegister dst, Register src) { sllg(r0, src, Operand(32)); ldgr(dst, r0); } -void TurboAssembler::MovFloatToInt(Register dst, DoubleRegister src) { +void MacroAssembler::MovFloatToInt(Register dst, DoubleRegister src) { lgdr(dst, src); srlg(dst, dst, Operand(32)); } // Load And Subtract 32-bit (similar to laa/lan/lao/lax) -void TurboAssembler::LoadAndSub32(Register dst, Register src, +void MacroAssembler::LoadAndSub32(Register dst, Register src, const MemOperand& opnd) { lcr(dst, src); laa(dst, dst, opnd); } -void TurboAssembler::LoadAndSub64(Register dst, Register src, +void MacroAssembler::LoadAndSub64(Register dst, Register src, const MemOperand& opnd) { lcgr(dst, src); laag(dst, dst, opnd); @@ -3295,7 +3263,7 @@ void TurboAssembler::LoadAndSub64(Register dst, Register src, //---------------------------------------------------------------------------- // Subtract Logical 32-bit (Register - Memory) -void TurboAssembler::SubU32(Register dst, const MemOperand& opnd) { +void MacroAssembler::SubU32(Register dst, const MemOperand& opnd) { DCHECK(is_int20(opnd.offset())); if (is_uint12(opnd.offset())) sl(dst, opnd); @@ -3304,7 +3272,7 @@ void TurboAssembler::SubU32(Register dst, const MemOperand& opnd) { } // Subtract Logical Pointer Sized (Register - Memory) -void TurboAssembler::SubU64(Register dst, const MemOperand& opnd) { +void MacroAssembler::SubU64(Register dst, const MemOperand& opnd) { DCHECK(is_int20(opnd.offset())); #if V8_TARGET_ARCH_S390X slgf(dst, opnd); @@ -3318,13 +3286,13 @@ void TurboAssembler::SubU64(Register dst, const MemOperand& opnd) { //---------------------------------------------------------------------------- // AND 32-bit - dst = dst & src -void TurboAssembler::And(Register dst, Register src) { nr(dst, src); } +void MacroAssembler::And(Register dst, Register src) { nr(dst, src); } // AND Pointer Size - dst = dst & src -void TurboAssembler::AndP(Register dst, Register src) { ngr(dst, src); } +void MacroAssembler::AndP(Register dst, Register src) { ngr(dst, src); } // Non-clobbering AND 32-bit - dst = src1 & src1 -void TurboAssembler::And(Register dst, Register 
src1, Register src2) { +void MacroAssembler::And(Register dst, Register src1, Register src2) { if (dst != src1 && dst != src2) { // We prefer to generate XR/XGR, over the non clobbering XRK/XRK // as XR is a smaller instruction @@ -3341,7 +3309,7 @@ void TurboAssembler::And(Register dst, Register src1, Register src2) { } // Non-clobbering AND pointer size - dst = src1 & src1 -void TurboAssembler::AndP(Register dst, Register src1, Register src2) { +void MacroAssembler::AndP(Register dst, Register src1, Register src2) { if (dst != src1 && dst != src2) { // We prefer to generate XR/XGR, over the non clobbering XRK/XRK // as XR is a smaller instruction @@ -3358,7 +3326,7 @@ void TurboAssembler::AndP(Register dst, Register src1, Register src2) { } // AND 32-bit (Reg - Mem) -void TurboAssembler::And(Register dst, const MemOperand& opnd) { +void MacroAssembler::And(Register dst, const MemOperand& opnd) { DCHECK(is_int20(opnd.offset())); if (is_uint12(opnd.offset())) n(dst, opnd); @@ -3367,7 +3335,7 @@ void TurboAssembler::And(Register dst, const MemOperand& opnd) { } // AND Pointer Size (Reg - Mem) -void TurboAssembler::AndP(Register dst, const MemOperand& opnd) { +void MacroAssembler::AndP(Register dst, const MemOperand& opnd) { DCHECK(is_int20(opnd.offset())); #if V8_TARGET_ARCH_S390X ng(dst, opnd); @@ -3377,10 +3345,10 @@ void TurboAssembler::AndP(Register dst, const MemOperand& opnd) { } // AND 32-bit - dst = dst & imm -void TurboAssembler::And(Register dst, const Operand& opnd) { nilf(dst, opnd); } +void MacroAssembler::And(Register dst, const Operand& opnd) { nilf(dst, opnd); } // AND Pointer Size - dst = dst & imm -void TurboAssembler::AndP(Register dst, const Operand& opnd) { +void MacroAssembler::AndP(Register dst, const Operand& opnd) { #if V8_TARGET_ARCH_S390X intptr_t value = opnd.immediate(); if (value >> 32 != -1) { @@ -3394,13 +3362,13 @@ void TurboAssembler::AndP(Register dst, const Operand& opnd) { } // AND 32-bit - dst = src & imm -void TurboAssembler::And(Register dst, Register src, const Operand& opnd) { +void MacroAssembler::And(Register dst, Register src, const Operand& opnd) { if (dst != src) lr(dst, src); nilf(dst, opnd); } // AND Pointer Size - dst = src & imm -void TurboAssembler::AndP(Register dst, Register src, const Operand& opnd) { +void MacroAssembler::AndP(Register dst, Register src, const Operand& opnd) { // Try to exploit RISBG first intptr_t value = opnd.immediate(); if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) { @@ -3441,13 +3409,13 @@ void TurboAssembler::AndP(Register dst, Register src, const Operand& opnd) { } // OR 32-bit - dst = dst & src -void TurboAssembler::Or(Register dst, Register src) { or_z(dst, src); } +void MacroAssembler::Or(Register dst, Register src) { or_z(dst, src); } // OR Pointer Size - dst = dst & src -void TurboAssembler::OrP(Register dst, Register src) { ogr(dst, src); } +void MacroAssembler::OrP(Register dst, Register src) { ogr(dst, src); } // Non-clobbering OR 32-bit - dst = src1 & src1 -void TurboAssembler::Or(Register dst, Register src1, Register src2) { +void MacroAssembler::Or(Register dst, Register src1, Register src2) { if (dst != src1 && dst != src2) { // We prefer to generate XR/XGR, over the non clobbering XRK/XRK // as XR is a smaller instruction @@ -3464,7 +3432,7 @@ void TurboAssembler::Or(Register dst, Register src1, Register src2) { } // Non-clobbering OR pointer size - dst = src1 & src1 -void TurboAssembler::OrP(Register dst, Register src1, Register src2) { +void MacroAssembler::OrP(Register dst, Register src1, 
Register src2) { if (dst != src1 && dst != src2) { // We prefer to generate XR/XGR, over the non clobbering XRK/XRK // as XR is a smaller instruction @@ -3481,7 +3449,7 @@ void TurboAssembler::OrP(Register dst, Register src1, Register src2) { } // OR 32-bit (Reg - Mem) -void TurboAssembler::Or(Register dst, const MemOperand& opnd) { +void MacroAssembler::Or(Register dst, const MemOperand& opnd) { DCHECK(is_int20(opnd.offset())); if (is_uint12(opnd.offset())) o(dst, opnd); @@ -3490,7 +3458,7 @@ void TurboAssembler::Or(Register dst, const MemOperand& opnd) { } // OR Pointer Size (Reg - Mem) -void TurboAssembler::OrP(Register dst, const MemOperand& opnd) { +void MacroAssembler::OrP(Register dst, const MemOperand& opnd) { DCHECK(is_int20(opnd.offset())); #if V8_TARGET_ARCH_S390X og(dst, opnd); @@ -3500,10 +3468,10 @@ void TurboAssembler::OrP(Register dst, const MemOperand& opnd) { } // OR 32-bit - dst = dst & imm -void TurboAssembler::Or(Register dst, const Operand& opnd) { oilf(dst, opnd); } +void MacroAssembler::Or(Register dst, const Operand& opnd) { oilf(dst, opnd); } // OR Pointer Size - dst = dst & imm -void TurboAssembler::OrP(Register dst, const Operand& opnd) { +void MacroAssembler::OrP(Register dst, const Operand& opnd) { #if V8_TARGET_ARCH_S390X intptr_t value = opnd.immediate(); if (value >> 32 != 0) { @@ -3517,25 +3485,25 @@ void TurboAssembler::OrP(Register dst, const Operand& opnd) { } // OR 32-bit - dst = src & imm -void TurboAssembler::Or(Register dst, Register src, const Operand& opnd) { +void MacroAssembler::Or(Register dst, Register src, const Operand& opnd) { if (dst != src) lr(dst, src); oilf(dst, opnd); } // OR Pointer Size - dst = src & imm -void TurboAssembler::OrP(Register dst, Register src, const Operand& opnd) { +void MacroAssembler::OrP(Register dst, Register src, const Operand& opnd) { if (dst != src) mov(dst, src); OrP(dst, opnd); } // XOR 32-bit - dst = dst & src -void TurboAssembler::Xor(Register dst, Register src) { xr(dst, src); } +void MacroAssembler::Xor(Register dst, Register src) { xr(dst, src); } // XOR Pointer Size - dst = dst & src -void TurboAssembler::XorP(Register dst, Register src) { xgr(dst, src); } +void MacroAssembler::XorP(Register dst, Register src) { xgr(dst, src); } // Non-clobbering XOR 32-bit - dst = src1 & src1 -void TurboAssembler::Xor(Register dst, Register src1, Register src2) { +void MacroAssembler::Xor(Register dst, Register src1, Register src2) { if (dst != src1 && dst != src2) { // We prefer to generate XR/XGR, over the non clobbering XRK/XRK // as XR is a smaller instruction @@ -3552,7 +3520,7 @@ void TurboAssembler::Xor(Register dst, Register src1, Register src2) { } // Non-clobbering XOR pointer size - dst = src1 & src1 -void TurboAssembler::XorP(Register dst, Register src1, Register src2) { +void MacroAssembler::XorP(Register dst, Register src1, Register src2) { if (dst != src1 && dst != src2) { // We prefer to generate XR/XGR, over the non clobbering XRK/XRK // as XR is a smaller instruction @@ -3569,7 +3537,7 @@ void TurboAssembler::XorP(Register dst, Register src1, Register src2) { } // XOR 32-bit (Reg - Mem) -void TurboAssembler::Xor(Register dst, const MemOperand& opnd) { +void MacroAssembler::Xor(Register dst, const MemOperand& opnd) { DCHECK(is_int20(opnd.offset())); if (is_uint12(opnd.offset())) x(dst, opnd); @@ -3578,7 +3546,7 @@ void TurboAssembler::Xor(Register dst, const MemOperand& opnd) { } // XOR Pointer Size (Reg - Mem) -void TurboAssembler::XorP(Register dst, const MemOperand& opnd) { +void 
MacroAssembler::XorP(Register dst, const MemOperand& opnd) { DCHECK(is_int20(opnd.offset())); #if V8_TARGET_ARCH_S390X xg(dst, opnd); @@ -3588,10 +3556,10 @@ void TurboAssembler::XorP(Register dst, const MemOperand& opnd) { } // XOR 32-bit - dst = dst & imm -void TurboAssembler::Xor(Register dst, const Operand& opnd) { xilf(dst, opnd); } +void MacroAssembler::Xor(Register dst, const Operand& opnd) { xilf(dst, opnd); } // XOR Pointer Size - dst = dst & imm -void TurboAssembler::XorP(Register dst, const Operand& opnd) { +void MacroAssembler::XorP(Register dst, const Operand& opnd) { #if V8_TARGET_ARCH_S390X intptr_t value = opnd.immediate(); xihf(dst, Operand(value >> 32)); @@ -3602,29 +3570,29 @@ void TurboAssembler::XorP(Register dst, const Operand& opnd) { } // XOR 32-bit - dst = src & imm -void TurboAssembler::Xor(Register dst, Register src, const Operand& opnd) { +void MacroAssembler::Xor(Register dst, Register src, const Operand& opnd) { if (dst != src) lr(dst, src); xilf(dst, opnd); } // XOR Pointer Size - dst = src & imm -void TurboAssembler::XorP(Register dst, Register src, const Operand& opnd) { +void MacroAssembler::XorP(Register dst, Register src, const Operand& opnd) { if (dst != src) mov(dst, src); XorP(dst, opnd); } -void TurboAssembler::Not32(Register dst, Register src) { +void MacroAssembler::Not32(Register dst, Register src) { if (src != no_reg && src != dst) lr(dst, src); xilf(dst, Operand(0xFFFFFFFF)); } -void TurboAssembler::Not64(Register dst, Register src) { +void MacroAssembler::Not64(Register dst, Register src) { if (src != no_reg && src != dst) lgr(dst, src); xihf(dst, Operand(0xFFFFFFFF)); xilf(dst, Operand(0xFFFFFFFF)); } -void TurboAssembler::NotP(Register dst, Register src) { +void MacroAssembler::NotP(Register dst, Register src) { #if V8_TARGET_ARCH_S390X Not64(dst, src); #else @@ -3632,7 +3600,7 @@ void TurboAssembler::NotP(Register dst, Register src) { #endif } -void TurboAssembler::LoadPositiveP(Register result, Register input) { +void MacroAssembler::LoadPositiveP(Register result, Register input) { #if V8_TARGET_ARCH_S390X lpgr(result, input); #else @@ -3640,7 +3608,7 @@ void TurboAssembler::LoadPositiveP(Register result, Register input) { #endif } -void TurboAssembler::LoadPositive32(Register result, Register input) { +void MacroAssembler::LoadPositive32(Register result, Register input) { lpr(result, input); lgfr(result, result); } @@ -3650,14 +3618,14 @@ void TurboAssembler::LoadPositive32(Register result, Register input) { //----------------------------------------------------------------------------- // Compare 32-bit Register vs Register -void TurboAssembler::CmpS32(Register src1, Register src2) { cr_z(src1, src2); } +void MacroAssembler::CmpS32(Register src1, Register src2) { cr_z(src1, src2); } // Compare Pointer Sized Register vs Register -void TurboAssembler::CmpS64(Register src1, Register src2) { cgr(src1, src2); } +void MacroAssembler::CmpS64(Register src1, Register src2) { cgr(src1, src2); } // Compare 32-bit Register vs Immediate // This helper will set up proper relocation entries if required. -void TurboAssembler::CmpS32(Register dst, const Operand& opnd) { +void MacroAssembler::CmpS32(Register dst, const Operand& opnd) { if (opnd.rmode() == RelocInfo::NO_INFO) { intptr_t value = opnd.immediate(); if (is_int16(value)) @@ -3673,7 +3641,7 @@ void TurboAssembler::CmpS32(Register dst, const Operand& opnd) { // Compare Pointer Sized Register vs Immediate // This helper will set up proper relocation entries if required. 
-void TurboAssembler::CmpS64(Register dst, const Operand& opnd) { +void MacroAssembler::CmpS64(Register dst, const Operand& opnd) { if (opnd.rmode() == RelocInfo::NO_INFO) { cgfi(dst, opnd); } else { @@ -3683,7 +3651,7 @@ void TurboAssembler::CmpS64(Register dst, const Operand& opnd) { } // Compare 32-bit Register vs Memory -void TurboAssembler::CmpS32(Register dst, const MemOperand& opnd) { +void MacroAssembler::CmpS32(Register dst, const MemOperand& opnd) { // make sure offset is within 20 bit range DCHECK(is_int20(opnd.offset())); if (is_uint12(opnd.offset())) @@ -3693,14 +3661,14 @@ void TurboAssembler::CmpS32(Register dst, const MemOperand& opnd) { } // Compare Pointer Size Register vs Memory -void TurboAssembler::CmpS64(Register dst, const MemOperand& opnd) { +void MacroAssembler::CmpS64(Register dst, const MemOperand& opnd) { // make sure offset is within 20 bit range DCHECK(is_int20(opnd.offset())); cg(dst, opnd); } // Using cs or scy based on the offset -void TurboAssembler::CmpAndSwap(Register old_val, Register new_val, +void MacroAssembler::CmpAndSwap(Register old_val, Register new_val, const MemOperand& opnd) { if (is_uint12(opnd.offset())) { cs(old_val, new_val, opnd); @@ -3709,7 +3677,7 @@ void TurboAssembler::CmpAndSwap(Register old_val, Register new_val, } } -void TurboAssembler::CmpAndSwap64(Register old_val, Register new_val, +void MacroAssembler::CmpAndSwap64(Register old_val, Register new_val, const MemOperand& opnd) { DCHECK(is_int20(opnd.offset())); csg(old_val, new_val, opnd); @@ -3720,10 +3688,10 @@ void TurboAssembler::CmpAndSwap64(Register old_val, Register new_val, //----------------------------------------------------------------------------- // Compare Logical 32-bit Register vs Register -void TurboAssembler::CmpU32(Register dst, Register src) { clr(dst, src); } +void MacroAssembler::CmpU32(Register dst, Register src) { clr(dst, src); } // Compare Logical Pointer Sized Register vs Register -void TurboAssembler::CmpU64(Register dst, Register src) { +void MacroAssembler::CmpU64(Register dst, Register src) { #ifdef V8_TARGET_ARCH_S390X clgr(dst, src); #else @@ -3732,12 +3700,12 @@ void TurboAssembler::CmpU64(Register dst, Register src) { } // Compare Logical 32-bit Register vs Immediate -void TurboAssembler::CmpU32(Register dst, const Operand& opnd) { +void MacroAssembler::CmpU32(Register dst, const Operand& opnd) { clfi(dst, opnd); } // Compare Logical Pointer Sized Register vs Immediate -void TurboAssembler::CmpU64(Register dst, const Operand& opnd) { +void MacroAssembler::CmpU64(Register dst, const Operand& opnd) { #if V8_TARGET_ARCH_S390X DCHECK_EQ(static_cast(opnd.immediate() >> 32), 0); clgfi(dst, opnd); @@ -3747,7 +3715,7 @@ void TurboAssembler::CmpU64(Register dst, const Operand& opnd) { } // Compare Logical 32-bit Register vs Memory -void TurboAssembler::CmpU32(Register dst, const MemOperand& opnd) { +void MacroAssembler::CmpU32(Register dst, const MemOperand& opnd) { // make sure offset is within 20 bit range DCHECK(is_int20(opnd.offset())); if (is_uint12(opnd.offset())) @@ -3757,7 +3725,7 @@ void TurboAssembler::CmpU32(Register dst, const MemOperand& opnd) { } // Compare Logical Pointer Sized Register vs Memory -void TurboAssembler::CmpU64(Register dst, const MemOperand& opnd) { +void MacroAssembler::CmpU64(Register dst, const MemOperand& opnd) { // make sure offset is within 20 bit range DCHECK(is_int20(opnd.offset())); #if V8_TARGET_ARCH_S390X @@ -3767,7 +3735,7 @@ void TurboAssembler::CmpU64(Register dst, const MemOperand& opnd) { #endif } -void 
TurboAssembler::Branch(Condition c, const Operand& opnd) { +void MacroAssembler::Branch(Condition c, const Operand& opnd) { intptr_t value = opnd.immediate(); if (is_int16(value)) brc(c, opnd); @@ -3776,7 +3744,7 @@ void TurboAssembler::Branch(Condition c, const Operand& opnd) { } // Branch On Count. Decrement R1, and branch if R1 != 0. -void TurboAssembler::BranchOnCount(Register r1, Label* l) { +void MacroAssembler::BranchOnCount(Register r1, Label* l) { int32_t offset = branch_offset(l); if (is_int16(offset)) { #if V8_TARGET_ARCH_S390X @@ -3790,7 +3758,7 @@ void TurboAssembler::BranchOnCount(Register r1, Label* l) { } } -void TurboAssembler::LoadSmiLiteral(Register dst, Smi smi) { +void MacroAssembler::LoadSmiLiteral(Register dst, Smi smi) { intptr_t value = static_cast<intptr_t>(smi.ptr()); #if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) llilf(dst, Operand(value)); @@ -3801,7 +3769,7 @@ void TurboAssembler::LoadSmiLiteral(Register dst, Smi smi) { #endif } -void TurboAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch) { +void MacroAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch) { #if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH) // CFI takes 32-bit immediate. cfi(src1, Operand(smi)); @@ -3815,7 +3783,7 @@ void TurboAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch) { #endif } -void TurboAssembler::LoadU64(Register dst, const MemOperand& mem, +void MacroAssembler::LoadU64(Register dst, const MemOperand& mem, Register scratch) { int offset = mem.offset(); @@ -3830,7 +3798,7 @@ void TurboAssembler::LoadU64(Register dst, const MemOperand& mem, } // Store a "pointer" sized value to the memory location -void TurboAssembler::StoreU64(Register src, const MemOperand& mem, +void MacroAssembler::StoreU64(Register src, const MemOperand& mem, Register scratch) { if (!is_int20(mem.offset())) { DCHECK(scratch != no_reg); @@ -3843,7 +3811,7 @@ void TurboAssembler::StoreU64(Register src, const MemOperand& mem, } // Store a "pointer" sized constant to the memory location -void TurboAssembler::StoreU64(const MemOperand& mem, const Operand& opnd, +void MacroAssembler::StoreU64(const MemOperand& mem, const Operand& opnd, Register scratch) { // Relocations not supported DCHECK_EQ(opnd.rmode(), RelocInfo::NO_INFO); @@ -3858,7 +3826,7 @@ void TurboAssembler::StoreU64(const MemOperand& mem, const Operand& opnd, } } -void TurboAssembler::LoadMultipleP(Register dst1, Register dst2, +void MacroAssembler::LoadMultipleP(Register dst1, Register dst2, const MemOperand& mem) { #if V8_TARGET_ARCH_S390X DCHECK(is_int20(mem.offset())); @@ -3873,7 +3841,7 @@ void TurboAssembler::LoadMultipleP(Register dst1, Register dst2, #endif } -void TurboAssembler::StoreMultipleP(Register src1, Register src2, +void MacroAssembler::StoreMultipleP(Register src1, Register src2, const MemOperand& mem) { #if V8_TARGET_ARCH_S390X DCHECK(is_int20(mem.offset())); @@ -3888,7 +3856,7 @@ void TurboAssembler::StoreMultipleP(Register src1, Register src2, #endif } -void TurboAssembler::LoadMultipleW(Register dst1, Register dst2, +void MacroAssembler::LoadMultipleW(Register dst1, Register dst2, const MemOperand& mem) { if (is_uint12(mem.offset())) { lm(dst1, dst2, mem); @@ -3898,7 +3866,7 @@ void TurboAssembler::LoadMultipleW(Register dst1, Register dst2, } } -void TurboAssembler::StoreMultipleW(Register src1, Register src2, +void MacroAssembler::StoreMultipleW(Register src1, Register src2, const MemOperand& mem) { if (is_uint12(mem.offset())) { stm(src1, src2, 
mem); @@ -3909,7 +3877,7 @@ void TurboAssembler::StoreMultipleW(Register src1, Register src2, } // Load 32-bits and sign extend if necessary. -void TurboAssembler::LoadS32(Register dst, Register src) { +void MacroAssembler::LoadS32(Register dst, Register src) { #if V8_TARGET_ARCH_S390X lgfr(dst, src); #else @@ -3918,8 +3886,8 @@ void TurboAssembler::LoadS32(Register dst, Register src) { } // Load 32-bits and sign extend if necessary. -void TurboAssembler::LoadS32(Register dst, const MemOperand& mem, - Register scratch) { +void MacroAssembler::LoadS32(Register dst, const MemOperand& mem, + Register scratch) { int offset = mem.offset(); if (!is_int20(offset)) { @@ -3944,7 +3912,7 @@ void TurboAssembler::LoadS32(Register dst, const MemOperand& mem, } // Load 32-bits and zero extend if necessary. -void TurboAssembler::LoadU32(Register dst, Register src) { +void MacroAssembler::LoadU32(Register dst, Register src) { #if V8_TARGET_ARCH_S390X llgfr(dst, src); #else @@ -3954,8 +3922,8 @@ void TurboAssembler::LoadU32(Register dst, Register src) { // Variable length depending on whether offset fits into immediate field // MemOperand of RX or RXY format -void TurboAssembler::LoadU32(Register dst, const MemOperand& mem, - Register scratch) { +void MacroAssembler::LoadU32(Register dst, const MemOperand& mem, + Register scratch) { Register base = mem.rb(); int offset = mem.offset(); @@ -3995,7 +3963,7 @@ void TurboAssembler::LoadU32(Register dst, const MemOperand& mem, #endif } -void TurboAssembler::LoadU16(Register dst, const MemOperand& mem) { +void MacroAssembler::LoadU16(Register dst, const MemOperand& mem) { // TODO(s390x): Add scratch reg #if V8_TARGET_ARCH_S390X llgh(dst, mem); @@ -4004,7 +3972,7 @@ void TurboAssembler::LoadU16(Register dst, const MemOperand& mem) { #endif } -void TurboAssembler::LoadU16(Register dst, Register src) { +void MacroAssembler::LoadU16(Register dst, Register src) { #if V8_TARGET_ARCH_S390X llghr(dst, src); #else @@ -4012,7 +3980,7 @@ void TurboAssembler::LoadU16(Register dst, Register src) { #endif } -void TurboAssembler::LoadS8(Register dst, const MemOperand& mem) { +void MacroAssembler::LoadS8(Register dst, const MemOperand& mem) { // TODO(s390x): Add scratch reg #if V8_TARGET_ARCH_S390X lgb(dst, mem); @@ -4021,7 +3989,7 @@ void TurboAssembler::LoadS8(Register dst, const MemOperand& mem) { #endif } -void TurboAssembler::LoadS8(Register dst, Register src) { +void MacroAssembler::LoadS8(Register dst, Register src) { #if V8_TARGET_ARCH_S390X lgbr(dst, src); #else @@ -4029,7 +3997,7 @@ void TurboAssembler::LoadS8(Register dst, Register src) { #endif } -void TurboAssembler::LoadU8(Register dst, const MemOperand& mem) { +void MacroAssembler::LoadU8(Register dst, const MemOperand& mem) { // TODO(s390x): Add scratch reg #if V8_TARGET_ARCH_S390X llgc(dst, mem); @@ -4038,7 +4006,7 @@ void TurboAssembler::LoadU8(Register dst, const MemOperand& mem) { #endif } -void TurboAssembler::LoadU8(Register dst, Register src) { +void MacroAssembler::LoadU8(Register dst, Register src) { #if V8_TARGET_ARCH_S390X llgcr(dst, src); #else @@ -4047,34 +4015,34 @@ void TurboAssembler::LoadU8(Register dst, Register src) { } #ifdef V8_TARGET_BIG_ENDIAN -void TurboAssembler::LoadU64LE(Register dst, const MemOperand& mem, +void MacroAssembler::LoadU64LE(Register dst, const MemOperand& mem, Register scratch) { lrvg(dst, mem); } -void TurboAssembler::LoadS32LE(Register dst, const MemOperand& opnd, +void MacroAssembler::LoadS32LE(Register dst, const MemOperand& opnd, Register scratch) { lrv(dst, opnd); 
LoadS32(dst, dst); } -void TurboAssembler::LoadU32LE(Register dst, const MemOperand& opnd, +void MacroAssembler::LoadU32LE(Register dst, const MemOperand& opnd, Register scratch) { lrv(dst, opnd); LoadU32(dst, dst); } -void TurboAssembler::LoadU16LE(Register dst, const MemOperand& opnd) { +void MacroAssembler::LoadU16LE(Register dst, const MemOperand& opnd) { lrvh(dst, opnd); LoadU16(dst, dst); } -void TurboAssembler::LoadS16LE(Register dst, const MemOperand& opnd) { +void MacroAssembler::LoadS16LE(Register dst, const MemOperand& opnd) { lrvh(dst, opnd); LoadS16(dst, dst); } -void TurboAssembler::LoadV128LE(DoubleRegister dst, const MemOperand& opnd, +void MacroAssembler::LoadV128LE(DoubleRegister dst, const MemOperand& opnd, Register scratch0, Register scratch1) { bool use_vlbr = CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2) && is_uint12(opnd.offset()); @@ -4088,20 +4056,20 @@ void TurboAssembler::LoadV128LE(DoubleRegister dst, const MemOperand& opnd, } } -void TurboAssembler::LoadF64LE(DoubleRegister dst, const MemOperand& opnd, +void MacroAssembler::LoadF64LE(DoubleRegister dst, const MemOperand& opnd, Register scratch) { lrvg(scratch, opnd); ldgr(dst, scratch); } -void TurboAssembler::LoadF32LE(DoubleRegister dst, const MemOperand& opnd, +void MacroAssembler::LoadF32LE(DoubleRegister dst, const MemOperand& opnd, Register scratch) { lrv(scratch, opnd); ShiftLeftU64(scratch, scratch, Operand(32)); ldgr(dst, scratch); } -void TurboAssembler::StoreU64LE(Register src, const MemOperand& mem, +void MacroAssembler::StoreU64LE(Register src, const MemOperand& mem, Register scratch) { if (!is_int20(mem.offset())) { DCHECK(scratch != no_reg); @@ -4113,7 +4081,7 @@ void TurboAssembler::StoreU64LE(Register src, const MemOperand& mem, } } -void TurboAssembler::StoreU32LE(Register src, const MemOperand& mem, +void MacroAssembler::StoreU32LE(Register src, const MemOperand& mem, Register scratch) { if (!is_int20(mem.offset())) { DCHECK(scratch != no_reg); @@ -4125,7 +4093,7 @@ void TurboAssembler::StoreU32LE(Register src, const MemOperand& mem, } } -void TurboAssembler::StoreU16LE(Register src, const MemOperand& mem, +void MacroAssembler::StoreU16LE(Register src, const MemOperand& mem, Register scratch) { if (!is_int20(mem.offset())) { DCHECK(scratch != no_reg); @@ -4137,14 +4105,14 @@ void TurboAssembler::StoreU16LE(Register src, const MemOperand& mem, } } -void TurboAssembler::StoreF64LE(DoubleRegister src, const MemOperand& opnd, +void MacroAssembler::StoreF64LE(DoubleRegister src, const MemOperand& opnd, Register scratch) { DCHECK(is_uint12(opnd.offset())); lgdr(scratch, src); strvg(scratch, opnd); } -void TurboAssembler::StoreF32LE(DoubleRegister src, const MemOperand& opnd, +void MacroAssembler::StoreF32LE(DoubleRegister src, const MemOperand& opnd, Register scratch) { DCHECK(is_uint12(opnd.offset())); lgdr(scratch, src); @@ -4152,7 +4120,7 @@ void TurboAssembler::StoreF32LE(DoubleRegister src, const MemOperand& opnd, strv(scratch, opnd); } -void TurboAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem, +void MacroAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem, Register scratch1, Register scratch2) { bool use_vstbr = CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2) && is_uint12(mem.offset()); @@ -4168,73 +4136,73 @@ void TurboAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem, } #else -void TurboAssembler::LoadU64LE(Register dst, const MemOperand& mem, +void MacroAssembler::LoadU64LE(Register dst, const MemOperand& mem, Register scratch) { 
LoadU64(dst, mem, scratch); } -void TurboAssembler::LoadS32LE(Register dst, const MemOperand& opnd, +void MacroAssembler::LoadS32LE(Register dst, const MemOperand& opnd, Register scratch) { LoadS32(dst, opnd, scratch); } -void TurboAssembler::LoadU32LE(Register dst, const MemOperand& opnd, +void MacroAssembler::LoadU32LE(Register dst, const MemOperand& opnd, Register scratch) { LoadU32(dst, opnd, scratch); } -void TurboAssembler::LoadU16LE(Register dst, const MemOperand& opnd) { +void MacroAssembler::LoadU16LE(Register dst, const MemOperand& opnd) { LoadU16(dst, opnd); } -void TurboAssembler::LoadS16LE(Register dst, const MemOperand& opnd) { +void MacroAssembler::LoadS16LE(Register dst, const MemOperand& opnd) { LoadS16(dst, opnd); } -void TurboAssembler::LoadV128LE(DoubleRegister dst, const MemOperand& opnd, +void MacroAssembler::LoadV128LE(DoubleRegister dst, const MemOperand& opnd, Register scratch0, Register scratch1) { USE(scratch1); LoadV128(dst, opnd, scratch0); } -void TurboAssembler::LoadF64LE(DoubleRegister dst, const MemOperand& opnd, +void MacroAssembler::LoadF64LE(DoubleRegister dst, const MemOperand& opnd, Register scratch) { USE(scratch); LoadF64(dst, opnd); } -void TurboAssembler::LoadF32LE(DoubleRegister dst, const MemOperand& opnd, +void MacroAssembler::LoadF32LE(DoubleRegister dst, const MemOperand& opnd, Register scratch) { USE(scratch); LoadF32(dst, opnd); } -void TurboAssembler::StoreU64LE(Register src, const MemOperand& mem, +void MacroAssembler::StoreU64LE(Register src, const MemOperand& mem, Register scratch) { StoreU64(src, mem, scratch); } -void TurboAssembler::StoreU32LE(Register src, const MemOperand& mem, +void MacroAssembler::StoreU32LE(Register src, const MemOperand& mem, Register scratch) { StoreU32(src, mem, scratch); } -void TurboAssembler::StoreU16LE(Register src, const MemOperand& mem, +void MacroAssembler::StoreU16LE(Register src, const MemOperand& mem, Register scratch) { StoreU16(src, mem, scratch); } -void TurboAssembler::StoreF64LE(DoubleRegister src, const MemOperand& opnd, +void MacroAssembler::StoreF64LE(DoubleRegister src, const MemOperand& opnd, Register scratch) { StoreF64(src, opnd); } -void TurboAssembler::StoreF32LE(DoubleRegister src, const MemOperand& opnd, +void MacroAssembler::StoreF32LE(DoubleRegister src, const MemOperand& opnd, Register scratch) { StoreF32(src, opnd); } -void TurboAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem, +void MacroAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem, Register scratch1, Register scratch2) { StoreV128(src, mem, scratch1); } @@ -4242,12 +4210,12 @@ void TurboAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem, #endif // Load And Test (Reg <- Reg) -void TurboAssembler::LoadAndTest32(Register dst, Register src) { +void MacroAssembler::LoadAndTest32(Register dst, Register src) { ltr(dst, src); } // Load And Test Pointer Sized (Reg <- Reg) -void TurboAssembler::LoadAndTestP(Register dst, Register src) { +void MacroAssembler::LoadAndTestP(Register dst, Register src) { #if V8_TARGET_ARCH_S390X ltgr(dst, src); #else @@ -4256,12 +4224,12 @@ void TurboAssembler::LoadAndTestP(Register dst, Register src) { } // Load And Test 32-bit (Reg <- Mem) -void TurboAssembler::LoadAndTest32(Register dst, const MemOperand& mem) { +void MacroAssembler::LoadAndTest32(Register dst, const MemOperand& mem) { lt_z(dst, mem); } // Load And Test Pointer Sized (Reg <- Mem) -void TurboAssembler::LoadAndTestP(Register dst, const MemOperand& mem) { +void 
MacroAssembler::LoadAndTestP(Register dst, const MemOperand& mem) { #if V8_TARGET_ARCH_S390X ltg(dst, mem); #else @@ -4270,7 +4238,7 @@ void TurboAssembler::LoadAndTestP(Register dst, const MemOperand& mem) { } // Load On Condition Pointer Sized (Reg <- Reg) -void TurboAssembler::LoadOnConditionP(Condition cond, Register dst, +void MacroAssembler::LoadOnConditionP(Condition cond, Register dst, Register src) { #if V8_TARGET_ARCH_S390X locgr(cond, dst, src); @@ -4280,7 +4248,7 @@ void TurboAssembler::LoadOnConditionP(Condition cond, Register dst, } // Load Double Precision (64-bit) Floating Point number from memory -void TurboAssembler::LoadF64(DoubleRegister dst, const MemOperand& mem) { +void MacroAssembler::LoadF64(DoubleRegister dst, const MemOperand& mem) { // for 32bit and 64bit we all use 64bit floating point regs if (is_uint12(mem.offset())) { ld(dst, mem); @@ -4290,7 +4258,7 @@ void TurboAssembler::LoadF64(DoubleRegister dst, const MemOperand& mem) { } // Load Single Precision (32-bit) Floating Point number from memory -void TurboAssembler::LoadF32(DoubleRegister dst, const MemOperand& mem) { +void MacroAssembler::LoadF32(DoubleRegister dst, const MemOperand& mem) { if (is_uint12(mem.offset())) { le_z(dst, mem); } else { @@ -4299,7 +4267,7 @@ void TurboAssembler::LoadF32(DoubleRegister dst, const MemOperand& mem) { } } -void TurboAssembler::LoadV128(Simd128Register dst, const MemOperand& mem, +void MacroAssembler::LoadV128(Simd128Register dst, const MemOperand& mem, Register scratch) { DCHECK(scratch != r0); if (is_uint12(mem.offset())) { @@ -4312,7 +4280,7 @@ void TurboAssembler::LoadV128(Simd128Register dst, const MemOperand& mem, } // Store Double Precision (64-bit) Floating Point number to memory -void TurboAssembler::StoreF64(DoubleRegister dst, const MemOperand& mem) { +void MacroAssembler::StoreF64(DoubleRegister dst, const MemOperand& mem) { if (is_uint12(mem.offset())) { std(dst, mem); } else { @@ -4321,7 +4289,7 @@ void TurboAssembler::StoreF64(DoubleRegister dst, const MemOperand& mem) { } // Store Single Precision (32-bit) Floating Point number to memory -void TurboAssembler::StoreF32(DoubleRegister src, const MemOperand& mem) { +void MacroAssembler::StoreF32(DoubleRegister src, const MemOperand& mem) { if (is_uint12(mem.offset())) { ste(src, mem); } else { @@ -4329,7 +4297,7 @@ void TurboAssembler::StoreF32(DoubleRegister src, const MemOperand& mem) { } } -void TurboAssembler::StoreV128(Simd128Register src, const MemOperand& mem, +void MacroAssembler::StoreV128(Simd128Register src, const MemOperand& mem, Register scratch) { DCHECK(scratch != r0); if (is_uint12(mem.offset())) { @@ -4341,7 +4309,7 @@ void TurboAssembler::StoreV128(Simd128Register src, const MemOperand& mem, } } -void TurboAssembler::AddF32(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::AddF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { if (dst == lhs) { aebr(dst, rhs); @@ -4353,7 +4321,7 @@ void TurboAssembler::AddF32(DoubleRegister dst, DoubleRegister lhs, } } -void TurboAssembler::SubF32(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::SubF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { if (dst == lhs) { sebr(dst, rhs); @@ -4366,7 +4334,7 @@ void TurboAssembler::SubF32(DoubleRegister dst, DoubleRegister lhs, } } -void TurboAssembler::MulF32(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::MulF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { if (dst == lhs) { meebr(dst, rhs); @@ -4378,7 +4346,7 @@ void 
TurboAssembler::MulF32(DoubleRegister dst, DoubleRegister lhs, } } -void TurboAssembler::DivF32(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::DivF32(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { if (dst == lhs) { debr(dst, rhs); @@ -4394,7 +4362,7 @@ void TurboAssembler::DivF32(DoubleRegister dst, DoubleRegister lhs, } } -void TurboAssembler::AddF64(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::AddF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { if (dst == lhs) { adbr(dst, rhs); @@ -4406,7 +4374,7 @@ void TurboAssembler::AddF64(DoubleRegister dst, DoubleRegister lhs, } } -void TurboAssembler::SubF64(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::SubF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { if (dst == lhs) { sdbr(dst, rhs); @@ -4419,7 +4387,7 @@ void TurboAssembler::SubF64(DoubleRegister dst, DoubleRegister lhs, } } -void TurboAssembler::MulF64(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::MulF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { if (dst == lhs) { mdbr(dst, rhs); @@ -4431,7 +4399,7 @@ void TurboAssembler::MulF64(DoubleRegister dst, DoubleRegister lhs, } } -void TurboAssembler::DivF64(DoubleRegister dst, DoubleRegister lhs, +void MacroAssembler::DivF64(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { if (dst == lhs) { ddbr(dst, rhs); @@ -4447,7 +4415,7 @@ void TurboAssembler::DivF64(DoubleRegister dst, DoubleRegister lhs, } } -void TurboAssembler::AddFloat32(DoubleRegister dst, const MemOperand& opnd, +void MacroAssembler::AddFloat32(DoubleRegister dst, const MemOperand& opnd, DoubleRegister scratch) { if (is_uint12(opnd.offset())) { aeb(dst, opnd); @@ -4457,7 +4425,7 @@ void TurboAssembler::AddFloat32(DoubleRegister dst, const MemOperand& opnd, } } -void TurboAssembler::AddFloat64(DoubleRegister dst, const MemOperand& opnd, +void MacroAssembler::AddFloat64(DoubleRegister dst, const MemOperand& opnd, DoubleRegister scratch) { if (is_uint12(opnd.offset())) { adb(dst, opnd); @@ -4467,7 +4435,7 @@ void TurboAssembler::AddFloat64(DoubleRegister dst, const MemOperand& opnd, } } -void TurboAssembler::SubFloat32(DoubleRegister dst, const MemOperand& opnd, +void MacroAssembler::SubFloat32(DoubleRegister dst, const MemOperand& opnd, DoubleRegister scratch) { if (is_uint12(opnd.offset())) { seb(dst, opnd); @@ -4477,7 +4445,7 @@ void TurboAssembler::SubFloat32(DoubleRegister dst, const MemOperand& opnd, } } -void TurboAssembler::SubFloat64(DoubleRegister dst, const MemOperand& opnd, +void MacroAssembler::SubFloat64(DoubleRegister dst, const MemOperand& opnd, DoubleRegister scratch) { if (is_uint12(opnd.offset())) { sdb(dst, opnd); @@ -4487,7 +4455,7 @@ void TurboAssembler::SubFloat64(DoubleRegister dst, const MemOperand& opnd, } } -void TurboAssembler::MulFloat32(DoubleRegister dst, const MemOperand& opnd, +void MacroAssembler::MulFloat32(DoubleRegister dst, const MemOperand& opnd, DoubleRegister scratch) { if (is_uint12(opnd.offset())) { meeb(dst, opnd); @@ -4497,7 +4465,7 @@ void TurboAssembler::MulFloat32(DoubleRegister dst, const MemOperand& opnd, } } -void TurboAssembler::MulFloat64(DoubleRegister dst, const MemOperand& opnd, +void MacroAssembler::MulFloat64(DoubleRegister dst, const MemOperand& opnd, DoubleRegister scratch) { if (is_uint12(opnd.offset())) { mdb(dst, opnd); @@ -4507,7 +4475,7 @@ void TurboAssembler::MulFloat64(DoubleRegister dst, const MemOperand& opnd, } } -void TurboAssembler::DivFloat32(DoubleRegister dst, const 
MemOperand& opnd, +void MacroAssembler::DivFloat32(DoubleRegister dst, const MemOperand& opnd, DoubleRegister scratch) { if (is_uint12(opnd.offset())) { deb(dst, opnd); @@ -4517,7 +4485,7 @@ void TurboAssembler::DivFloat32(DoubleRegister dst, const MemOperand& opnd, } } -void TurboAssembler::DivFloat64(DoubleRegister dst, const MemOperand& opnd, +void MacroAssembler::DivFloat64(DoubleRegister dst, const MemOperand& opnd, DoubleRegister scratch) { if (is_uint12(opnd.offset())) { ddb(dst, opnd); @@ -4527,7 +4495,7 @@ void TurboAssembler::DivFloat64(DoubleRegister dst, const MemOperand& opnd, } } -void TurboAssembler::LoadF32AsF64(DoubleRegister dst, const MemOperand& opnd, +void MacroAssembler::LoadF32AsF64(DoubleRegister dst, const MemOperand& opnd, DoubleRegister scratch) { if (is_uint12(opnd.offset())) { ldeb(dst, opnd); @@ -4539,7 +4507,7 @@ void TurboAssembler::LoadF32AsF64(DoubleRegister dst, const MemOperand& opnd, // Variable length depending on whether offset fits into immediate field // MemOperand of RX or RXY format -void TurboAssembler::StoreU32(Register src, const MemOperand& mem, +void MacroAssembler::StoreU32(Register src, const MemOperand& mem, Register scratch) { Register base = mem.rb(); int offset = mem.offset(); @@ -4570,7 +4538,7 @@ void TurboAssembler::StoreU32(Register src, const MemOperand& mem, } } -void TurboAssembler::LoadS16(Register dst, Register src) { +void MacroAssembler::LoadS16(Register dst, Register src) { #if V8_TARGET_ARCH_S390X lghr(dst, src); #else @@ -4580,8 +4548,8 @@ void TurboAssembler::LoadS16(Register dst, Register src) { // Loads 16-bits half-word value from memory and sign extends to pointer // sized register -void TurboAssembler::LoadS16(Register dst, const MemOperand& mem, - Register scratch) { +void MacroAssembler::LoadS16(Register dst, const MemOperand& mem, + Register scratch) { Register base = mem.rb(); int offset = mem.offset(); @@ -4608,7 +4576,7 @@ void TurboAssembler::LoadS16(Register dst, const MemOperand& mem, // Variable length depending on whether offset fits into immediate field // MemOperand current only supports d-form -void TurboAssembler::StoreU16(Register src, const MemOperand& mem, +void MacroAssembler::StoreU16(Register src, const MemOperand& mem, Register scratch) { Register base = mem.rb(); int offset = mem.offset(); @@ -4626,7 +4594,7 @@ void TurboAssembler::StoreU16(Register src, const MemOperand& mem, // Variable length depending on whether offset fits into immediate field // MemOperand current only supports d-form -void TurboAssembler::StoreU8(Register src, const MemOperand& mem, +void MacroAssembler::StoreU8(Register src, const MemOperand& mem, Register scratch) { Register base = mem.rb(); int offset = mem.offset(); @@ -4643,13 +4611,13 @@ void TurboAssembler::StoreU8(Register src, const MemOperand& mem, } // Shift left logical for 32-bit integer types. -void TurboAssembler::ShiftLeftU32(Register dst, Register src, +void MacroAssembler::ShiftLeftU32(Register dst, Register src, const Operand& val) { ShiftLeftU32(dst, src, r0, val); } // Shift left logical for 32-bit integer types. -void TurboAssembler::ShiftLeftU32(Register dst, Register src, Register val, +void MacroAssembler::ShiftLeftU32(Register dst, Register src, Register val, const Operand& val2) { if (dst == src) { sll(dst, val, val2); @@ -4663,25 +4631,25 @@ void TurboAssembler::ShiftLeftU32(Register dst, Register src, Register val, } // Shift left logical for 32-bit integer types. 
-void TurboAssembler::ShiftLeftU64(Register dst, Register src, +void MacroAssembler::ShiftLeftU64(Register dst, Register src, const Operand& val) { ShiftLeftU64(dst, src, r0, val); } // Shift left logical for 32-bit integer types. -void TurboAssembler::ShiftLeftU64(Register dst, Register src, Register val, +void MacroAssembler::ShiftLeftU64(Register dst, Register src, Register val, const Operand& val2) { sllg(dst, src, val, val2); } // Shift right logical for 32-bit integer types. -void TurboAssembler::ShiftRightU32(Register dst, Register src, +void MacroAssembler::ShiftRightU32(Register dst, Register src, const Operand& val) { ShiftRightU32(dst, src, r0, val); } // Shift right logical for 32-bit integer types. -void TurboAssembler::ShiftRightU32(Register dst, Register src, Register val, +void MacroAssembler::ShiftRightU32(Register dst, Register src, Register val, const Operand& val2) { if (dst == src) { srl(dst, val, val2); @@ -4694,25 +4662,25 @@ void TurboAssembler::ShiftRightU32(Register dst, Register src, Register val, } } -void TurboAssembler::ShiftRightU64(Register dst, Register src, Register val, +void MacroAssembler::ShiftRightU64(Register dst, Register src, Register val, const Operand& val2) { srlg(dst, src, val, val2); } // Shift right logical for 64-bit integer types. -void TurboAssembler::ShiftRightU64(Register dst, Register src, +void MacroAssembler::ShiftRightU64(Register dst, Register src, const Operand& val) { ShiftRightU64(dst, src, r0, val); } // Shift right arithmetic for 32-bit integer types. -void TurboAssembler::ShiftRightS32(Register dst, Register src, +void MacroAssembler::ShiftRightS32(Register dst, Register src, const Operand& val) { ShiftRightS32(dst, src, r0, val); } // Shift right arithmetic for 32-bit integer types. -void TurboAssembler::ShiftRightS32(Register dst, Register src, Register val, +void MacroAssembler::ShiftRightS32(Register dst, Register src, Register val, const Operand& val2) { if (dst == src) { sra(dst, val, val2); @@ -4726,19 +4694,19 @@ void TurboAssembler::ShiftRightS32(Register dst, Register src, Register val, } // Shift right arithmetic for 64-bit integer types. -void TurboAssembler::ShiftRightS64(Register dst, Register src, +void MacroAssembler::ShiftRightS64(Register dst, Register src, const Operand& val) { ShiftRightS64(dst, src, r0, val); } // Shift right arithmetic for 64-bit integer types. 
-void TurboAssembler::ShiftRightS64(Register dst, Register src, Register val, +void MacroAssembler::ShiftRightS64(Register dst, Register src, Register val, const Operand& val2) { srag(dst, src, val, val2); } // Clear right most # of bits -void TurboAssembler::ClearRightImm(Register dst, Register src, +void MacroAssembler::ClearRightImm(Register dst, Register src, const Operand& val) { int numBitsToClear = val.immediate() % (kSystemPointerSize * 8); @@ -4765,7 +4733,7 @@ void TurboAssembler::ClearRightImm(Register dst, Register src, } } -void TurboAssembler::Popcnt32(Register dst, Register src) { +void MacroAssembler::Popcnt32(Register dst, Register src) { DCHECK(src != r0); DCHECK(dst != r0); @@ -4778,7 +4746,7 @@ void TurboAssembler::Popcnt32(Register dst, Register src) { } #ifdef V8_TARGET_ARCH_S390X -void TurboAssembler::Popcnt64(Register dst, Register src) { +void MacroAssembler::Popcnt64(Register dst, Register src) { DCHECK(src != r0); DCHECK(dst != r0); @@ -4793,7 +4761,7 @@ void TurboAssembler::Popcnt64(Register dst, Register src) { } #endif -void TurboAssembler::SwapP(Register src, Register dst, Register scratch) { +void MacroAssembler::SwapP(Register src, Register dst, Register scratch) { if (src == dst) return; DCHECK(!AreAliased(src, dst, scratch)); mov(scratch, src); @@ -4801,7 +4769,7 @@ void TurboAssembler::SwapP(Register src, Register dst, Register scratch) { mov(dst, scratch); } -void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) { +void MacroAssembler::SwapP(Register src, MemOperand dst, Register scratch) { if (dst.rx() != r0) DCHECK(!AreAliased(src, dst.rx(), scratch)); if (dst.rb() != r0) DCHECK(!AreAliased(src, dst.rb(), scratch)); DCHECK(!AreAliased(src, scratch)); @@ -4810,7 +4778,7 @@ void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) { StoreU64(scratch, dst); } -void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0, +void MacroAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0, Register scratch_1) { if (src.rx() != r0) DCHECK(!AreAliased(src.rx(), scratch_0, scratch_1)); if (src.rb() != r0) DCHECK(!AreAliased(src.rb(), scratch_0, scratch_1)); @@ -4823,7 +4791,7 @@ void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0, StoreU64(scratch_1, src); } -void TurboAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst, +void MacroAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst, DoubleRegister scratch) { if (src == dst) return; DCHECK(!AreAliased(src, dst, scratch)); @@ -4832,7 +4800,7 @@ void TurboAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst, ldr(dst, scratch); } -void TurboAssembler::SwapFloat32(DoubleRegister src, MemOperand dst, +void MacroAssembler::SwapFloat32(DoubleRegister src, MemOperand dst, DoubleRegister scratch) { DCHECK(!AreAliased(src, scratch)); ldr(scratch, src); @@ -4840,7 +4808,7 @@ void TurboAssembler::SwapFloat32(DoubleRegister src, MemOperand dst, StoreF32(scratch, dst); } -void TurboAssembler::SwapFloat32(MemOperand src, MemOperand dst, +void MacroAssembler::SwapFloat32(MemOperand src, MemOperand dst, DoubleRegister scratch) { // push d0, to be used as scratch lay(sp, MemOperand(sp, -kDoubleSize)); @@ -4854,7 +4822,7 @@ void TurboAssembler::SwapFloat32(MemOperand src, MemOperand dst, lay(sp, MemOperand(sp, kDoubleSize)); } -void TurboAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst, +void MacroAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst, DoubleRegister scratch) { if (src == 
dst) return; DCHECK(!AreAliased(src, dst, scratch)); @@ -4863,7 +4831,7 @@ void TurboAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst, ldr(dst, scratch); } -void TurboAssembler::SwapDouble(DoubleRegister src, MemOperand dst, +void MacroAssembler::SwapDouble(DoubleRegister src, MemOperand dst, DoubleRegister scratch) { DCHECK(!AreAliased(src, scratch)); ldr(scratch, src); @@ -4871,7 +4839,7 @@ void TurboAssembler::SwapDouble(DoubleRegister src, MemOperand dst, StoreF64(scratch, dst); } -void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst, +void MacroAssembler::SwapDouble(MemOperand src, MemOperand dst, DoubleRegister scratch) { // push d0, to be used as scratch lay(sp, MemOperand(sp, -kDoubleSize)); @@ -4885,7 +4853,7 @@ void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst, lay(sp, MemOperand(sp, kDoubleSize)); } -void TurboAssembler::SwapSimd128(Simd128Register src, Simd128Register dst, +void MacroAssembler::SwapSimd128(Simd128Register src, Simd128Register dst, Simd128Register scratch) { if (src == dst) return; vlr(scratch, src, Condition(0), Condition(0), Condition(0)); @@ -4893,7 +4861,7 @@ void TurboAssembler::SwapSimd128(Simd128Register src, Simd128Register dst, vlr(dst, scratch, Condition(0), Condition(0), Condition(0)); } -void TurboAssembler::SwapSimd128(Simd128Register src, MemOperand dst, +void MacroAssembler::SwapSimd128(Simd128Register src, MemOperand dst, Simd128Register scratch) { DCHECK(!AreAliased(src, scratch)); vlr(scratch, src, Condition(0), Condition(0), Condition(0)); @@ -4901,7 +4869,7 @@ void TurboAssembler::SwapSimd128(Simd128Register src, MemOperand dst, StoreV128(scratch, dst, ip); } -void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst, +void MacroAssembler::SwapSimd128(MemOperand src, MemOperand dst, Simd128Register scratch) { // push d0, to be used as scratch lay(sp, MemOperand(sp, -kSimd128Size)); @@ -4915,27 +4883,27 @@ void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst, lay(sp, MemOperand(sp, kSimd128Size)); } -void TurboAssembler::ComputeCodeStartAddress(Register dst) { +void MacroAssembler::ComputeCodeStartAddress(Register dst) { larl(dst, Operand(-pc_offset() / 2)); } -void TurboAssembler::LoadPC(Register dst) { +void MacroAssembler::LoadPC(Register dst) { Label current_pc; larl(dst, &current_pc); bind(&current_pc); } -void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) { +void MacroAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) { CmpS32(x, Operand(y)); beq(dest); } -void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) { +void MacroAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) { CmpS32(x, Operand(y)); blt(dest); } -void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { +void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { static_assert(kSystemPointerSize == 8); static_assert(kSmiTagSize == 1); static_assert(kSmiTag == 0); @@ -4952,31 +4920,31 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) { IsolateData::builtin_entry_table_offset())); } -void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { +void MacroAssembler::CallBuiltinByIndex(Register builtin_index) { LoadEntryFromBuiltinIndex(builtin_index); Call(builtin_index); } -void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin, +void MacroAssembler::LoadEntryFromBuiltin(Builtin builtin, Register destination) { ASM_CODE_COMMENT(this); LoadU64(destination, EntryFromBuiltinAsOperand(builtin)); } -MemOperand 
TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { +MemOperand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { ASM_CODE_COMMENT(this); DCHECK(root_array_available()); return MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(builtin)); } -void TurboAssembler::LoadCodeEntry(Register destination, Register code_object) { +void MacroAssembler::LoadCodeEntry(Register destination, Register code_object) { ASM_CODE_COMMENT(this); LoadU64(destination, FieldMemOperand(code_object, Code::kCodeEntryPointOffset)); } -void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, +void MacroAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, Register code_object) { ASM_CODE_COMMENT(this); // Compute the InstructionStream object pointer from the code entry point. @@ -4986,20 +4954,20 @@ void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, Operand(InstructionStream::kHeaderSize - kHeapObjectTag)); } -void TurboAssembler::CallCodeObject(Register code_object) { +void MacroAssembler::CallCodeObject(Register code_object) { ASM_CODE_COMMENT(this); LoadCodeEntry(code_object, code_object); Call(code_object); } -void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { +void MacroAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { ASM_CODE_COMMENT(this); DCHECK_EQ(JumpMode::kJump, jump_mode); LoadCodeEntry(code_object, code_object); Jump(code_object); } -void TurboAssembler::StoreReturnAddressAndCall(Register target) { +void MacroAssembler::StoreReturnAddressAndCall(Register target) { // This generates the final instruction sequence for calls to C functions // once an exit frame has been constructed. // @@ -5017,7 +4985,7 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) { bind(&return_label); } -void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, +void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit, DeoptimizeKind kind, Label* ret, Label*) { ASM_CODE_COMMENT(this); @@ -5029,10 +4997,10 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, : Deoptimizer::kEagerDeoptExitSize); } -void TurboAssembler::Trap() { stop(); } -void TurboAssembler::DebugBreak() { stop(); } +void MacroAssembler::Trap() { stop(); } +void MacroAssembler::DebugBreak() { stop(); } -void TurboAssembler::CountLeadingZerosU32(Register dst, Register src, +void MacroAssembler::CountLeadingZerosU32(Register dst, Register src, Register scratch_pair) { llgfr(dst, src); flogr(scratch_pair, @@ -5040,14 +5008,14 @@ void TurboAssembler::CountLeadingZerosU32(Register dst, Register src, AddS32(dst, scratch_pair, Operand(-32)); } -void TurboAssembler::CountLeadingZerosU64(Register dst, Register src, +void MacroAssembler::CountLeadingZerosU64(Register dst, Register src, Register scratch_pair) { flogr(scratch_pair, src); // will modify a register pair scratch and scratch + 1 mov(dst, scratch_pair); } -void TurboAssembler::CountTrailingZerosU32(Register dst, Register src, +void MacroAssembler::CountTrailingZerosU32(Register dst, Register src, Register scratch_pair) { Register scratch0 = scratch_pair; Register scratch1 = Register::from_code(scratch_pair.code() + 1); @@ -5068,7 +5036,7 @@ void TurboAssembler::CountTrailingZerosU32(Register dst, Register src, bind(&done); } -void TurboAssembler::CountTrailingZerosU64(Register dst, Register src, +void MacroAssembler::CountTrailingZerosU64(Register dst, Register src, Register scratch_pair) { 
Register scratch0 = scratch_pair; Register scratch1 = Register::from_code(scratch_pair.code() + 1); @@ -5088,7 +5056,7 @@ void TurboAssembler::CountTrailingZerosU64(Register dst, Register src, bind(&done); } -void TurboAssembler::AtomicCmpExchangeHelper(Register addr, Register output, +void MacroAssembler::AtomicCmpExchangeHelper(Register addr, Register output, Register old_value, Register new_value, int start, int end, int shift_amount, @@ -5106,7 +5074,7 @@ void TurboAssembler::AtomicCmpExchangeHelper(Register addr, Register output, Operand(64 - shift_amount), true); } -void TurboAssembler::AtomicCmpExchangeU8(Register addr, Register output, +void MacroAssembler::AtomicCmpExchangeU8(Register addr, Register output, Register old_value, Register new_value, Register temp0, Register temp1) { #ifdef V8_TARGET_BIG_ENDIAN @@ -5155,7 +5123,7 @@ void TurboAssembler::AtomicCmpExchangeU8(Register addr, Register output, bind(&done); } -void TurboAssembler::AtomicCmpExchangeU16(Register addr, Register output, +void MacroAssembler::AtomicCmpExchangeU16(Register addr, Register output, Register old_value, Register new_value, Register temp0, Register temp1) { @@ -5193,7 +5161,7 @@ void TurboAssembler::AtomicCmpExchangeU16(Register addr, Register output, bind(&done); } -void TurboAssembler::AtomicExchangeHelper(Register addr, Register value, +void MacroAssembler::AtomicExchangeHelper(Register addr, Register value, Register output, int start, int end, int shift_amount, int offset, Register scratch) { @@ -5208,7 +5176,7 @@ void TurboAssembler::AtomicExchangeHelper(Register addr, Register value, srl(output, Operand(shift_amount)); } -void TurboAssembler::AtomicExchangeU8(Register addr, Register value, +void MacroAssembler::AtomicExchangeU8(Register addr, Register value, Register output, Register scratch) { #ifdef V8_TARGET_BIG_ENDIAN #define ATOMIC_EXCHANGE_BYTE(i) \ @@ -5260,7 +5228,7 @@ void TurboAssembler::AtomicExchangeU8(Register addr, Register value, bind(&done); } -void TurboAssembler::AtomicExchangeU16(Register addr, Register value, +void MacroAssembler::AtomicExchangeU16(Register addr, Register value, Register output, Register scratch) { #ifdef V8_TARGET_BIG_ENDIAN #define ATOMIC_EXCHANGE_HALFWORD(i) \ @@ -5301,77 +5269,77 @@ void TurboAssembler::AtomicExchangeU16(Register addr, Register value, } // Simd Support. 
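// ---------------------------------------------------------------------------
// Editorial note (illustration only, not part of this patch): the renamed
// SIMD lane helpers below (F64x2ExtractLane, F32x4ExtractLane, I8x16ReplaceLane,
// and friends) all mirror the WebAssembly lane index before passing it to
// vrep/vlgv/vlvg -- visible in the code as Operand(1 - imm_lane_idx),
// Operand(3 - imm_lane_idx), and so on -- because element 0 of a
// z/Architecture vector register is the leftmost (most significant) element,
// while Wasm numbers lanes from the little-endian end. A minimal standalone
// sketch of that mapping; the helper name WasmLaneToZElement is invented here
// purely for illustration:
constexpr int WasmLaneToZElement(int lane, int lanes_per_vector) {
  // Wasm counts lanes from the least significant end; z/Arch element 0 is the
  // most significant, so the index is mirrored within the vector.
  return (lanes_per_vector - 1) - lane;
}
static_assert(WasmLaneToZElement(0, 2) == 1, "F64x2: Operand(1 - imm_lane_idx)");
static_assert(WasmLaneToZElement(3, 4) == 0, "F32x4: Operand(3 - imm_lane_idx)");
static_assert(WasmLaneToZElement(0, 16) == 15, "I8x16: Operand(15 - imm_lane_idx)");
// ---------------------------------------------------------------------------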
-void TurboAssembler::F64x2Splat(Simd128Register dst, Simd128Register src) { +void MacroAssembler::F64x2Splat(Simd128Register dst, Simd128Register src) { vrep(dst, src, Operand(0), Condition(3)); } -void TurboAssembler::F32x4Splat(Simd128Register dst, Simd128Register src) { +void MacroAssembler::F32x4Splat(Simd128Register dst, Simd128Register src) { vrep(dst, src, Operand(0), Condition(2)); } -void TurboAssembler::I64x2Splat(Simd128Register dst, Register src) { +void MacroAssembler::I64x2Splat(Simd128Register dst, Register src) { vlvg(dst, src, MemOperand(r0, 0), Condition(3)); vrep(dst, dst, Operand(0), Condition(3)); } -void TurboAssembler::I32x4Splat(Simd128Register dst, Register src) { +void MacroAssembler::I32x4Splat(Simd128Register dst, Register src) { vlvg(dst, src, MemOperand(r0, 0), Condition(2)); vrep(dst, dst, Operand(0), Condition(2)); } -void TurboAssembler::I16x8Splat(Simd128Register dst, Register src) { +void MacroAssembler::I16x8Splat(Simd128Register dst, Register src) { vlvg(dst, src, MemOperand(r0, 0), Condition(1)); vrep(dst, dst, Operand(0), Condition(1)); } -void TurboAssembler::I8x16Splat(Simd128Register dst, Register src) { +void MacroAssembler::I8x16Splat(Simd128Register dst, Register src) { vlvg(dst, src, MemOperand(r0, 0), Condition(0)); vrep(dst, dst, Operand(0), Condition(0)); } -void TurboAssembler::F64x2ExtractLane(DoubleRegister dst, Simd128Register src, +void MacroAssembler::F64x2ExtractLane(DoubleRegister dst, Simd128Register src, uint8_t imm_lane_idx, Register) { vrep(dst, src, Operand(1 - imm_lane_idx), Condition(3)); } -void TurboAssembler::F32x4ExtractLane(DoubleRegister dst, Simd128Register src, +void MacroAssembler::F32x4ExtractLane(DoubleRegister dst, Simd128Register src, uint8_t imm_lane_idx, Register) { vrep(dst, src, Operand(3 - imm_lane_idx), Condition(2)); } -void TurboAssembler::I64x2ExtractLane(Register dst, Simd128Register src, +void MacroAssembler::I64x2ExtractLane(Register dst, Simd128Register src, uint8_t imm_lane_idx, Register) { vlgv(dst, src, MemOperand(r0, 1 - imm_lane_idx), Condition(3)); } -void TurboAssembler::I32x4ExtractLane(Register dst, Simd128Register src, +void MacroAssembler::I32x4ExtractLane(Register dst, Simd128Register src, uint8_t imm_lane_idx, Register) { vlgv(dst, src, MemOperand(r0, 3 - imm_lane_idx), Condition(2)); } -void TurboAssembler::I16x8ExtractLaneU(Register dst, Simd128Register src, +void MacroAssembler::I16x8ExtractLaneU(Register dst, Simd128Register src, uint8_t imm_lane_idx, Register) { vlgv(dst, src, MemOperand(r0, 7 - imm_lane_idx), Condition(1)); } -void TurboAssembler::I16x8ExtractLaneS(Register dst, Simd128Register src, +void MacroAssembler::I16x8ExtractLaneS(Register dst, Simd128Register src, uint8_t imm_lane_idx, Register scratch) { vlgv(scratch, src, MemOperand(r0, 7 - imm_lane_idx), Condition(1)); lghr(dst, scratch); } -void TurboAssembler::I8x16ExtractLaneU(Register dst, Simd128Register src, +void MacroAssembler::I8x16ExtractLaneU(Register dst, Simd128Register src, uint8_t imm_lane_idx, Register) { vlgv(dst, src, MemOperand(r0, 15 - imm_lane_idx), Condition(0)); } -void TurboAssembler::I8x16ExtractLaneS(Register dst, Simd128Register src, +void MacroAssembler::I8x16ExtractLaneS(Register dst, Simd128Register src, uint8_t imm_lane_idx, Register scratch) { vlgv(scratch, src, MemOperand(r0, 15 - imm_lane_idx), Condition(0)); lgbr(dst, scratch); } -void TurboAssembler::F64x2ReplaceLane(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F64x2ReplaceLane(Simd128Register dst, Simd128Register 
src1, DoubleRegister src2, uint8_t imm_lane_idx, Register scratch) { vlgv(scratch, src2, MemOperand(r0, 0), Condition(3)); @@ -5381,7 +5349,7 @@ void TurboAssembler::F64x2ReplaceLane(Simd128Register dst, Simd128Register src1, vlvg(dst, scratch, MemOperand(r0, 1 - imm_lane_idx), Condition(3)); } -void TurboAssembler::F32x4ReplaceLane(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F32x4ReplaceLane(Simd128Register dst, Simd128Register src1, DoubleRegister src2, uint8_t imm_lane_idx, Register scratch) { vlgv(scratch, src2, MemOperand(r0, 0), Condition(2)); @@ -5391,7 +5359,7 @@ void TurboAssembler::F32x4ReplaceLane(Simd128Register dst, Simd128Register src1, vlvg(dst, scratch, MemOperand(r0, 3 - imm_lane_idx), Condition(2)); } -void TurboAssembler::I64x2ReplaceLane(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I64x2ReplaceLane(Simd128Register dst, Simd128Register src1, Register src2, uint8_t imm_lane_idx, Register) { if (src1 != dst) { @@ -5400,7 +5368,7 @@ void TurboAssembler::I64x2ReplaceLane(Simd128Register dst, Simd128Register src1, vlvg(dst, src2, MemOperand(r0, 1 - imm_lane_idx), Condition(3)); } -void TurboAssembler::I32x4ReplaceLane(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I32x4ReplaceLane(Simd128Register dst, Simd128Register src1, Register src2, uint8_t imm_lane_idx, Register) { if (src1 != dst) { @@ -5409,7 +5377,7 @@ void TurboAssembler::I32x4ReplaceLane(Simd128Register dst, Simd128Register src1, vlvg(dst, src2, MemOperand(r0, 3 - imm_lane_idx), Condition(2)); } -void TurboAssembler::I16x8ReplaceLane(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I16x8ReplaceLane(Simd128Register dst, Simd128Register src1, Register src2, uint8_t imm_lane_idx, Register) { if (src1 != dst) { @@ -5418,7 +5386,7 @@ void TurboAssembler::I16x8ReplaceLane(Simd128Register dst, Simd128Register src1, vlvg(dst, src2, MemOperand(r0, 7 - imm_lane_idx), Condition(1)); } -void TurboAssembler::I8x16ReplaceLane(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I8x16ReplaceLane(Simd128Register dst, Simd128Register src1, Register src2, uint8_t imm_lane_idx, Register) { if (src1 != dst) { @@ -5427,19 +5395,19 @@ void TurboAssembler::I8x16ReplaceLane(Simd128Register dst, Simd128Register src1, vlvg(dst, src2, MemOperand(r0, 15 - imm_lane_idx), Condition(0)); } -void TurboAssembler::S128Not(Simd128Register dst, Simd128Register src) { +void MacroAssembler::S128Not(Simd128Register dst, Simd128Register src) { vno(dst, src, src, Condition(0), Condition(0), Condition(0)); } -void TurboAssembler::S128Zero(Simd128Register dst, Simd128Register src) { +void MacroAssembler::S128Zero(Simd128Register dst, Simd128Register src) { vx(dst, src, src, Condition(0), Condition(0), Condition(0)); } -void TurboAssembler::S128AllOnes(Simd128Register dst, Simd128Register src) { +void MacroAssembler::S128AllOnes(Simd128Register dst, Simd128Register src) { vceq(dst, src, src, Condition(0), Condition(3)); } -void TurboAssembler::S128Select(Simd128Register dst, Simd128Register src1, +void MacroAssembler::S128Select(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register mask) { vsel(dst, src1, src2, mask, Condition(0), Condition(0)); } @@ -5482,7 +5450,7 @@ void TurboAssembler::S128Select(Simd128Register dst, Simd128Register src1, V(I8x16Popcnt, vpopct, 0, 0, 0) #define EMIT_SIMD_UNOP_VRR_A(name, op, c1, c2, c3) \ - void TurboAssembler::name(Simd128Register dst, Simd128Register src) { \ + void MacroAssembler::name(Simd128Register dst, 
Simd128Register src) { \ op(dst, src, Condition(c1), Condition(c2), Condition(c3)); \ } SIMD_UNOP_LIST_VRR_A(EMIT_SIMD_UNOP_VRR_A) @@ -5503,7 +5471,7 @@ SIMD_UNOP_LIST_VRR_A(EMIT_SIMD_UNOP_VRR_A) V(I8x16GtU, vchl, 0, 0) #define EMIT_SIMD_BINOP_VRR_B(name, op, c1, c2) \ - void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \ + void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \ Simd128Register src2) { \ op(dst, src1, src2, Condition(c1), Condition(c2)); \ } @@ -5560,7 +5528,7 @@ SIMD_BINOP_LIST_VRR_B(EMIT_SIMD_BINOP_VRR_B) V(S128AndNot, vnc, 0, 0, 0) #define EMIT_SIMD_BINOP_VRR_C(name, op, c1, c2, c3) \ - void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \ + void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \ Simd128Register src2) { \ op(dst, src1, src2, Condition(c1), Condition(c2), Condition(c3)); \ } @@ -5583,13 +5551,13 @@ SIMD_BINOP_LIST_VRR_C(EMIT_SIMD_BINOP_VRR_C) V(I8x16ShrU, vesrlv, 0) #define EMIT_SIMD_SHIFT(name, op, c1) \ - void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \ + void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \ Register src2, Simd128Register scratch) { \ vlvg(scratch, src2, MemOperand(r0, 0), Condition(c1)); \ vrep(scratch, scratch, Operand(0), Condition(c1)); \ op(dst, src1, scratch, Condition(0), Condition(0), Condition(c1)); \ } \ - void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \ + void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \ const Operand& src2, Register scratch1, \ Simd128Register scratch2) { \ mov(scratch1, src2); \ @@ -5614,7 +5582,7 @@ SIMD_SHIFT_LIST(EMIT_SIMD_SHIFT) V(I16x8ExtMulHighI8x16U, vmle, vmlo, vmrh, 0) #define EMIT_SIMD_EXT_MUL(name, mul_even, mul_odd, merge, mode) \ - void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \ + void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \ Simd128Register src2, Simd128Register scratch) { \ mul_even(scratch, src1, src2, Condition(0), Condition(0), \ Condition(mode)); \ @@ -5632,7 +5600,7 @@ SIMD_EXT_MUL_LIST(EMIT_SIMD_EXT_MUL) V(I8x16AllTrue, 0) #define EMIT_SIMD_ALL_TRUE(name, mode) \ - void TurboAssembler::name(Register dst, Simd128Register src, \ + void MacroAssembler::name(Register dst, Simd128Register src, \ Register scratch1, Simd128Register scratch2) { \ mov(scratch1, Operand(1)); \ xgr(dst, dst); \ @@ -5653,7 +5621,7 @@ SIMD_ALL_TRUE_LIST(EMIT_SIMD_ALL_TRUE) V(F32x4Qfms, vfnms, 2) #define EMIT_SIMD_QFM(name, op, c1) \ - void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \ + void MacroAssembler::name(Simd128Register dst, Simd128Register src1, \ Simd128Register src2, Simd128Register src3) { \ op(dst, src1, src2, src3, Condition(c1), Condition(0)); \ } @@ -5661,7 +5629,7 @@ SIMD_QFM_LIST(EMIT_SIMD_QFM) #undef EMIT_SIMD_QFM #undef SIMD_QFM_LIST -void TurboAssembler::I64x2Mul(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I64x2Mul(Simd128Register dst, Simd128Register src1, Simd128Register src2, Register scratch1, Register scratch2, Register scratch3) { Register scratch_1 = scratch1; @@ -5676,112 +5644,112 @@ void TurboAssembler::I64x2Mul(Simd128Register dst, Simd128Register src1, vlvgp(dst, scratch1, scratch2); } -void TurboAssembler::F64x2Ne(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F64x2Ne(Simd128Register dst, Simd128Register src1, Simd128Register src2) { vfce(dst, src1, src2, Condition(0), Condition(0), Condition(3)); vno(dst, dst, dst, Condition(0), 
Condition(0), Condition(3)); } -void TurboAssembler::F64x2Lt(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F64x2Lt(Simd128Register dst, Simd128Register src1, Simd128Register src2) { vfch(dst, src2, src1, Condition(0), Condition(0), Condition(3)); } -void TurboAssembler::F64x2Le(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F64x2Le(Simd128Register dst, Simd128Register src1, Simd128Register src2) { vfche(dst, src2, src1, Condition(0), Condition(0), Condition(3)); } -void TurboAssembler::F32x4Ne(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F32x4Ne(Simd128Register dst, Simd128Register src1, Simd128Register src2) { vfce(dst, src1, src2, Condition(0), Condition(0), Condition(2)); vno(dst, dst, dst, Condition(0), Condition(0), Condition(2)); } -void TurboAssembler::F32x4Lt(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F32x4Lt(Simd128Register dst, Simd128Register src1, Simd128Register src2) { vfch(dst, src2, src1, Condition(0), Condition(0), Condition(2)); } -void TurboAssembler::F32x4Le(Simd128Register dst, Simd128Register src1, +void MacroAssembler::F32x4Le(Simd128Register dst, Simd128Register src1, Simd128Register src2) { vfche(dst, src2, src1, Condition(0), Condition(0), Condition(2)); } -void TurboAssembler::I64x2Ne(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I64x2Ne(Simd128Register dst, Simd128Register src1, Simd128Register src2) { vceq(dst, src1, src2, Condition(0), Condition(3)); vno(dst, dst, dst, Condition(0), Condition(0), Condition(3)); } -void TurboAssembler::I64x2GeS(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I64x2GeS(Simd128Register dst, Simd128Register src1, Simd128Register src2) { // Compute !(B > A) which is equal to A >= B. vch(dst, src2, src1, Condition(0), Condition(3)); vno(dst, dst, dst, Condition(0), Condition(0), Condition(3)); } -void TurboAssembler::I32x4Ne(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I32x4Ne(Simd128Register dst, Simd128Register src1, Simd128Register src2) { vceq(dst, src1, src2, Condition(0), Condition(2)); vno(dst, dst, dst, Condition(0), Condition(0), Condition(2)); } -void TurboAssembler::I32x4GeS(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I32x4GeS(Simd128Register dst, Simd128Register src1, Simd128Register src2) { // Compute !(B > A) which is equal to A >= B. vch(dst, src2, src1, Condition(0), Condition(2)); vno(dst, dst, dst, Condition(0), Condition(0), Condition(2)); } -void TurboAssembler::I32x4GeU(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I32x4GeU(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { vceq(scratch, src1, src2, Condition(0), Condition(2)); vchl(dst, src1, src2, Condition(0), Condition(2)); vo(dst, dst, scratch, Condition(0), Condition(0), Condition(2)); } -void TurboAssembler::I16x8Ne(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I16x8Ne(Simd128Register dst, Simd128Register src1, Simd128Register src2) { vceq(dst, src1, src2, Condition(0), Condition(1)); vno(dst, dst, dst, Condition(0), Condition(0), Condition(1)); } -void TurboAssembler::I16x8GeS(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I16x8GeS(Simd128Register dst, Simd128Register src1, Simd128Register src2) { // Compute !(B > A) which is equal to A >= B. 
vch(dst, src2, src1, Condition(0), Condition(1)); vno(dst, dst, dst, Condition(0), Condition(0), Condition(1)); } -void TurboAssembler::I16x8GeU(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I16x8GeU(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { vceq(scratch, src1, src2, Condition(0), Condition(1)); vchl(dst, src1, src2, Condition(0), Condition(1)); vo(dst, dst, scratch, Condition(0), Condition(0), Condition(1)); } -void TurboAssembler::I8x16Ne(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I8x16Ne(Simd128Register dst, Simd128Register src1, Simd128Register src2) { vceq(dst, src1, src2, Condition(0), Condition(0)); vno(dst, dst, dst, Condition(0), Condition(0), Condition(0)); } -void TurboAssembler::I8x16GeS(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I8x16GeS(Simd128Register dst, Simd128Register src1, Simd128Register src2) { // Compute !(B > A) which is equal to A >= B. vch(dst, src2, src1, Condition(0), Condition(0)); vno(dst, dst, dst, Condition(0), Condition(0), Condition(0)); } -void TurboAssembler::I8x16GeU(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I8x16GeU(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { vceq(scratch, src1, src2, Condition(0), Condition(0)); vchl(dst, src1, src2, Condition(0), Condition(0)); vo(dst, dst, scratch, Condition(0), Condition(0), Condition(0)); } -void TurboAssembler::I64x2BitMask(Register dst, Simd128Register src, +void MacroAssembler::I64x2BitMask(Register dst, Simd128Register src, Register scratch1, Simd128Register scratch2) { mov(scratch1, Operand(0x8080808080800040)); vlvg(scratch2, scratch1, MemOperand(r0, 1), Condition(3)); @@ -5789,7 +5757,7 @@ void TurboAssembler::I64x2BitMask(Register dst, Simd128Register src, vlgv(dst, scratch2, MemOperand(r0, 7), Condition(0)); } -void TurboAssembler::I32x4BitMask(Register dst, Simd128Register src, +void MacroAssembler::I32x4BitMask(Register dst, Simd128Register src, Register scratch1, Simd128Register scratch2) { mov(scratch1, Operand(0x8080808000204060)); vlvg(scratch2, scratch1, MemOperand(r0, 1), Condition(3)); @@ -5797,7 +5765,7 @@ void TurboAssembler::I32x4BitMask(Register dst, Simd128Register src, vlgv(dst, scratch2, MemOperand(r0, 7), Condition(0)); } -void TurboAssembler::I16x8BitMask(Register dst, Simd128Register src, +void MacroAssembler::I16x8BitMask(Register dst, Simd128Register src, Register scratch1, Simd128Register scratch2) { mov(scratch1, Operand(0x10203040506070)); vlvg(scratch2, scratch1, MemOperand(r0, 1), Condition(3)); @@ -5805,19 +5773,19 @@ void TurboAssembler::I16x8BitMask(Register dst, Simd128Register src, vlgv(dst, scratch2, MemOperand(r0, 7), Condition(0)); } -void TurboAssembler::F64x2ConvertLowI32x4S(Simd128Register dst, +void MacroAssembler::F64x2ConvertLowI32x4S(Simd128Register dst, Simd128Register src) { vupl(dst, src, Condition(0), Condition(0), Condition(2)); vcdg(dst, dst, Condition(4), Condition(0), Condition(3)); } -void TurboAssembler::F64x2ConvertLowI32x4U(Simd128Register dst, +void MacroAssembler::F64x2ConvertLowI32x4U(Simd128Register dst, Simd128Register src) { vupll(dst, src, Condition(0), Condition(0), Condition(2)); vcdlg(dst, dst, Condition(4), Condition(0), Condition(3)); } -void TurboAssembler::I8x16BitMask(Register dst, Simd128Register src, +void MacroAssembler::I8x16BitMask(Register dst, Simd128Register src, Register scratch1, Register scratch2, Simd128Register scratch3) { mov(scratch1, 
Operand(0x4048505860687078)); @@ -5827,7 +5795,7 @@ void TurboAssembler::I8x16BitMask(Register dst, Simd128Register src, vlgv(dst, scratch3, MemOperand(r0, 3), Condition(1)); } -void TurboAssembler::V128AnyTrue(Register dst, Simd128Register src, +void MacroAssembler::V128AnyTrue(Register dst, Simd128Register src, Register scratch) { mov(dst, Operand(1)); xgr(scratch, scratch); @@ -5842,7 +5810,7 @@ void TurboAssembler::V128AnyTrue(Register dst, Simd128Register src, convert(scratch2, scratch1, kRoundToZero); \ vlvg(dst, scratch2, MemOperand(r0, index), Condition(2)); \ } -void TurboAssembler::I32x4SConvertF32x4(Simd128Register dst, +void MacroAssembler::I32x4SConvertF32x4(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Register scratch2) { @@ -5856,7 +5824,7 @@ void TurboAssembler::I32x4SConvertF32x4(Simd128Register dst, } } -void TurboAssembler::I32x4UConvertF32x4(Simd128Register dst, +void MacroAssembler::I32x4UConvertF32x4(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Register scratch2) { @@ -5878,7 +5846,7 @@ void TurboAssembler::I32x4UConvertF32x4(Simd128Register dst, MovFloatToInt(scratch2, scratch1); \ vlvg(dst, scratch2, MemOperand(r0, index), Condition(2)); \ } -void TurboAssembler::F32x4SConvertI32x4(Simd128Register dst, +void MacroAssembler::F32x4SConvertI32x4(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Register scratch2) { @@ -5888,7 +5856,7 @@ void TurboAssembler::F32x4SConvertI32x4(Simd128Register dst, CONVERT_INT32_TO_FLOAT(ConvertIntToFloat, dst, src, scratch1, scratch2) } } -void TurboAssembler::F32x4UConvertI32x4(Simd128Register dst, +void MacroAssembler::F32x4UConvertI32x4(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Register scratch2) { @@ -5901,13 +5869,13 @@ void TurboAssembler::F32x4UConvertI32x4(Simd128Register dst, } #undef CONVERT_INT32_TO_FLOAT -void TurboAssembler::I16x8SConvertI32x4(Simd128Register dst, +void MacroAssembler::I16x8SConvertI32x4(Simd128Register dst, Simd128Register src1, Simd128Register src2) { vpks(dst, src2, src1, Condition(0), Condition(2)); } -void TurboAssembler::I8x16SConvertI16x8(Simd128Register dst, +void MacroAssembler::I8x16SConvertI16x8(Simd128Register dst, Simd128Register src1, Simd128Register src2) { vpks(dst, src2, src1, Condition(0), Condition(1)); @@ -5919,7 +5887,7 @@ void TurboAssembler::I8x16SConvertI16x8(Simd128Register dst, vmx(scratch, src1, kDoubleRegZero, Condition(0), Condition(0), \ Condition(mode)); \ vmx(dst, src2, kDoubleRegZero, Condition(0), Condition(0), Condition(mode)); -void TurboAssembler::I16x8UConvertI32x4(Simd128Register dst, +void MacroAssembler::I16x8UConvertI32x4(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { @@ -5928,7 +5896,7 @@ void TurboAssembler::I16x8UConvertI32x4(Simd128Register dst, vpkls(dst, dst, scratch, Condition(0), Condition(2)); } -void TurboAssembler::I8x16UConvertI16x8(Simd128Register dst, +void MacroAssembler::I8x16UConvertI16x8(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { @@ -5950,7 +5918,7 @@ void TurboAssembler::I8x16UConvertI16x8(Simd128Register dst, extract_low(scratch2, src2, Condition(0), Condition(0), Condition(mode)); \ op(scratch1, scratch1, scratch2, Condition(0), Condition(0), \ Condition(mode + 1)); -void TurboAssembler::I16x8AddSatS(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I16x8AddSatS(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register 
scratch1, Simd128Register scratch2) { @@ -5958,7 +5926,7 @@ void TurboAssembler::I16x8AddSatS(Simd128Register dst, Simd128Register src1, vpks(dst, dst, scratch1, Condition(0), Condition(2)); } -void TurboAssembler::I16x8SubSatS(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I16x8SubSatS(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch1, Simd128Register scratch2) { @@ -5966,7 +5934,7 @@ void TurboAssembler::I16x8SubSatS(Simd128Register dst, Simd128Register src1, vpks(dst, dst, scratch1, Condition(0), Condition(2)); } -void TurboAssembler::I16x8AddSatU(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I16x8AddSatU(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch1, Simd128Register scratch2) { @@ -5974,7 +5942,7 @@ void TurboAssembler::I16x8AddSatU(Simd128Register dst, Simd128Register src1, vpkls(dst, dst, scratch1, Condition(0), Condition(2)); } -void TurboAssembler::I16x8SubSatU(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I16x8SubSatU(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch1, Simd128Register scratch2) { @@ -5988,7 +5956,7 @@ void TurboAssembler::I16x8SubSatU(Simd128Register dst, Simd128Register src1, vpkls(dst, dst, scratch1, Condition(0), Condition(2)); } -void TurboAssembler::I8x16AddSatS(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I8x16AddSatS(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch1, Simd128Register scratch2) { @@ -5996,7 +5964,7 @@ void TurboAssembler::I8x16AddSatS(Simd128Register dst, Simd128Register src1, vpks(dst, dst, scratch1, Condition(0), Condition(1)); } -void TurboAssembler::I8x16SubSatS(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I8x16SubSatS(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch1, Simd128Register scratch2) { @@ -6004,7 +5972,7 @@ void TurboAssembler::I8x16SubSatS(Simd128Register dst, Simd128Register src1, vpks(dst, dst, scratch1, Condition(0), Condition(1)); } -void TurboAssembler::I8x16AddSatU(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I8x16AddSatU(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch1, Simd128Register scratch2) { @@ -6012,7 +5980,7 @@ void TurboAssembler::I8x16AddSatU(Simd128Register dst, Simd128Register src1, vpkls(dst, dst, scratch1, Condition(0), Condition(1)); } -void TurboAssembler::I8x16SubSatU(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I8x16SubSatU(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch1, Simd128Register scratch2) { @@ -6027,7 +5995,7 @@ void TurboAssembler::I8x16SubSatU(Simd128Register dst, Simd128Register src1, } #undef BINOP_EXTRACT -void TurboAssembler::F64x2PromoteLowF32x4(Simd128Register dst, +void MacroAssembler::F64x2PromoteLowF32x4(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Register scratch2, Register scratch3, @@ -6043,7 +6011,7 @@ void TurboAssembler::F64x2PromoteLowF32x4(Simd128Register dst, vlvgp(dst, scratch3, scratch4); } -void TurboAssembler::F32x4DemoteF64x2Zero(Simd128Register dst, +void MacroAssembler::F32x4DemoteF64x2Zero(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Register scratch2, Register scratch3, @@ -6071,14 +6039,14 @@ void TurboAssembler::F32x4DemoteF64x2Zero(Simd128Register dst, Condition(lane_size)); \ va(dst, scratch1, 
scratch2, Condition(0), Condition(0), \ Condition(lane_size + 1)); -void TurboAssembler::I32x4ExtAddPairwiseI16x8S(Simd128Register dst, +void MacroAssembler::I32x4ExtAddPairwiseI16x8S(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Simd128Register scratch2) { EXT_ADD_PAIRWISE(dst, src, scratch1, scratch2, 1, vme, vmo) } -void TurboAssembler::I32x4ExtAddPairwiseI16x8U(Simd128Register dst, +void MacroAssembler::I32x4ExtAddPairwiseI16x8U(Simd128Register dst, Simd128Register src, Simd128Register scratch, Simd128Register scratch2) { @@ -6086,14 +6054,14 @@ void TurboAssembler::I32x4ExtAddPairwiseI16x8U(Simd128Register dst, vsum(dst, src, scratch, Condition(0), Condition(0), Condition(1)); } -void TurboAssembler::I16x8ExtAddPairwiseI8x16S(Simd128Register dst, +void MacroAssembler::I16x8ExtAddPairwiseI8x16S(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Simd128Register scratch2) { EXT_ADD_PAIRWISE(dst, src, scratch1, scratch2, 0, vme, vmo) } -void TurboAssembler::I16x8ExtAddPairwiseI8x16U(Simd128Register dst, +void MacroAssembler::I16x8ExtAddPairwiseI8x16U(Simd128Register dst, Simd128Register src, Simd128Register scratch1, Simd128Register scratch2) { @@ -6101,7 +6069,7 @@ void TurboAssembler::I16x8ExtAddPairwiseI8x16U(Simd128Register dst, } #undef EXT_ADD_PAIRWISE -void TurboAssembler::I32x4TruncSatF64x2SZero(Simd128Register dst, +void MacroAssembler::I32x4TruncSatF64x2SZero(Simd128Register dst, Simd128Register src, Simd128Register scratch) { // NaN to 0. @@ -6113,7 +6081,7 @@ void TurboAssembler::I32x4TruncSatF64x2SZero(Simd128Register dst, vpks(dst, dst, scratch, Condition(0), Condition(3)); } -void TurboAssembler::I32x4TruncSatF64x2UZero(Simd128Register dst, +void MacroAssembler::I32x4TruncSatF64x2UZero(Simd128Register dst, Simd128Register src, Simd128Register scratch) { vclgd(scratch, src, Condition(5), Condition(0), Condition(3)); @@ -6121,14 +6089,14 @@ void TurboAssembler::I32x4TruncSatF64x2UZero(Simd128Register dst, vpkls(dst, dst, scratch, Condition(0), Condition(3)); } -void TurboAssembler::S128Const(Simd128Register dst, uint64_t high, uint64_t low, +void MacroAssembler::S128Const(Simd128Register dst, uint64_t high, uint64_t low, Register scratch1, Register scratch2) { mov(scratch1, Operand(low)); mov(scratch2, Operand(high)); vlvgp(dst, scratch2, scratch1); } -void TurboAssembler::I8x16Swizzle(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I8x16Swizzle(Simd128Register dst, Simd128Register src1, Simd128Register src2, Register scratch1, Register scratch2, Simd128Register scratch3, Simd128Register scratch4) { @@ -6148,7 +6116,7 @@ void TurboAssembler::I8x16Swizzle(Simd128Register dst, Simd128Register src1, vperm(dst, dst, scratch3, scratch4, Condition(0), Condition(0)); } -void TurboAssembler::I8x16Shuffle(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I8x16Shuffle(Simd128Register dst, Simd128Register src1, Simd128Register src2, uint64_t high, uint64_t low, Register scratch1, Register scratch2, Simd128Register scratch3) { @@ -6158,7 +6126,7 @@ void TurboAssembler::I8x16Shuffle(Simd128Register dst, Simd128Register src1, vperm(dst, src1, src2, scratch3, Condition(0), Condition(0)); } -void TurboAssembler::I32x4DotI16x8S(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I32x4DotI16x8S(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch) { vme(scratch, src1, src2, Condition(0), Condition(0), Condition(1)); @@ -6176,7 +6144,7 @@ void 
TurboAssembler::I32x4DotI16x8S(Simd128Register dst, Simd128Register src1, vrepi(scratch, Operand(15), Condition(2)); \ vesrav(accumulator, accumulator, scratch, Condition(0), Condition(0), \ Condition(2)); -void TurboAssembler::I16x8Q15MulRSatS(Simd128Register dst, Simd128Register src1, +void MacroAssembler::I16x8Q15MulRSatS(Simd128Register dst, Simd128Register src1, Simd128Register src2, Simd128Register scratch1, Simd128Register scratch2, @@ -6206,7 +6174,7 @@ void TurboAssembler::I16x8Q15MulRSatS(Simd128Register dst, Simd128Register src1, V(8x16, vlrep, LoadU8, 0) #define LOAD_SPLAT(name, vector_instr, scalar_instr, condition) \ - void TurboAssembler::LoadAndSplat##name##LE( \ + void MacroAssembler::LoadAndSplat##name##LE( \ Simd128Register dst, const MemOperand& mem, Register scratch) { \ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \ vector_instr(dst, mem, Condition(condition)); \ @@ -6229,7 +6197,7 @@ LOAD_SPLAT_LIST(LOAD_SPLAT) V(8x8S, vuph, 0) #define LOAD_EXTEND(name, unpack_instr, condition) \ - void TurboAssembler::LoadAndExtend##name##LE( \ + void MacroAssembler::LoadAndExtend##name##LE( \ Simd128Register dst, const MemOperand& mem, Register scratch) { \ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \ vlebrg(dst, mem, Condition(0)); \ @@ -6243,7 +6211,7 @@ LOAD_EXTEND_LIST(LOAD_EXTEND) #undef LOAD_EXTEND #undef LOAD_EXTEND -void TurboAssembler::LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem, +void MacroAssembler::LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem, Register scratch) { vx(dst, dst, dst, Condition(0), Condition(0), Condition(0)); if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { @@ -6254,7 +6222,7 @@ void TurboAssembler::LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem, vlvg(dst, scratch, MemOperand(r0, 3), Condition(2)); } -void TurboAssembler::LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem, +void MacroAssembler::LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem, Register scratch) { vx(dst, dst, dst, Condition(0), Condition(0), Condition(0)); if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { @@ -6272,7 +6240,7 @@ void TurboAssembler::LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem, V(8, vleb, LoadU8, 0) #define LOAD_LANE(name, vector_instr, scalar_instr, condition) \ - void TurboAssembler::LoadLane##name##LE(Simd128Register dst, \ + void MacroAssembler::LoadLane##name##LE(Simd128Register dst, \ const MemOperand& mem, int lane, \ Register scratch) { \ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \ @@ -6293,7 +6261,7 @@ LOAD_LANE_LIST(LOAD_LANE) V(8, vsteb, StoreU8, 0) #define STORE_LANE(name, vector_instr, scalar_instr, condition) \ - void TurboAssembler::StoreLane##name##LE(Simd128Register src, \ + void MacroAssembler::StoreLane##name##LE(Simd128Register src, \ const MemOperand& mem, int lane, \ Register scratch) { \ if (CAN_LOAD_STORE_REVERSE && is_uint12(mem.offset())) { \ @@ -6317,10 +6285,10 @@ void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) { kind == StackLimitKind::kRealStackLimit ? 
ExternalReference::address_of_real_jslimit(isolate) : ExternalReference::address_of_jslimit(isolate); - DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit)); + DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit)); intptr_t offset = - TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit); + MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit); CHECK(is_int32(offset)); LoadU64(destination, MemOperand(kRootRegister, offset)); } diff --git a/src/codegen/s390/macro-assembler-s390.h b/src/codegen/s390/macro-assembler-s390.h index a008a3ab7d..b076096f5c 100644 --- a/src/codegen/s390/macro-assembler-s390.h +++ b/src/codegen/s390/macro-assembler-s390.h @@ -41,9 +41,9 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg, Register reg5 = no_reg, Register reg6 = no_reg); -class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { +class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase { public: - using TurboAssemblerBase::TurboAssemblerBase; + using MacroAssemblerBase::MacroAssemblerBase; void CallBuiltin(Builtin builtin, Condition cond = al); void TailCallBuiltin(Builtin builtin, Condition cond = al); @@ -1464,17 +1464,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { #endif } - // Loads a field containing a HeapObject and decompresses it if pointer - // compression is enabled. - void LoadTaggedPointerField(const Register& destination, - const MemOperand& field_operand, - const Register& scratch = no_reg); - void LoadTaggedSignedField(Register destination, MemOperand field_operand); - // Loads a field containing any tagged value and decompresses it if necessary. - void LoadAnyTaggedField(const Register& destination, - const MemOperand& field_operand, - const Register& scratch = no_reg); + void LoadTaggedField(const Register& destination, + const MemOperand& field_operand, + const Register& scratch = no_reg); + void LoadTaggedSignedField(Register destination, MemOperand field_operand); // Loads a field containing smi value and untags it. void SmiUntagField(Register dst, const MemOperand& src); @@ -1486,11 +1480,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void DecompressTaggedSigned(Register destination, MemOperand field_operand); void DecompressTaggedSigned(Register destination, Register src); - void DecompressTaggedPointer(Register destination, MemOperand field_operand); - void DecompressTaggedPointer(Register destination, Register source); - void DecompressTaggedPointer(const Register& destination, Tagged_t immediate); - void DecompressAnyTagged(Register destination, MemOperand field_operand); - void DecompressAnyTagged(Register destination, Register source); + void DecompressTagged(Register destination, MemOperand field_operand); + void DecompressTagged(Register destination, Register source); + void DecompressTagged(const Register& destination, Tagged_t immediate); // CountLeadingZeros will corrupt the scratch register pair (eg. 
r0:r1) void CountLeadingZerosU32(Register dst, Register src, @@ -1502,22 +1494,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void CountTrailingZerosU64(Register dst, Register src, Register scratch_pair = r0); - private: - static const int kSmiShift = kSmiTagSize + kSmiShiftSize; - - void CallCFunctionHelper(Register function, int num_reg_arguments, - int num_double_arguments); - - void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al); - int CalculateStackPassedWords(int num_reg_arguments, - int num_double_arguments); -}; - -// MacroAssembler implements a collection of frequently used macros. -class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { - public: - using TurboAssembler::TurboAssembler; - void LoadStackLimit(Register destination, StackLimitKind kind); // It assumes that the arguments are located below the stack pointer. // argc is the number of arguments not including the receiver. @@ -1803,6 +1779,14 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { private: static const int kSmiShift = kSmiTagSize + kSmiShiftSize; + + void CallCFunctionHelper(Register function, int num_reg_arguments, + int num_double_arguments); + + void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al); + int CalculateStackPassedWords(int num_reg_arguments, + int num_double_arguments); + // Helper functions for generating invokes. void InvokePrologue(Register expected_parameter_count, Register actual_parameter_count, Label* done, diff --git a/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc b/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc index 60e1bce38e..fb7fb8d582 100644 --- a/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc +++ b/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc @@ -27,7 +27,7 @@ namespace v8 { namespace internal { -void SharedTurboAssembler::Move(Register dst, uint32_t src) { +void SharedMacroAssemblerBase::Move(Register dst, uint32_t src) { // Helper to paper over the different assembler function names. #if V8_TARGET_ARCH_IA32 mov(dst, Immediate(src)); @@ -38,7 +38,7 @@ void SharedTurboAssembler::Move(Register dst, uint32_t src) { #endif } -void SharedTurboAssembler::Move(Register dst, Register src) { +void SharedMacroAssemblerBase::Move(Register dst, Register src) { // Helper to paper over the different assembler function names. if (dst != src) { #if V8_TARGET_ARCH_IA32 @@ -51,7 +51,7 @@ void SharedTurboAssembler::Move(Register dst, Register src) { } } -void SharedTurboAssembler::Add(Register dst, Immediate src) { +void SharedMacroAssemblerBase::Add(Register dst, Immediate src) { // Helper to paper over the different assembler function names. #if V8_TARGET_ARCH_IA32 add(dst, src); @@ -62,7 +62,7 @@ void SharedTurboAssembler::Add(Register dst, Immediate src) { #endif } -void SharedTurboAssembler::And(Register dst, Immediate src) { +void SharedMacroAssemblerBase::And(Register dst, Immediate src) { // Helper to paper over the different assembler function names. 
#if V8_TARGET_ARCH_IA32 and_(dst, src); @@ -77,8 +77,8 @@ void SharedTurboAssembler::And(Register dst, Immediate src) { #endif } -void SharedTurboAssembler::Movhps(XMMRegister dst, XMMRegister src1, - Operand src2) { +void SharedMacroAssemblerBase::Movhps(XMMRegister dst, XMMRegister src1, + Operand src2) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vmovhps(dst, src1, src2); @@ -90,8 +90,8 @@ void SharedTurboAssembler::Movhps(XMMRegister dst, XMMRegister src1, } } -void SharedTurboAssembler::Movlps(XMMRegister dst, XMMRegister src1, - Operand src2) { +void SharedMacroAssemblerBase::Movlps(XMMRegister dst, XMMRegister src1, + Operand src2) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vmovlps(dst, src1, src2); @@ -102,8 +102,8 @@ void SharedTurboAssembler::Movlps(XMMRegister dst, XMMRegister src1, movlps(dst, src2); } } -void SharedTurboAssembler::Blendvpd(XMMRegister dst, XMMRegister src1, - XMMRegister src2, XMMRegister mask) { +void SharedMacroAssemblerBase::Blendvpd(XMMRegister dst, XMMRegister src1, + XMMRegister src2, XMMRegister mask) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vblendvpd(dst, src1, src2, mask); @@ -115,8 +115,8 @@ void SharedTurboAssembler::Blendvpd(XMMRegister dst, XMMRegister src1, } } -void SharedTurboAssembler::Blendvps(XMMRegister dst, XMMRegister src1, - XMMRegister src2, XMMRegister mask) { +void SharedMacroAssemblerBase::Blendvps(XMMRegister dst, XMMRegister src1, + XMMRegister src2, XMMRegister mask) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vblendvps(dst, src1, src2, mask); @@ -128,8 +128,8 @@ void SharedTurboAssembler::Blendvps(XMMRegister dst, XMMRegister src1, } } -void SharedTurboAssembler::Pblendvb(XMMRegister dst, XMMRegister src1, - XMMRegister src2, XMMRegister mask) { +void SharedMacroAssemblerBase::Pblendvb(XMMRegister dst, XMMRegister src1, + XMMRegister src2, XMMRegister mask) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vpblendvb(dst, src1, src2, mask); @@ -141,8 +141,8 @@ void SharedTurboAssembler::Pblendvb(XMMRegister dst, XMMRegister src1, } } -void SharedTurboAssembler::Shufps(XMMRegister dst, XMMRegister src1, - XMMRegister src2, uint8_t imm8) { +void SharedMacroAssemblerBase::Shufps(XMMRegister dst, XMMRegister src1, + XMMRegister src2, uint8_t imm8) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); vshufps(dst, src1, src2, imm8); @@ -154,8 +154,8 @@ void SharedTurboAssembler::Shufps(XMMRegister dst, XMMRegister src1, } } -void SharedTurboAssembler::F64x2ExtractLane(DoubleRegister dst, XMMRegister src, - uint8_t lane) { +void SharedMacroAssemblerBase::F64x2ExtractLane(DoubleRegister dst, + XMMRegister src, uint8_t lane) { ASM_CODE_COMMENT(this); if (lane == 0) { if (dst != src) { @@ -173,8 +173,10 @@ void SharedTurboAssembler::F64x2ExtractLane(DoubleRegister dst, XMMRegister src, } } -void SharedTurboAssembler::F64x2ReplaceLane(XMMRegister dst, XMMRegister src, - DoubleRegister rep, uint8_t lane) { +void SharedMacroAssemblerBase::F64x2ReplaceLane(XMMRegister dst, + XMMRegister src, + DoubleRegister rep, + uint8_t lane) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); @@ -197,8 +199,8 @@ void SharedTurboAssembler::F64x2ReplaceLane(XMMRegister dst, XMMRegister src, } } -void SharedTurboAssembler::F32x4Min(XMMRegister dst, XMMRegister lhs, - XMMRegister rhs, XMMRegister scratch) { +void 
SharedMacroAssemblerBase::F32x4Min(XMMRegister dst, XMMRegister lhs, + XMMRegister rhs, XMMRegister scratch) { ASM_CODE_COMMENT(this); // The minps instruction doesn't propagate NaNs and +0's in its first // operand. Perform minps in both orders, merge the results, and adjust. @@ -226,8 +228,8 @@ void SharedTurboAssembler::F32x4Min(XMMRegister dst, XMMRegister lhs, Andnps(dst, dst, scratch); } -void SharedTurboAssembler::F32x4Max(XMMRegister dst, XMMRegister lhs, - XMMRegister rhs, XMMRegister scratch) { +void SharedMacroAssemblerBase::F32x4Max(XMMRegister dst, XMMRegister lhs, + XMMRegister rhs, XMMRegister scratch) { ASM_CODE_COMMENT(this); // The maxps instruction doesn't propagate NaNs and +0's in its first // operand. Perform maxps in both orders, merge the results, and adjust. @@ -258,8 +260,8 @@ void SharedTurboAssembler::F32x4Max(XMMRegister dst, XMMRegister lhs, Andnps(dst, dst, scratch); } -void SharedTurboAssembler::F64x2Min(XMMRegister dst, XMMRegister lhs, - XMMRegister rhs, XMMRegister scratch) { +void SharedMacroAssemblerBase::F64x2Min(XMMRegister dst, XMMRegister lhs, + XMMRegister rhs, XMMRegister scratch) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); @@ -296,8 +298,8 @@ void SharedTurboAssembler::F64x2Min(XMMRegister dst, XMMRegister lhs, } } -void SharedTurboAssembler::F64x2Max(XMMRegister dst, XMMRegister lhs, - XMMRegister rhs, XMMRegister scratch) { +void SharedMacroAssemblerBase::F64x2Max(XMMRegister dst, XMMRegister lhs, + XMMRegister rhs, XMMRegister scratch) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); @@ -336,7 +338,7 @@ void SharedTurboAssembler::F64x2Max(XMMRegister dst, XMMRegister lhs, } } -void SharedTurboAssembler::F32x4Splat(XMMRegister dst, DoubleRegister src) { +void SharedMacroAssemblerBase::F32x4Splat(XMMRegister dst, DoubleRegister src) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX2)) { CpuFeatureScope avx2_scope(this, AVX2); @@ -354,8 +356,8 @@ void SharedTurboAssembler::F32x4Splat(XMMRegister dst, DoubleRegister src) { } } -void SharedTurboAssembler::F32x4ExtractLane(FloatRegister dst, XMMRegister src, - uint8_t lane) { +void SharedMacroAssemblerBase::F32x4ExtractLane(FloatRegister dst, + XMMRegister src, uint8_t lane) { ASM_CODE_COMMENT(this); DCHECK_LT(lane, 4); // These instructions are shorter than insertps, but will leave junk in @@ -376,8 +378,8 @@ void SharedTurboAssembler::F32x4ExtractLane(FloatRegister dst, XMMRegister src, } } -void SharedTurboAssembler::S128Store32Lane(Operand dst, XMMRegister src, - uint8_t laneidx) { +void SharedMacroAssemblerBase::S128Store32Lane(Operand dst, XMMRegister src, + uint8_t laneidx) { ASM_CODE_COMMENT(this); if (laneidx == 0) { Movss(dst, src); @@ -388,8 +390,8 @@ void SharedTurboAssembler::S128Store32Lane(Operand dst, XMMRegister src, } template -void SharedTurboAssembler::I8x16SplatPreAvx2(XMMRegister dst, Op src, - XMMRegister scratch) { +void SharedMacroAssemblerBase::I8x16SplatPreAvx2(XMMRegister dst, Op src, + XMMRegister scratch) { ASM_CODE_COMMENT(this); DCHECK(!CpuFeatures::IsSupported(AVX2)); CpuFeatureScope ssse3_scope(this, SSSE3); @@ -398,8 +400,8 @@ void SharedTurboAssembler::I8x16SplatPreAvx2(XMMRegister dst, Op src, Pshufb(dst, scratch); } -void SharedTurboAssembler::I8x16Splat(XMMRegister dst, Register src, - XMMRegister scratch) { +void SharedMacroAssemblerBase::I8x16Splat(XMMRegister dst, Register src, + XMMRegister scratch) { ASM_CODE_COMMENT(this); if 
(CpuFeatures::IsSupported(AVX2)) { CpuFeatureScope avx2_scope(this, AVX2); @@ -410,8 +412,8 @@ void SharedTurboAssembler::I8x16Splat(XMMRegister dst, Register src, } } -void SharedTurboAssembler::I8x16Splat(XMMRegister dst, Operand src, - XMMRegister scratch) { +void SharedMacroAssemblerBase::I8x16Splat(XMMRegister dst, Operand src, + XMMRegister scratch) { ASM_CODE_COMMENT(this); DCHECK_OPERAND_IS_NOT_REG(src); if (CpuFeatures::IsSupported(AVX2)) { @@ -422,9 +424,9 @@ void SharedTurboAssembler::I8x16Splat(XMMRegister dst, Operand src, } } -void SharedTurboAssembler::I8x16Shl(XMMRegister dst, XMMRegister src1, - uint8_t src2, Register tmp1, - XMMRegister tmp2) { +void SharedMacroAssemblerBase::I8x16Shl(XMMRegister dst, XMMRegister src1, + uint8_t src2, Register tmp1, + XMMRegister tmp2) { ASM_CODE_COMMENT(this); DCHECK_NE(dst, tmp2); // Perform 16-bit shift, then mask away low bits. @@ -444,9 +446,9 @@ void SharedTurboAssembler::I8x16Shl(XMMRegister dst, XMMRegister src1, Pand(dst, tmp2); } -void SharedTurboAssembler::I8x16Shl(XMMRegister dst, XMMRegister src1, - Register src2, Register tmp1, - XMMRegister tmp2, XMMRegister tmp3) { +void SharedMacroAssemblerBase::I8x16Shl(XMMRegister dst, XMMRegister src1, + Register src2, Register tmp1, + XMMRegister tmp2, XMMRegister tmp3) { ASM_CODE_COMMENT(this); DCHECK(!AreAliased(dst, tmp2, tmp3)); DCHECK(!AreAliased(src1, tmp2, tmp3)); @@ -471,8 +473,8 @@ void SharedTurboAssembler::I8x16Shl(XMMRegister dst, XMMRegister src1, Psllw(dst, dst, tmp3); } -void SharedTurboAssembler::I8x16ShrS(XMMRegister dst, XMMRegister src1, - uint8_t src2, XMMRegister tmp) { +void SharedMacroAssemblerBase::I8x16ShrS(XMMRegister dst, XMMRegister src1, + uint8_t src2, XMMRegister tmp) { ASM_CODE_COMMENT(this); // Unpack bytes into words, do word (16-bit) shifts, and repack. 
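// [Editor's sketch, not part of the patch] The widening trick described in the
// comment above, in plain scalar C++: SSE has no per-byte arithmetic shift, so
// each byte is treated as a 16-bit word, shifted, then narrowed back:
//   int8_t shr_s(int8_t x, int shift) {  // assumes shift < 8
//     return static_cast<int8_t>(static_cast<int16_t>(x) >> shift);
//   }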
DCHECK_NE(dst, tmp); @@ -485,9 +487,9 @@ void SharedTurboAssembler::I8x16ShrS(XMMRegister dst, XMMRegister src1, Packsswb(dst, tmp); } -void SharedTurboAssembler::I8x16ShrS(XMMRegister dst, XMMRegister src1, - Register src2, Register tmp1, - XMMRegister tmp2, XMMRegister tmp3) { +void SharedMacroAssemblerBase::I8x16ShrS(XMMRegister dst, XMMRegister src1, + Register src2, Register tmp1, + XMMRegister tmp2, XMMRegister tmp3) { ASM_CODE_COMMENT(this); DCHECK(!AreAliased(dst, tmp2, tmp3)); DCHECK_NE(src1, tmp2); @@ -506,9 +508,9 @@ void SharedTurboAssembler::I8x16ShrS(XMMRegister dst, XMMRegister src1, Packsswb(dst, tmp2); } -void SharedTurboAssembler::I8x16ShrU(XMMRegister dst, XMMRegister src1, - uint8_t src2, Register tmp1, - XMMRegister tmp2) { +void SharedMacroAssemblerBase::I8x16ShrU(XMMRegister dst, XMMRegister src1, + uint8_t src2, Register tmp1, + XMMRegister tmp2) { ASM_CODE_COMMENT(this); DCHECK_NE(dst, tmp2); if (!CpuFeatures::IsSupported(AVX) && (dst != src1)) { @@ -528,9 +530,9 @@ void SharedTurboAssembler::I8x16ShrU(XMMRegister dst, XMMRegister src1, Pand(dst, tmp2); } -void SharedTurboAssembler::I8x16ShrU(XMMRegister dst, XMMRegister src1, - Register src2, Register tmp1, - XMMRegister tmp2, XMMRegister tmp3) { +void SharedMacroAssemblerBase::I8x16ShrU(XMMRegister dst, XMMRegister src1, + Register src2, Register tmp1, + XMMRegister tmp2, XMMRegister tmp3) { ASM_CODE_COMMENT(this); DCHECK(!AreAliased(dst, tmp2, tmp3)); DCHECK_NE(src1, tmp2); @@ -550,14 +552,14 @@ void SharedTurboAssembler::I8x16ShrU(XMMRegister dst, XMMRegister src1, } template -void SharedTurboAssembler::I16x8SplatPreAvx2(XMMRegister dst, Op src) { +void SharedMacroAssemblerBase::I16x8SplatPreAvx2(XMMRegister dst, Op src) { DCHECK(!CpuFeatures::IsSupported(AVX2)); Movd(dst, src); Pshuflw(dst, dst, uint8_t{0x0}); Punpcklqdq(dst, dst); } -void SharedTurboAssembler::I16x8Splat(XMMRegister dst, Register src) { +void SharedMacroAssemblerBase::I16x8Splat(XMMRegister dst, Register src) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX2)) { CpuFeatureScope avx2_scope(this, AVX2); @@ -568,7 +570,7 @@ void SharedTurboAssembler::I16x8Splat(XMMRegister dst, Register src) { } } -void SharedTurboAssembler::I16x8Splat(XMMRegister dst, Operand src) { +void SharedMacroAssemblerBase::I16x8Splat(XMMRegister dst, Operand src) { ASM_CODE_COMMENT(this); DCHECK_OPERAND_IS_NOT_REG(src); if (CpuFeatures::IsSupported(AVX2)) { @@ -579,18 +581,20 @@ void SharedTurboAssembler::I16x8Splat(XMMRegister dst, Operand src) { } } -void SharedTurboAssembler::I16x8ExtMulLow(XMMRegister dst, XMMRegister src1, - XMMRegister src2, XMMRegister scratch, - bool is_signed) { +void SharedMacroAssemblerBase::I16x8ExtMulLow(XMMRegister dst, XMMRegister src1, + XMMRegister src2, + XMMRegister scratch, + bool is_signed) { ASM_CODE_COMMENT(this); is_signed ? Pmovsxbw(scratch, src1) : Pmovzxbw(scratch, src1); is_signed ? 
Pmovsxbw(dst, src2) : Pmovzxbw(dst, src2); Pmullw(dst, scratch); } -void SharedTurboAssembler::I16x8ExtMulHighS(XMMRegister dst, XMMRegister src1, - XMMRegister src2, - XMMRegister scratch) { +void SharedMacroAssemblerBase::I16x8ExtMulHighS(XMMRegister dst, + XMMRegister src1, + XMMRegister src2, + XMMRegister scratch) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); @@ -612,9 +616,10 @@ void SharedTurboAssembler::I16x8ExtMulHighS(XMMRegister dst, XMMRegister src1, } } -void SharedTurboAssembler::I16x8ExtMulHighU(XMMRegister dst, XMMRegister src1, - XMMRegister src2, - XMMRegister scratch) { +void SharedMacroAssemblerBase::I16x8ExtMulHighU(XMMRegister dst, + XMMRegister src1, + XMMRegister src2, + XMMRegister scratch) { ASM_CODE_COMMENT(this); // The logic here is slightly complicated to handle all the cases of register // aliasing. This allows flexibility for callers in TurboFan and Liftoff. @@ -662,8 +667,8 @@ void SharedTurboAssembler::I16x8ExtMulHighU(XMMRegister dst, XMMRegister src1, } } -void SharedTurboAssembler::I16x8SConvertI8x16High(XMMRegister dst, - XMMRegister src) { +void SharedMacroAssemblerBase::I16x8SConvertI8x16High(XMMRegister dst, + XMMRegister src) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); @@ -685,9 +690,9 @@ void SharedTurboAssembler::I16x8SConvertI8x16High(XMMRegister dst, } } -void SharedTurboAssembler::I16x8UConvertI8x16High(XMMRegister dst, - XMMRegister src, - XMMRegister scratch) { +void SharedMacroAssemblerBase::I16x8UConvertI8x16High(XMMRegister dst, + XMMRegister src, + XMMRegister scratch) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); @@ -711,9 +716,10 @@ void SharedTurboAssembler::I16x8UConvertI8x16High(XMMRegister dst, } } -void SharedTurboAssembler::I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1, - XMMRegister src2, - XMMRegister scratch) { +void SharedMacroAssemblerBase::I16x8Q15MulRSatS(XMMRegister dst, + XMMRegister src1, + XMMRegister src2, + XMMRegister scratch) { ASM_CODE_COMMENT(this); // k = i16x8.splat(0x8000) Pcmpeqd(scratch, scratch); @@ -729,9 +735,9 @@ void SharedTurboAssembler::I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1, Pxor(dst, scratch); } -void SharedTurboAssembler::I16x8DotI8x16I7x16S(XMMRegister dst, - XMMRegister src1, - XMMRegister src2) { +void SharedMacroAssemblerBase::I16x8DotI8x16I7x16S(XMMRegister dst, + XMMRegister src1, + XMMRegister src2) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); @@ -744,7 +750,7 @@ void SharedTurboAssembler::I16x8DotI8x16I7x16S(XMMRegister dst, } } -void SharedTurboAssembler::I32x4DotI8x16I7x16AddS( +void SharedMacroAssemblerBase::I32x4DotI8x16I7x16AddS( XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister src3, XMMRegister scratch, XMMRegister splat_reg) { ASM_CODE_COMMENT(this); @@ -768,9 +774,9 @@ void SharedTurboAssembler::I32x4DotI8x16I7x16AddS( } } -void SharedTurboAssembler::I32x4ExtAddPairwiseI16x8U(XMMRegister dst, - XMMRegister src, - XMMRegister tmp) { +void SharedMacroAssemblerBase::I32x4ExtAddPairwiseI16x8U(XMMRegister dst, + XMMRegister src, + XMMRegister tmp) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); @@ -812,9 +818,10 @@ void SharedTurboAssembler::I32x4ExtAddPairwiseI16x8U(XMMRegister dst, // 1. Multiply low word into scratch. // 2. Multiply high word (can be signed or unsigned) into dst. 
// 3. Unpack and interleave scratch and dst into dst. -void SharedTurboAssembler::I32x4ExtMul(XMMRegister dst, XMMRegister src1, - XMMRegister src2, XMMRegister scratch, - bool low, bool is_signed) { +void SharedMacroAssemblerBase::I32x4ExtMul(XMMRegister dst, XMMRegister src1, + XMMRegister src2, + XMMRegister scratch, bool low, + bool is_signed) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); @@ -830,8 +837,8 @@ void SharedTurboAssembler::I32x4ExtMul(XMMRegister dst, XMMRegister src1, } } -void SharedTurboAssembler::I32x4SConvertI16x8High(XMMRegister dst, - XMMRegister src) { +void SharedMacroAssemblerBase::I32x4SConvertI16x8High(XMMRegister dst, + XMMRegister src) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); @@ -853,9 +860,9 @@ void SharedTurboAssembler::I32x4SConvertI16x8High(XMMRegister dst, } } -void SharedTurboAssembler::I32x4UConvertI16x8High(XMMRegister dst, - XMMRegister src, - XMMRegister scratch) { +void SharedMacroAssemblerBase::I32x4UConvertI16x8High(XMMRegister dst, + XMMRegister src, + XMMRegister scratch) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); @@ -879,8 +886,8 @@ void SharedTurboAssembler::I32x4UConvertI16x8High(XMMRegister dst, } } -void SharedTurboAssembler::I64x2Neg(XMMRegister dst, XMMRegister src, - XMMRegister scratch) { +void SharedMacroAssemblerBase::I64x2Neg(XMMRegister dst, XMMRegister src, + XMMRegister scratch) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); @@ -896,8 +903,8 @@ void SharedTurboAssembler::I64x2Neg(XMMRegister dst, XMMRegister src, } } -void SharedTurboAssembler::I64x2Abs(XMMRegister dst, XMMRegister src, - XMMRegister scratch) { +void SharedMacroAssemblerBase::I64x2Abs(XMMRegister dst, XMMRegister src, + XMMRegister scratch) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); @@ -917,8 +924,8 @@ void SharedTurboAssembler::I64x2Abs(XMMRegister dst, XMMRegister src, } } -void SharedTurboAssembler::I64x2GtS(XMMRegister dst, XMMRegister src0, - XMMRegister src1, XMMRegister scratch) { +void SharedMacroAssemblerBase::I64x2GtS(XMMRegister dst, XMMRegister src0, + XMMRegister src1, XMMRegister scratch) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); @@ -951,8 +958,8 @@ void SharedTurboAssembler::I64x2GtS(XMMRegister dst, XMMRegister src0, } } -void SharedTurboAssembler::I64x2GeS(XMMRegister dst, XMMRegister src0, - XMMRegister src1, XMMRegister scratch) { +void SharedMacroAssemblerBase::I64x2GeS(XMMRegister dst, XMMRegister src0, + XMMRegister src1, XMMRegister scratch) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); @@ -986,8 +993,8 @@ void SharedTurboAssembler::I64x2GeS(XMMRegister dst, XMMRegister src0, } } -void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src, - uint8_t shift, XMMRegister xmm_tmp) { +void SharedMacroAssemblerBase::I64x2ShrS(XMMRegister dst, XMMRegister src, + uint8_t shift, XMMRegister xmm_tmp) { ASM_CODE_COMMENT(this); DCHECK_GT(64, shift); DCHECK_NE(xmm_tmp, dst); @@ -1019,10 +1026,10 @@ void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src, Psubq(dst, xmm_tmp); } -void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src, - Register shift, XMMRegister xmm_tmp, - XMMRegister xmm_shift, - Register tmp_shift) { +void 
SharedMacroAssemblerBase::I64x2ShrS(XMMRegister dst, XMMRegister src, + Register shift, XMMRegister xmm_tmp, + XMMRegister xmm_shift, + Register tmp_shift) { ASM_CODE_COMMENT(this); DCHECK_NE(xmm_tmp, dst); DCHECK_NE(xmm_tmp, src); @@ -1049,9 +1056,9 @@ void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src, Psubq(dst, xmm_tmp); } -void SharedTurboAssembler::I64x2Mul(XMMRegister dst, XMMRegister lhs, - XMMRegister rhs, XMMRegister tmp1, - XMMRegister tmp2) { +void SharedMacroAssemblerBase::I64x2Mul(XMMRegister dst, XMMRegister lhs, + XMMRegister rhs, XMMRegister tmp1, + XMMRegister tmp2) { ASM_CODE_COMMENT(this); DCHECK(!AreAliased(dst, tmp1, tmp2)); DCHECK(!AreAliased(lhs, tmp1, tmp2)); @@ -1099,9 +1106,10 @@ void SharedTurboAssembler::I64x2Mul(XMMRegister dst, XMMRegister lhs, // 2. Unpack src1, src0 into even-number elements of dst. // 3. Multiply 1. with 2. // For non-AVX, use non-destructive pshufd instead of punpckldq/punpckhdq. -void SharedTurboAssembler::I64x2ExtMul(XMMRegister dst, XMMRegister src1, - XMMRegister src2, XMMRegister scratch, - bool low, bool is_signed) { +void SharedMacroAssemblerBase::I64x2ExtMul(XMMRegister dst, XMMRegister src1, + XMMRegister src2, + XMMRegister scratch, bool low, + bool is_signed) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); @@ -1130,8 +1138,8 @@ void SharedTurboAssembler::I64x2ExtMul(XMMRegister dst, XMMRegister src1, } } -void SharedTurboAssembler::I64x2SConvertI32x4High(XMMRegister dst, - XMMRegister src) { +void SharedMacroAssemblerBase::I64x2SConvertI32x4High(XMMRegister dst, + XMMRegister src) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); @@ -1148,9 +1156,9 @@ void SharedTurboAssembler::I64x2SConvertI32x4High(XMMRegister dst, } } -void SharedTurboAssembler::I64x2UConvertI32x4High(XMMRegister dst, - XMMRegister src, - XMMRegister scratch) { +void SharedMacroAssemblerBase::I64x2UConvertI32x4High(XMMRegister dst, + XMMRegister src, + XMMRegister scratch) { ASM_CODE_COMMENT(this); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); @@ -1170,8 +1178,8 @@ void SharedTurboAssembler::I64x2UConvertI32x4High(XMMRegister dst, } } -void SharedTurboAssembler::S128Not(XMMRegister dst, XMMRegister src, - XMMRegister scratch) { +void SharedMacroAssemblerBase::S128Not(XMMRegister dst, XMMRegister src, + XMMRegister scratch) { ASM_CODE_COMMENT(this); if (dst == src) { Pcmpeqd(scratch, scratch); @@ -1182,9 +1190,9 @@ void SharedTurboAssembler::S128Not(XMMRegister dst, XMMRegister src, } } -void SharedTurboAssembler::S128Select(XMMRegister dst, XMMRegister mask, - XMMRegister src1, XMMRegister src2, - XMMRegister scratch) { +void SharedMacroAssemblerBase::S128Select(XMMRegister dst, XMMRegister mask, + XMMRegister src1, XMMRegister src2, + XMMRegister scratch) { ASM_CODE_COMMENT(this); // v128.select = v128.or(v128.and(v1, c), v128.andnot(v2, c)). // pandn(x, y) = !x & y, so we have to flip the mask and input. @@ -1203,8 +1211,8 @@ void SharedTurboAssembler::S128Select(XMMRegister dst, XMMRegister mask, } } -void SharedTurboAssembler::S128Load8Splat(XMMRegister dst, Operand src, - XMMRegister scratch) { +void SharedMacroAssemblerBase::S128Load8Splat(XMMRegister dst, Operand src, + XMMRegister scratch) { ASM_CODE_COMMENT(this); // The trap handler uses the current pc to creating a landing, so that it can // determine if a trap occured in Wasm code due to a OOB load. 
Make sure the @@ -1226,8 +1234,8 @@ void SharedTurboAssembler::S128Load8Splat(XMMRegister dst, Operand src, } } -void SharedTurboAssembler::S128Load16Splat(XMMRegister dst, Operand src, - XMMRegister scratch) { +void SharedMacroAssemblerBase::S128Load16Splat(XMMRegister dst, Operand src, + XMMRegister scratch) { ASM_CODE_COMMENT(this); // The trap handler uses the current pc to creating a landing, so that it can // determine if a trap occured in Wasm code due to a OOB load. Make sure the @@ -1248,7 +1256,7 @@ void SharedTurboAssembler::S128Load16Splat(XMMRegister dst, Operand src, } } -void SharedTurboAssembler::S128Load32Splat(XMMRegister dst, Operand src) { +void SharedMacroAssemblerBase::S128Load32Splat(XMMRegister dst, Operand src) { ASM_CODE_COMMENT(this); // The trap handler uses the current pc to creating a landing, so that it can // determine if a trap occured in Wasm code due to a OOB load. Make sure the @@ -1262,8 +1270,8 @@ void SharedTurboAssembler::S128Load32Splat(XMMRegister dst, Operand src) { } } -void SharedTurboAssembler::S128Store64Lane(Operand dst, XMMRegister src, - uint8_t laneidx) { +void SharedMacroAssemblerBase::S128Store64Lane(Operand dst, XMMRegister src, + uint8_t laneidx) { ASM_CODE_COMMENT(this); if (laneidx == 0) { Movlps(dst, src); @@ -1342,27 +1350,27 @@ void SharedTurboAssembler::S128Store64Lane(Operand dst, XMMRegister src, sub##ps_or_pd(dst, tmp); \ } -void SharedTurboAssembler::F32x4Qfma(XMMRegister dst, XMMRegister src1, - XMMRegister src2, XMMRegister src3, - XMMRegister tmp) { +void SharedMacroAssemblerBase::F32x4Qfma(XMMRegister dst, XMMRegister src1, + XMMRegister src2, XMMRegister src3, + XMMRegister tmp) { QFMA(ps) } -void SharedTurboAssembler::F32x4Qfms(XMMRegister dst, XMMRegister src1, - XMMRegister src2, XMMRegister src3, - XMMRegister tmp) { +void SharedMacroAssemblerBase::F32x4Qfms(XMMRegister dst, XMMRegister src1, + XMMRegister src2, XMMRegister src3, + XMMRegister tmp) { QFMS(ps) } -void SharedTurboAssembler::F64x2Qfma(XMMRegister dst, XMMRegister src1, - XMMRegister src2, XMMRegister src3, - XMMRegister tmp) { +void SharedMacroAssemblerBase::F64x2Qfma(XMMRegister dst, XMMRegister src1, + XMMRegister src2, XMMRegister src3, + XMMRegister tmp) { QFMA(pd); } -void SharedTurboAssembler::F64x2Qfms(XMMRegister dst, XMMRegister src1, - XMMRegister src2, XMMRegister src3, - XMMRegister tmp) { +void SharedMacroAssemblerBase::F64x2Qfms(XMMRegister dst, XMMRegister src1, + XMMRegister src2, XMMRegister src3, + XMMRegister tmp) { QFMS(pd); } diff --git a/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h b/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h index 66106f90fb..ae97572783 100644 --- a/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h +++ b/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h @@ -8,7 +8,7 @@ #include "src/base/macros.h" #include "src/codegen/cpu-features.h" #include "src/codegen/external-reference.h" -#include "src/codegen/turbo-assembler.h" +#include "src/codegen/macro-assembler-base.h" #if V8_TARGET_ARCH_IA32 #include "src/codegen/ia32/register-ia32.h" @@ -30,15 +30,15 @@ constexpr int kStackSavedSavedFPSize = 2 * kDoubleSize; constexpr int kStackSavedSavedFPSize = kDoubleSize; #endif // V8_ENABLE_WEBASSEMBLY -// Base class for SharedTurboAssemblerBase. This class contains macro-assembler +// Base class for SharedMacroAssembler. This class contains macro-assembler // functions that can be shared across ia32 and x64 without any template // machinery, i.e. 
does not require the CRTP pattern that -// SharedTurboAssemblerBase exposes. This allows us to keep the bulk of +// SharedMacroAssembler exposes. This allows us to keep the bulk of // definition inside a separate source file, rather than putting everything // inside this header. -class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase { +class V8_EXPORT_PRIVATE SharedMacroAssemblerBase : public MacroAssemblerBase { public: - using TurboAssemblerBase::TurboAssemblerBase; + using MacroAssemblerBase::MacroAssemblerBase; void Move(Register dst, uint32_t src); // Move if registers are not identical. @@ -530,41 +530,41 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase { void I16x8SplatPreAvx2(XMMRegister dst, Op src); }; -// Common base class template shared by ia32 and x64 TurboAssembler. This uses +// Common base class template shared by ia32 and x64 MacroAssembler. This uses // the Curiously Recurring Template Pattern (CRTP), where Impl is the actual -// class (subclass of SharedTurboAssemblerBase instantiated with the actual +// class (subclass of SharedMacroAssembler instantiated with the actual // class). This allows static polymorphism, where member functions can be move -// into SharedTurboAssembler, and we can also call into member functions -// defined in ia32 or x64 specific TurboAssembler from within this template +// into SharedMacroAssemblerBase, and we can also call into member functions +// defined in ia32 or x64 specific MacroAssembler from within this template // class, via Impl. // // Note: all member functions must be defined in this header file so that the // compiler can generate code for the function definitions. See // https://isocpp.org/wiki/faq/templates#templates-defn-vs-decl for rationale. -// If a function does not need polymorphism, move it into SharedTurboAssembler, -// and define it outside of this header. +// If a function does not need polymorphism, move it into +// SharedMacroAssemblerBase, and define it outside of this header. 
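Editor's note: a minimal sketch of the CRTP shape the comment above describes, using made-up names (SharedPart, X64Assembler, HighLevelOp, ArchSpecificOp are illustrative, not the real classes) to show how the shared template statically dispatches into the architecture-specific assembler:

template <typename Impl>
class SharedPart {
 public:
  void HighLevelOp() {
    // Resolved at compile time via the derived type; no virtual dispatch.
    impl()->ArchSpecificOp();
  }

 private:
  Impl* impl() { return static_cast<Impl*>(this); }
};

class X64Assembler : public SharedPart<X64Assembler> {
 public:
  void ArchSpecificOp() { /* x64-only encoding goes here */ }
};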
template -class V8_EXPORT_PRIVATE SharedTurboAssemblerBase : public SharedTurboAssembler { - using SharedTurboAssembler::SharedTurboAssembler; +class V8_EXPORT_PRIVATE SharedMacroAssembler : public SharedMacroAssemblerBase { + using SharedMacroAssemblerBase::SharedMacroAssemblerBase; public: void Abspd(XMMRegister dst, XMMRegister src, Register tmp) { - FloatUnop(dst, src, tmp, &SharedTurboAssembler::Andps, + FloatUnop(dst, src, tmp, &SharedMacroAssemblerBase::Andps, ExternalReference::address_of_double_abs_constant()); } void Absps(XMMRegister dst, XMMRegister src, Register tmp) { - FloatUnop(dst, src, tmp, &SharedTurboAssembler::Andps, + FloatUnop(dst, src, tmp, &SharedMacroAssemblerBase::Andps, ExternalReference::address_of_float_abs_constant()); } void Negpd(XMMRegister dst, XMMRegister src, Register tmp) { - FloatUnop(dst, src, tmp, &SharedTurboAssembler::Xorps, + FloatUnop(dst, src, tmp, &SharedMacroAssemblerBase::Xorps, ExternalReference::address_of_double_neg_constant()); } void Negps(XMMRegister dst, XMMRegister src, Register tmp) { - FloatUnop(dst, src, tmp, &SharedTurboAssembler::Xorps, + FloatUnop(dst, src, tmp, &SharedMacroAssemblerBase::Xorps, ExternalReference::address_of_float_neg_constant()); } #undef FLOAT_UNOP @@ -975,15 +975,16 @@ class V8_EXPORT_PRIVATE SharedTurboAssemblerBase : public SharedTurboAssembler { return impl()->ExternalReferenceAsOperand(reference, scratch); } - using FloatInstruction = void (SharedTurboAssembler::*)(XMMRegister, - XMMRegister, Operand); + using FloatInstruction = void (SharedMacroAssemblerBase::*)(XMMRegister, + XMMRegister, + Operand); void FloatUnop(XMMRegister dst, XMMRegister src, Register tmp, FloatInstruction op, ExternalReference ext) { if (!CpuFeatures::IsSupported(AVX) && (dst != src)) { movaps(dst, src); src = dst; } - SharedTurboAssembler* assm = this; + SharedMacroAssemblerBase* assm = this; (assm->*op)(dst, src, ExternalReferenceAsOperand(ext, tmp)); } }; diff --git a/src/codegen/source-position.cc b/src/codegen/source-position.cc index f85c8ca3b8..073265a442 100644 --- a/src/codegen/source-position.cc +++ b/src/codegen/source-position.cc @@ -79,10 +79,10 @@ std::vector SourcePosition::InliningStack(Isolate* isolate, } SourcePositionInfo SourcePosition::FirstInfo(Isolate* isolate, - Handle code) const { + Code code) const { DisallowGarbageCollection no_gc; DeoptimizationData deopt_data = - DeoptimizationData::cast(code->deoptimization_data()); + DeoptimizationData::cast(code.deoptimization_data()); SourcePosition pos = *this; if (pos.isInlined()) { InliningPosition inl = deopt_data.InliningPositions().get(pos.InliningId()); diff --git a/src/codegen/source-position.h b/src/codegen/source-position.h index bd74dd9ff8..8794926c25 100644 --- a/src/codegen/source-position.h +++ b/src/codegen/source-position.h @@ -83,7 +83,7 @@ class SourcePosition final { Code code) const; std::vector InliningStack( OptimizedCompilationInfo* cinfo) const; - SourcePositionInfo FirstInfo(Isolate* isolate, Handle code) const; + SourcePositionInfo FirstInfo(Isolate* isolate, Code code) const; void Print(std::ostream& out, InstructionStream code) const; void PrintJson(std::ostream& out) const; diff --git a/src/codegen/x64/assembler-x64-inl.h b/src/codegen/x64/assembler-x64-inl.h index a0bb1a1eee..4c851604e9 100644 --- a/src/codegen/x64/assembler-x64-inl.h +++ b/src/codegen/x64/assembler-x64-inl.h @@ -283,8 +283,8 @@ HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) { if (IsCompressedEmbeddedObject(rmode_)) { Tagged_t compressed = 
ReadUnalignedValue(pc_); DCHECK(!HAS_SMI_TAG(compressed)); - Object obj(V8HeapCompressionScheme::DecompressTaggedPointer(cage_base, - compressed)); + Object obj( + V8HeapCompressionScheme::DecompressTagged(cage_base, compressed)); // Embedding of compressed InstructionStream objects must not happen when // external code space is enabled, because Codes must be used // instead. diff --git a/src/codegen/x64/macro-assembler-x64.cc b/src/codegen/x64/macro-assembler-x64.cc index cab8b87d8d..75891c3237 100644 --- a/src/codegen/x64/macro-assembler-x64.cc +++ b/src/codegen/x64/macro-assembler-x64.cc @@ -81,16 +81,16 @@ void MacroAssembler::Store(ExternalReference destination, Register source) { } } -void TurboAssembler::LoadFromConstantsTable(Register destination, +void MacroAssembler::LoadFromConstantsTable(Register destination, int constant_index) { DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable)); LoadRoot(destination, RootIndex::kBuiltinsConstantsTable); - LoadTaggedPointerField( + LoadTaggedField( destination, FieldOperand(destination, FixedArray::OffsetOfElementAt(constant_index))); } -void TurboAssembler::LoadRootRegisterOffset(Register destination, +void MacroAssembler::LoadRootRegisterOffset(Register destination, intptr_t offset) { DCHECK(is_int32(offset)); if (offset == 0) { @@ -100,11 +100,11 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination, } } -void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) { +void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) { movq(destination, Operand(kRootRegister, offset)); } -void TurboAssembler::LoadAddress(Register destination, +void MacroAssembler::LoadAddress(Register destination, ExternalReference source) { if (root_array_available_ && options().enable_root_relative_access) { intptr_t delta = RootRegisterOffsetForExternalReference(isolate(), source); @@ -124,7 +124,7 @@ void TurboAssembler::LoadAddress(Register destination, Move(destination, source); } -Operand TurboAssembler::ExternalReferenceAsOperand(ExternalReference reference, +Operand MacroAssembler::ExternalReferenceAsOperand(ExternalReference reference, Register scratch) { if (root_array_available_ && options().enable_root_relative_access) { int64_t delta = @@ -158,23 +158,23 @@ void MacroAssembler::PushAddress(ExternalReference source) { Push(kScratchRegister); } -Operand TurboAssembler::RootAsOperand(RootIndex index) { +Operand MacroAssembler::RootAsOperand(RootIndex index) { DCHECK(root_array_available()); return Operand(kRootRegister, RootRegisterOffsetForRootIndex(index)); } -void TurboAssembler::LoadTaggedRoot(Register destination, RootIndex index) { +void MacroAssembler::LoadTaggedRoot(Register destination, RootIndex index) { if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index)) { - mov_tagged(destination, Immediate(ReadOnlyRootPtr(index))); + movq(destination, Immediate(ReadOnlyRootPtr(index))); return; } DCHECK(root_array_available_); movq(destination, RootAsOperand(index)); } -void TurboAssembler::LoadRoot(Register destination, RootIndex index) { +void MacroAssembler::LoadRoot(Register destination, RootIndex index) { if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index)) { - DecompressTaggedPointer(destination, ReadOnlyRootPtr(index)); + DecompressTagged(destination, ReadOnlyRootPtr(index)); return; } DCHECK(root_array_available_); @@ -186,7 +186,7 @@ void MacroAssembler::PushRoot(RootIndex index) { Push(RootAsOperand(index)); } -void TurboAssembler::CompareRoot(Register with, 
RootIndex index) { +void MacroAssembler::CompareRoot(Register with, RootIndex index) { if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index)) { cmp_tagged(with, Immediate(ReadOnlyRootPtr(index))); return; @@ -201,7 +201,7 @@ void TurboAssembler::CompareRoot(Register with, RootIndex index) { } } -void TurboAssembler::CompareRoot(Operand with, RootIndex index) { +void MacroAssembler::CompareRoot(Operand with, RootIndex index) { if (V8_STATIC_ROOTS_BOOL && RootsTable::IsReadOnly(index)) { cmp_tagged(with, Immediate(ReadOnlyRootPtr(index))); return; @@ -219,25 +219,24 @@ void TurboAssembler::CompareRoot(Operand with, RootIndex index) { } } -void TurboAssembler::LoadMap(Register destination, Register object) { - LoadTaggedPointerField(destination, - FieldOperand(object, HeapObject::kMapOffset)); +void MacroAssembler::LoadMap(Register destination, Register object) { + LoadTaggedField(destination, FieldOperand(object, HeapObject::kMapOffset)); #ifdef V8_MAP_PACKING UnpackMapWord(destination); #endif } -void TurboAssembler::LoadTaggedPointerField(Register destination, - Operand field_operand) { +void MacroAssembler::LoadTaggedField(Register destination, + Operand field_operand) { if (COMPRESS_POINTERS_BOOL) { - DecompressTaggedPointer(destination, field_operand); + DecompressTagged(destination, field_operand); } else { mov_tagged(destination, field_operand); } } -void TurboAssembler::LoadTaggedPointerField(TaggedRegister destination, - Operand field_operand) { +void MacroAssembler::LoadTaggedField(TaggedRegister destination, + Operand field_operand) { if (COMPRESS_POINTERS_BOOL) { movl(destination.reg(), field_operand); } else { @@ -246,7 +245,7 @@ void TurboAssembler::LoadTaggedPointerField(TaggedRegister destination, } #ifdef V8_MAP_PACKING -void TurboAssembler::UnpackMapWord(Register r) { +void MacroAssembler::UnpackMapWord(Register r) { // Clear the top two bytes (which may include metadata). Must be in sync with // MapWord::Unpack, and vice versa. 
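// [Editor's sketch, not part of the patch] "Clear the top two bytes" from the
// comment above, as plain arithmetic on an unsigned 64-bit word:
//   uint64_t cleared = (word << 16) >> 16;  // logical shifts drop bits 63..48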
shlq(r, Immediate(16)); @@ -255,7 +254,7 @@ void TurboAssembler::UnpackMapWord(Register r) { } #endif -void TurboAssembler::LoadTaggedSignedField(Register destination, +void MacroAssembler::LoadTaggedSignedField(Register destination, Operand field_operand) { if (COMPRESS_POINTERS_BOOL) { DecompressTaggedSigned(destination, field_operand); @@ -264,55 +263,25 @@ void TurboAssembler::LoadTaggedSignedField(Register destination, } } -void TurboAssembler::LoadAnyTaggedField(Register destination, - Operand field_operand) { - if (COMPRESS_POINTERS_BOOL) { - DecompressAnyTagged(destination, field_operand); - } else { - mov_tagged(destination, field_operand); - } -} - -void TurboAssembler::LoadAnyTaggedField(TaggedRegister destination, - Operand field_operand) { - if (COMPRESS_POINTERS_BOOL) { - movl(destination.reg(), field_operand); - } else { - mov_tagged(destination.reg(), field_operand); - } -} - -void TurboAssembler::PushTaggedPointerField(Operand field_operand, - Register scratch) { +void MacroAssembler::PushTaggedField(Operand field_operand, Register scratch) { if (COMPRESS_POINTERS_BOOL) { DCHECK(!field_operand.AddressUsesRegister(scratch)); - DecompressTaggedPointer(scratch, field_operand); + DecompressTagged(scratch, field_operand); Push(scratch); } else { Push(field_operand); } } -void TurboAssembler::PushTaggedAnyField(Operand field_operand, - Register scratch) { - if (COMPRESS_POINTERS_BOOL) { - DCHECK(!field_operand.AddressUsesRegister(scratch)); - DecompressAnyTagged(scratch, field_operand); - Push(scratch); - } else { - Push(field_operand); - } -} - -void TurboAssembler::SmiUntagField(Register dst, Operand src) { +void MacroAssembler::SmiUntagField(Register dst, Operand src) { SmiUntag(dst, src); } -void TurboAssembler::SmiUntagFieldUnsigned(Register dst, Operand src) { +void MacroAssembler::SmiUntagFieldUnsigned(Register dst, Operand src) { SmiUntagUnsigned(dst, src); } -void TurboAssembler::StoreTaggedField(Operand dst_field_operand, +void MacroAssembler::StoreTaggedField(Operand dst_field_operand, Immediate value) { if (COMPRESS_POINTERS_BOOL) { movl(dst_field_operand, value); @@ -321,7 +290,7 @@ void TurboAssembler::StoreTaggedField(Operand dst_field_operand, } } -void TurboAssembler::StoreTaggedField(Operand dst_field_operand, +void MacroAssembler::StoreTaggedField(Operand dst_field_operand, Register value) { if (COMPRESS_POINTERS_BOOL) { movl(dst_field_operand, value); @@ -330,7 +299,7 @@ void TurboAssembler::StoreTaggedField(Operand dst_field_operand, } } -void TurboAssembler::StoreTaggedSignedField(Operand dst_field_operand, +void MacroAssembler::StoreTaggedSignedField(Operand dst_field_operand, Smi value) { if (SmiValuesAre32Bits()) { Move(kScratchRegister, value); @@ -340,7 +309,7 @@ void TurboAssembler::StoreTaggedSignedField(Operand dst_field_operand, } } -void TurboAssembler::AtomicStoreTaggedField(Operand dst_field_operand, +void MacroAssembler::AtomicStoreTaggedField(Operand dst_field_operand, Register value) { if (COMPRESS_POINTERS_BOOL) { movl(kScratchRegister, value); @@ -351,35 +320,27 @@ void TurboAssembler::AtomicStoreTaggedField(Operand dst_field_operand, } } -void TurboAssembler::DecompressTaggedSigned(Register destination, +void MacroAssembler::DecompressTaggedSigned(Register destination, Operand field_operand) { ASM_CODE_COMMENT(this); movl(destination, field_operand); } -void TurboAssembler::DecompressTaggedPointer(Register destination, - Operand field_operand) { +void MacroAssembler::DecompressTagged(Register destination, + Operand field_operand) { 
ASM_CODE_COMMENT(this); movl(destination, field_operand); addq(destination, kPtrComprCageBaseRegister); } -void TurboAssembler::DecompressTaggedPointer(Register destination, - Register source) { +void MacroAssembler::DecompressTagged(Register destination, Register source) { ASM_CODE_COMMENT(this); movl(destination, source); addq(destination, kPtrComprCageBaseRegister); } -void TurboAssembler::DecompressAnyTagged(Register destination, - Operand field_operand) { - ASM_CODE_COMMENT(this); - movl(destination, field_operand); - addq(destination, kPtrComprCageBaseRegister); -} - -void TurboAssembler::DecompressTaggedPointer(Register destination, - Tagged_t immediate) { +void MacroAssembler::DecompressTagged(Register destination, + Tagged_t immediate) { ASM_CODE_COMMENT(this); leaq(destination, Operand(kPtrComprCageBaseRegister, static_cast(immediate))); @@ -427,7 +388,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset, } } -void TurboAssembler::EncodeSandboxedPointer(Register value) { +void MacroAssembler::EncodeSandboxedPointer(Register value) { ASM_CODE_COMMENT(this); #ifdef V8_ENABLE_SANDBOX subq(value, kPtrComprCageBaseRegister); @@ -437,7 +398,7 @@ void TurboAssembler::EncodeSandboxedPointer(Register value) { #endif } -void TurboAssembler::DecodeSandboxedPointer(Register value) { +void MacroAssembler::DecodeSandboxedPointer(Register value) { ASM_CODE_COMMENT(this); #ifdef V8_ENABLE_SANDBOX shrq(value, Immediate(kSandboxedPointerShift)); @@ -447,14 +408,14 @@ void TurboAssembler::DecodeSandboxedPointer(Register value) { #endif } -void TurboAssembler::LoadSandboxedPointerField(Register destination, +void MacroAssembler::LoadSandboxedPointerField(Register destination, Operand field_operand) { ASM_CODE_COMMENT(this); movq(destination, field_operand); DecodeSandboxedPointer(destination); } -void TurboAssembler::StoreSandboxedPointerField(Operand dst_field_operand, +void MacroAssembler::StoreSandboxedPointerField(Operand dst_field_operand, Register value) { ASM_CODE_COMMENT(this); DCHECK(!AreAliased(value, kScratchRegister)); @@ -464,7 +425,7 @@ void TurboAssembler::StoreSandboxedPointerField(Operand dst_field_operand, movq(dst_field_operand, kScratchRegister); } -void TurboAssembler::LoadExternalPointerField( +void MacroAssembler::LoadExternalPointerField( Register destination, Operand field_operand, ExternalPointerTag tag, Register scratch, IsolateRootLocation isolateRootLocation) { DCHECK(!AreAliased(destination, scratch)); @@ -493,7 +454,7 @@ void TurboAssembler::LoadExternalPointerField( #endif // V8_ENABLE_SANDBOX } -void TurboAssembler::CallEphemeronKeyBarrier(Register object, +void MacroAssembler::CallEphemeronKeyBarrier(Register object, Register slot_address, SaveFPRegsMode fp_mode) { ASM_CODE_COMMENT(this); @@ -508,12 +469,12 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object, MovePair(slot_address_parameter, slot_address, object_parameter, object); Call(isolate()->builtins()->code_handle( - Builtins::GetEphemeronKeyBarrierStub(fp_mode)), - RelocInfo::CODE_TARGET); + Builtins::GetEphemeronKeyBarrierStub(fp_mode)), + RelocInfo::CODE_TARGET); PopAll(registers); } -void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object, +void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode) { @@ -531,7 +492,7 @@ void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object, PopAll(registers); } -void TurboAssembler::CallRecordWriteStub(Register object, Register 
slot_address, +void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address, SaveFPRegsMode fp_mode, StubCallMode mode) { ASM_CODE_COMMENT(this); @@ -554,7 +515,7 @@ void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address, } #ifdef V8_IS_TSAN -void TurboAssembler::CallTSANStoreStub(Register address, Register value, +void MacroAssembler::CallTSANStoreStub(Register address, Register value, SaveFPRegsMode fp_mode, int size, StubCallMode mode, std::memory_order order) { @@ -600,7 +561,7 @@ void TurboAssembler::CallTSANStoreStub(Register address, Register value, PopAll(registers); } -void TurboAssembler::CallTSANRelaxedLoadStub(Register address, +void MacroAssembler::CallTSANRelaxedLoadStub(Register address, SaveFPRegsMode fp_mode, int size, StubCallMode mode) { TSANLoadDescriptor descriptor; @@ -694,7 +655,7 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address, } } -void TurboAssembler::Check(Condition cc, AbortReason reason) { +void MacroAssembler::Check(Condition cc, AbortReason reason) { Label L; j(cc, &L, Label::kNear); Abort(reason); @@ -702,7 +663,7 @@ void TurboAssembler::Check(Condition cc, AbortReason reason) { bind(&L); } -void TurboAssembler::CheckStackAlignment() { +void MacroAssembler::CheckStackAlignment() { int frame_alignment = base::OS::ActivationFrameAlignment(); int frame_alignment_mask = frame_alignment - 1; if (frame_alignment > kSystemPointerSize) { @@ -717,7 +678,7 @@ void TurboAssembler::CheckStackAlignment() { } } -void TurboAssembler::Abort(AbortReason reason) { +void MacroAssembler::Abort(AbortReason reason) { ASM_CODE_COMMENT(this); if (v8_flags.code_comments) { const char* msg = GetAbortReason(reason); @@ -951,7 +912,7 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot( bind(&maybe_has_optimized_code); Register optimized_code_entry = flags; - LoadAnyTaggedField( + LoadTaggedField( optimized_code_entry, FieldOperand(feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset)); TailCallOptimizedCodeSlot(this, optimized_code_entry, closure, r9, @@ -959,7 +920,7 @@ void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot( jump_mode); } -int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, +int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) const { int bytes = 0; RegList saved_regs = kCallerSaved - exclusion; @@ -973,7 +934,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, return bytes; } -int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, +int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) { ASM_CODE_COMMENT(this); int bytes = 0; @@ -985,7 +946,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, return bytes; } -int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) { +int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) { ASM_CODE_COMMENT(this); int bytes = 0; if (fp_mode == SaveFPRegsMode::kSave) { @@ -996,7 +957,7 @@ int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) { return bytes; } -int TurboAssembler::PushAll(RegList registers) { +int MacroAssembler::PushAll(RegList registers) { int bytes = 0; for (Register reg : registers) { pushq(reg); @@ -1005,7 +966,7 @@ int TurboAssembler::PushAll(RegList registers) { return bytes; } -int TurboAssembler::PopAll(RegList registers) { +int MacroAssembler::PopAll(RegList registers) { int bytes = 0; for (Register reg : 
base::Reversed(registers)) { popq(reg); @@ -1014,7 +975,7 @@ int TurboAssembler::PopAll(RegList registers) { return bytes; } -int TurboAssembler::PushAll(DoubleRegList registers, int stack_slot_size) { +int MacroAssembler::PushAll(DoubleRegList registers, int stack_slot_size) { if (registers.is_empty()) return 0; const int delta = stack_slot_size * registers.Count(); AllocateStackSpace(delta); @@ -1032,7 +993,7 @@ int TurboAssembler::PushAll(DoubleRegList registers, int stack_slot_size) { return delta; } -int TurboAssembler::PopAll(DoubleRegList registers, int stack_slot_size) { +int MacroAssembler::PopAll(DoubleRegList registers, int stack_slot_size) { if (registers.is_empty()) return 0; int slot = 0; for (XMMRegister reg : registers) { @@ -1049,7 +1010,7 @@ int TurboAssembler::PopAll(DoubleRegList registers, int stack_slot_size) { return slot; } -void TurboAssembler::Movq(XMMRegister dst, Register src) { +void MacroAssembler::Movq(XMMRegister dst, Register src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); vmovq(dst, src); @@ -1058,7 +1019,7 @@ void TurboAssembler::Movq(XMMRegister dst, Register src) { } } -void TurboAssembler::Movq(Register dst, XMMRegister src) { +void MacroAssembler::Movq(Register dst, XMMRegister src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); vmovq(dst, src); @@ -1067,7 +1028,7 @@ void TurboAssembler::Movq(Register dst, XMMRegister src) { } } -void TurboAssembler::Pextrq(Register dst, XMMRegister src, int8_t imm8) { +void MacroAssembler::Pextrq(Register dst, XMMRegister src, int8_t imm8) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); vpextrq(dst, src, imm8); @@ -1077,7 +1038,7 @@ void TurboAssembler::Pextrq(Register dst, XMMRegister src, int8_t imm8) { } } -void TurboAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) { +void MacroAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvtss2sd(dst, src, src); @@ -1086,7 +1047,7 @@ void TurboAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) { } } -void TurboAssembler::Cvtss2sd(XMMRegister dst, Operand src) { +void MacroAssembler::Cvtss2sd(XMMRegister dst, Operand src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvtss2sd(dst, dst, src); @@ -1095,7 +1056,7 @@ void TurboAssembler::Cvtss2sd(XMMRegister dst, Operand src) { } } -void TurboAssembler::Cvtsd2ss(XMMRegister dst, XMMRegister src) { +void MacroAssembler::Cvtsd2ss(XMMRegister dst, XMMRegister src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvtsd2ss(dst, src, src); @@ -1104,7 +1065,7 @@ void TurboAssembler::Cvtsd2ss(XMMRegister dst, XMMRegister src) { } } -void TurboAssembler::Cvtsd2ss(XMMRegister dst, Operand src) { +void MacroAssembler::Cvtsd2ss(XMMRegister dst, Operand src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvtsd2ss(dst, dst, src); @@ -1113,7 +1074,7 @@ void TurboAssembler::Cvtsd2ss(XMMRegister dst, Operand src) { } } -void TurboAssembler::Cvtlsi2sd(XMMRegister dst, Register src) { +void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Register src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvtlsi2sd(dst, kScratchDoubleReg, src); @@ -1123,7 +1084,7 @@ void TurboAssembler::Cvtlsi2sd(XMMRegister dst, Register src) { } } -void TurboAssembler::Cvtlsi2sd(XMMRegister dst, Operand src) { +void MacroAssembler::Cvtlsi2sd(XMMRegister dst, Operand src) { if 
(CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvtlsi2sd(dst, kScratchDoubleReg, src); @@ -1133,7 +1094,7 @@ void TurboAssembler::Cvtlsi2sd(XMMRegister dst, Operand src) { } } -void TurboAssembler::Cvtlsi2ss(XMMRegister dst, Register src) { +void MacroAssembler::Cvtlsi2ss(XMMRegister dst, Register src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvtlsi2ss(dst, kScratchDoubleReg, src); @@ -1143,7 +1104,7 @@ void TurboAssembler::Cvtlsi2ss(XMMRegister dst, Register src) { } } -void TurboAssembler::Cvtlsi2ss(XMMRegister dst, Operand src) { +void MacroAssembler::Cvtlsi2ss(XMMRegister dst, Operand src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvtlsi2ss(dst, kScratchDoubleReg, src); @@ -1153,7 +1114,7 @@ void TurboAssembler::Cvtlsi2ss(XMMRegister dst, Operand src) { } } -void TurboAssembler::Cvtqsi2ss(XMMRegister dst, Register src) { +void MacroAssembler::Cvtqsi2ss(XMMRegister dst, Register src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvtqsi2ss(dst, kScratchDoubleReg, src); @@ -1163,7 +1124,7 @@ void TurboAssembler::Cvtqsi2ss(XMMRegister dst, Register src) { } } -void TurboAssembler::Cvtqsi2ss(XMMRegister dst, Operand src) { +void MacroAssembler::Cvtqsi2ss(XMMRegister dst, Operand src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvtqsi2ss(dst, kScratchDoubleReg, src); @@ -1173,7 +1134,7 @@ void TurboAssembler::Cvtqsi2ss(XMMRegister dst, Operand src) { } } -void TurboAssembler::Cvtqsi2sd(XMMRegister dst, Register src) { +void MacroAssembler::Cvtqsi2sd(XMMRegister dst, Register src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvtqsi2sd(dst, kScratchDoubleReg, src); @@ -1183,7 +1144,7 @@ void TurboAssembler::Cvtqsi2sd(XMMRegister dst, Register src) { } } -void TurboAssembler::Cvtqsi2sd(XMMRegister dst, Operand src) { +void MacroAssembler::Cvtqsi2sd(XMMRegister dst, Operand src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvtqsi2sd(dst, kScratchDoubleReg, src); @@ -1193,31 +1154,31 @@ void TurboAssembler::Cvtqsi2sd(XMMRegister dst, Operand src) { } } -void TurboAssembler::Cvtlui2ss(XMMRegister dst, Register src) { +void MacroAssembler::Cvtlui2ss(XMMRegister dst, Register src) { // Zero-extend the 32 bit value to 64 bit. movl(kScratchRegister, src); Cvtqsi2ss(dst, kScratchRegister); } -void TurboAssembler::Cvtlui2ss(XMMRegister dst, Operand src) { +void MacroAssembler::Cvtlui2ss(XMMRegister dst, Operand src) { // Zero-extend the 32 bit value to 64 bit. movl(kScratchRegister, src); Cvtqsi2ss(dst, kScratchRegister); } -void TurboAssembler::Cvtlui2sd(XMMRegister dst, Register src) { +void MacroAssembler::Cvtlui2sd(XMMRegister dst, Register src) { // Zero-extend the 32 bit value to 64 bit. movl(kScratchRegister, src); Cvtqsi2sd(dst, kScratchRegister); } -void TurboAssembler::Cvtlui2sd(XMMRegister dst, Operand src) { +void MacroAssembler::Cvtlui2sd(XMMRegister dst, Operand src) { // Zero-extend the 32 bit value to 64 bit. 
movl(kScratchRegister, src); Cvtqsi2sd(dst, kScratchRegister); } -void TurboAssembler::Cvtqui2ss(XMMRegister dst, Register src) { +void MacroAssembler::Cvtqui2ss(XMMRegister dst, Register src) { Label done; Cvtqsi2ss(dst, src); testq(src, src); @@ -1236,12 +1197,12 @@ void TurboAssembler::Cvtqui2ss(XMMRegister dst, Register src) { bind(&done); } -void TurboAssembler::Cvtqui2ss(XMMRegister dst, Operand src) { +void MacroAssembler::Cvtqui2ss(XMMRegister dst, Operand src) { movq(kScratchRegister, src); Cvtqui2ss(dst, kScratchRegister); } -void TurboAssembler::Cvtqui2sd(XMMRegister dst, Register src) { +void MacroAssembler::Cvtqui2sd(XMMRegister dst, Register src) { Label done; Cvtqsi2sd(dst, src); testq(src, src); @@ -1260,12 +1221,12 @@ void TurboAssembler::Cvtqui2sd(XMMRegister dst, Register src) { bind(&done); } -void TurboAssembler::Cvtqui2sd(XMMRegister dst, Operand src) { +void MacroAssembler::Cvtqui2sd(XMMRegister dst, Operand src) { movq(kScratchRegister, src); Cvtqui2sd(dst, kScratchRegister); } -void TurboAssembler::Cvttss2si(Register dst, XMMRegister src) { +void MacroAssembler::Cvttss2si(Register dst, XMMRegister src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvttss2si(dst, src); @@ -1274,7 +1235,7 @@ void TurboAssembler::Cvttss2si(Register dst, XMMRegister src) { } } -void TurboAssembler::Cvttss2si(Register dst, Operand src) { +void MacroAssembler::Cvttss2si(Register dst, Operand src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvttss2si(dst, src); @@ -1283,7 +1244,7 @@ void TurboAssembler::Cvttss2si(Register dst, Operand src) { } } -void TurboAssembler::Cvttsd2si(Register dst, XMMRegister src) { +void MacroAssembler::Cvttsd2si(Register dst, XMMRegister src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvttsd2si(dst, src); @@ -1292,7 +1253,7 @@ void TurboAssembler::Cvttsd2si(Register dst, XMMRegister src) { } } -void TurboAssembler::Cvttsd2si(Register dst, Operand src) { +void MacroAssembler::Cvttsd2si(Register dst, Operand src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvttsd2si(dst, src); @@ -1301,7 +1262,7 @@ void TurboAssembler::Cvttsd2si(Register dst, Operand src) { } } -void TurboAssembler::Cvttss2siq(Register dst, XMMRegister src) { +void MacroAssembler::Cvttss2siq(Register dst, XMMRegister src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvttss2siq(dst, src); @@ -1310,7 +1271,7 @@ void TurboAssembler::Cvttss2siq(Register dst, XMMRegister src) { } } -void TurboAssembler::Cvttss2siq(Register dst, Operand src) { +void MacroAssembler::Cvttss2siq(Register dst, Operand src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvttss2siq(dst, src); @@ -1319,7 +1280,7 @@ void TurboAssembler::Cvttss2siq(Register dst, Operand src) { } } -void TurboAssembler::Cvttsd2siq(Register dst, XMMRegister src) { +void MacroAssembler::Cvttsd2siq(Register dst, XMMRegister src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvttsd2siq(dst, src); @@ -1328,7 +1289,7 @@ void TurboAssembler::Cvttsd2siq(Register dst, XMMRegister src) { } } -void TurboAssembler::Cvttsd2siq(Register dst, Operand src) { +void MacroAssembler::Cvttsd2siq(Register dst, Operand src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); vcvttsd2siq(dst, src); @@ -1339,115 +1300,115 @@ void TurboAssembler::Cvttsd2siq(Register dst, Operand src) { namespace { template -void ConvertFloatToUint64(TurboAssembler* 
tasm, Register dst, +void ConvertFloatToUint64(MacroAssembler* masm, Register dst, OperandOrXMMRegister src, Label* fail) { Label success; // There does not exist a native float-to-uint instruction, so we have to use // a float-to-int, and postprocess the result. if (is_double) { - tasm->Cvttsd2siq(dst, src); + masm->Cvttsd2siq(dst, src); } else { - tasm->Cvttss2siq(dst, src); + masm->Cvttss2siq(dst, src); } // If the result of the conversion is positive, we are already done. - tasm->testq(dst, dst); - tasm->j(positive, &success); + masm->testq(dst, dst); + masm->j(positive, &success); // The result of the first conversion was negative, which means that the // input value was not within the positive int64 range. We subtract 2^63 // and convert it again to see if it is within the uint64 range. if (is_double) { - tasm->Move(kScratchDoubleReg, -9223372036854775808.0); - tasm->Addsd(kScratchDoubleReg, src); - tasm->Cvttsd2siq(dst, kScratchDoubleReg); + masm->Move(kScratchDoubleReg, -9223372036854775808.0); + masm->Addsd(kScratchDoubleReg, src); + masm->Cvttsd2siq(dst, kScratchDoubleReg); } else { - tasm->Move(kScratchDoubleReg, -9223372036854775808.0f); - tasm->Addss(kScratchDoubleReg, src); - tasm->Cvttss2siq(dst, kScratchDoubleReg); + masm->Move(kScratchDoubleReg, -9223372036854775808.0f); + masm->Addss(kScratchDoubleReg, src); + masm->Cvttss2siq(dst, kScratchDoubleReg); } - tasm->testq(dst, dst); + masm->testq(dst, dst); // The only possible negative value here is 0x8000000000000000, which is // used on x64 to indicate an integer overflow. - tasm->j(negative, fail ? fail : &success); + masm->j(negative, fail ? fail : &success); // The input value is within uint64 range and the second conversion worked // successfully, but we still have to undo the subtraction we did // earlier. - tasm->Move(kScratchRegister, 0x8000000000000000); - tasm->orq(dst, kScratchRegister); - tasm->bind(&success); + masm->Move(kScratchRegister, 0x8000000000000000); + masm->orq(dst, kScratchRegister); + masm->bind(&success); } template -void ConvertFloatToUint32(TurboAssembler* tasm, Register dst, +void ConvertFloatToUint32(MacroAssembler* masm, Register dst, OperandOrXMMRegister src, Label* fail) { Label success; // There does not exist a native float-to-uint instruction, so we have to use // a float-to-int, and postprocess the result. if (is_double) { - tasm->Cvttsd2si(dst, src); + masm->Cvttsd2si(dst, src); } else { - tasm->Cvttss2si(dst, src); + masm->Cvttss2si(dst, src); } // If the result of the conversion is positive, we are already done. - tasm->testl(dst, dst); - tasm->j(positive, &success); + masm->testl(dst, dst); + masm->j(positive, &success); // The result of the first conversion was negative, which means that the // input value was not within the positive int32 range. We subtract 2^31 // and convert it again to see if it is within the uint32 range. 
if (is_double) { - tasm->Move(kScratchDoubleReg, -2147483648.0); - tasm->Addsd(kScratchDoubleReg, src); - tasm->Cvttsd2si(dst, kScratchDoubleReg); + masm->Move(kScratchDoubleReg, -2147483648.0); + masm->Addsd(kScratchDoubleReg, src); + masm->Cvttsd2si(dst, kScratchDoubleReg); } else { - tasm->Move(kScratchDoubleReg, -2147483648.0f); - tasm->Addss(kScratchDoubleReg, src); - tasm->Cvttss2si(dst, kScratchDoubleReg); + masm->Move(kScratchDoubleReg, -2147483648.0f); + masm->Addss(kScratchDoubleReg, src); + masm->Cvttss2si(dst, kScratchDoubleReg); } - tasm->testl(dst, dst); + masm->testl(dst, dst); // The only possible negative value here is 0x80000000, which is // used on x64 to indicate an integer overflow. - tasm->j(negative, fail ? fail : &success); + masm->j(negative, fail ? fail : &success); // The input value is within uint32 range and the second conversion worked // successfully, but we still have to undo the subtraction we did // earlier. - tasm->Move(kScratchRegister, 0x80000000); - tasm->orl(dst, kScratchRegister); - tasm->bind(&success); + masm->Move(kScratchRegister, 0x80000000); + masm->orl(dst, kScratchRegister); + masm->bind(&success); } } // namespace -void TurboAssembler::Cvttsd2uiq(Register dst, Operand src, Label* fail) { +void MacroAssembler::Cvttsd2uiq(Register dst, Operand src, Label* fail) { ConvertFloatToUint64(this, dst, src, fail); } -void TurboAssembler::Cvttsd2uiq(Register dst, XMMRegister src, Label* fail) { +void MacroAssembler::Cvttsd2uiq(Register dst, XMMRegister src, Label* fail) { ConvertFloatToUint64(this, dst, src, fail); } -void TurboAssembler::Cvttsd2ui(Register dst, Operand src, Label* fail) { +void MacroAssembler::Cvttsd2ui(Register dst, Operand src, Label* fail) { ConvertFloatToUint32(this, dst, src, fail); } -void TurboAssembler::Cvttsd2ui(Register dst, XMMRegister src, Label* fail) { +void MacroAssembler::Cvttsd2ui(Register dst, XMMRegister src, Label* fail) { ConvertFloatToUint32(this, dst, src, fail); } -void TurboAssembler::Cvttss2uiq(Register dst, Operand src, Label* fail) { +void MacroAssembler::Cvttss2uiq(Register dst, Operand src, Label* fail) { ConvertFloatToUint64(this, dst, src, fail); } -void TurboAssembler::Cvttss2uiq(Register dst, XMMRegister src, Label* fail) { +void MacroAssembler::Cvttss2uiq(Register dst, XMMRegister src, Label* fail) { ConvertFloatToUint64(this, dst, src, fail); } -void TurboAssembler::Cvttss2ui(Register dst, Operand src, Label* fail) { +void MacroAssembler::Cvttss2ui(Register dst, Operand src, Label* fail) { ConvertFloatToUint32(this, dst, src, fail); } -void TurboAssembler::Cvttss2ui(Register dst, XMMRegister src, Label* fail) { +void MacroAssembler::Cvttss2ui(Register dst, XMMRegister src, Label* fail) { ConvertFloatToUint32(this, dst, src, fail); } -void TurboAssembler::Cmpeqss(XMMRegister dst, XMMRegister src) { +void MacroAssembler::Cmpeqss(XMMRegister dst, XMMRegister src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); vcmpeqss(dst, src); @@ -1456,7 +1417,7 @@ void TurboAssembler::Cmpeqss(XMMRegister dst, XMMRegister src) { } } -void TurboAssembler::Cmpeqsd(XMMRegister dst, XMMRegister src) { +void MacroAssembler::Cmpeqsd(XMMRegister dst, XMMRegister src) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); vcmpeqsd(dst, src); @@ -1468,12 +1429,12 @@ void TurboAssembler::Cmpeqsd(XMMRegister dst, XMMRegister src) { // ---------------------------------------------------------------------------- // Smi tagging, untagging and tag detection. 
-Register TurboAssembler::GetSmiConstant(Smi source) { +Register MacroAssembler::GetSmiConstant(Smi source) { Move(kScratchRegister, source); return kScratchRegister; } -void TurboAssembler::Cmp(Register dst, int32_t src) { +void MacroAssembler::Cmp(Register dst, int32_t src) { if (src == 0) { testl(dst, dst); } else { @@ -1481,7 +1442,7 @@ void TurboAssembler::Cmp(Register dst, int32_t src) { } } -void TurboAssembler::SmiTag(Register reg) { +void MacroAssembler::SmiTag(Register reg) { static_assert(kSmiTag == 0); DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits()); if (COMPRESS_POINTERS_BOOL) { @@ -1492,7 +1453,7 @@ void TurboAssembler::SmiTag(Register reg) { } } -void TurboAssembler::SmiTag(Register dst, Register src) { +void MacroAssembler::SmiTag(Register dst, Register src) { DCHECK(dst != src); if (COMPRESS_POINTERS_BOOL) { movl(dst, src); @@ -1502,7 +1463,7 @@ void TurboAssembler::SmiTag(Register dst, Register src) { SmiTag(dst); } -void TurboAssembler::SmiUntag(Register reg) { +void MacroAssembler::SmiUntag(Register reg) { static_assert(kSmiTag == 0); DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits()); // TODO(v8:7703): Is there a way to avoid this sign extension when pointer @@ -1513,7 +1474,7 @@ void TurboAssembler::SmiUntag(Register reg) { sarq(reg, Immediate(kSmiShift)); } -void TurboAssembler::SmiUntagUnsigned(Register reg) { +void MacroAssembler::SmiUntagUnsigned(Register reg) { static_assert(kSmiTag == 0); DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits()); if (COMPRESS_POINTERS_BOOL) { @@ -1524,7 +1485,7 @@ void TurboAssembler::SmiUntagUnsigned(Register reg) { } } -void TurboAssembler::SmiUntag(Register dst, Register src) { +void MacroAssembler::SmiUntag(Register dst, Register src) { DCHECK(dst != src); if (COMPRESS_POINTERS_BOOL) { movsxlq(dst, src); @@ -1538,7 +1499,7 @@ void TurboAssembler::SmiUntag(Register dst, Register src) { sarq(dst, Immediate(kSmiShift)); } -void TurboAssembler::SmiUntag(Register dst, Operand src) { +void MacroAssembler::SmiUntag(Register dst, Operand src) { if (SmiValuesAre32Bits()) { // Sign extend to 64-bit. movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte)); @@ -1553,7 +1514,7 @@ void TurboAssembler::SmiUntag(Register dst, Operand src) { } } -void TurboAssembler::SmiUntagUnsigned(Register dst, Operand src) { +void MacroAssembler::SmiUntagUnsigned(Register dst, Operand src) { if (SmiValuesAre32Bits()) { // Zero extend to 64-bit. 
movl(dst, Operand(src, kSmiShift / kBitsPerByte)); @@ -1570,7 +1531,7 @@ void TurboAssembler::SmiUntagUnsigned(Register dst, Operand src) { } } -void TurboAssembler::SmiToInt32(Register reg) { +void MacroAssembler::SmiToInt32(Register reg) { static_assert(kSmiTag == 0); DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits()); if (COMPRESS_POINTERS_BOOL) { @@ -1580,24 +1541,24 @@ void TurboAssembler::SmiToInt32(Register reg) { } } -void TurboAssembler::SmiToInt32(Register dst, Register src) { +void MacroAssembler::SmiToInt32(Register dst, Register src) { DCHECK(dst != src); mov_tagged(dst, src); SmiToInt32(dst); } -void TurboAssembler::SmiCompare(Register smi1, Register smi2) { +void MacroAssembler::SmiCompare(Register smi1, Register smi2) { AssertSmi(smi1); AssertSmi(smi2); cmp_tagged(smi1, smi2); } -void TurboAssembler::SmiCompare(Register dst, Smi src) { +void MacroAssembler::SmiCompare(Register dst, Smi src) { AssertSmi(dst); Cmp(dst, src); } -void TurboAssembler::Cmp(Register dst, Smi src) { +void MacroAssembler::Cmp(Register dst, Smi src) { if (src.value() == 0) { test_tagged(dst, dst); } else if (COMPRESS_POINTERS_BOOL) { @@ -1609,19 +1570,19 @@ void TurboAssembler::Cmp(Register dst, Smi src) { } } -void TurboAssembler::SmiCompare(Register dst, Operand src) { +void MacroAssembler::SmiCompare(Register dst, Operand src) { AssertSmi(dst); AssertSmi(src); cmp_tagged(dst, src); } -void TurboAssembler::SmiCompare(Operand dst, Register src) { +void MacroAssembler::SmiCompare(Operand dst, Register src) { AssertSmi(dst); AssertSmi(src); cmp_tagged(dst, src); } -void TurboAssembler::SmiCompare(Operand dst, Smi src) { +void MacroAssembler::SmiCompare(Operand dst, Smi src) { AssertSmi(dst); if (SmiValuesAre32Bits()) { cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src.value())); @@ -1631,44 +1592,44 @@ void TurboAssembler::SmiCompare(Operand dst, Smi src) { } } -void TurboAssembler::Cmp(Operand dst, Smi src) { +void MacroAssembler::Cmp(Operand dst, Smi src) { // The Operand cannot use the smi register. 
Register smi_reg = GetSmiConstant(src); DCHECK(!dst.AddressUsesRegister(smi_reg)); cmp_tagged(dst, smi_reg); } -Condition TurboAssembler::CheckSmi(Register src) { +Condition MacroAssembler::CheckSmi(Register src) { static_assert(kSmiTag == 0); testb(src, Immediate(kSmiTagMask)); return zero; } -Condition TurboAssembler::CheckSmi(Operand src) { +Condition MacroAssembler::CheckSmi(Operand src) { static_assert(kSmiTag == 0); testb(src, Immediate(kSmiTagMask)); return zero; } -void TurboAssembler::JumpIfSmi(Register src, Label* on_smi, +void MacroAssembler::JumpIfSmi(Register src, Label* on_smi, Label::Distance near_jump) { Condition smi = CheckSmi(src); j(smi, on_smi, near_jump); } -void TurboAssembler::JumpIfNotSmi(Register src, Label* on_not_smi, +void MacroAssembler::JumpIfNotSmi(Register src, Label* on_not_smi, Label::Distance near_jump) { Condition smi = CheckSmi(src); j(NegateCondition(smi), on_not_smi, near_jump); } -void TurboAssembler::JumpIfNotSmi(Operand src, Label* on_not_smi, +void MacroAssembler::JumpIfNotSmi(Operand src, Label* on_not_smi, Label::Distance near_jump) { Condition smi = CheckSmi(src); j(NegateCondition(smi), on_not_smi, near_jump); } -void TurboAssembler::SmiAddConstant(Operand dst, Smi constant) { +void MacroAssembler::SmiAddConstant(Operand dst, Smi constant) { if (constant.value() != 0) { if (SmiValuesAre32Bits()) { addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant.value())); @@ -1688,7 +1649,7 @@ void TurboAssembler::SmiAddConstant(Operand dst, Smi constant) { } } -SmiIndex TurboAssembler::SmiToIndex(Register dst, Register src, int shift) { +SmiIndex MacroAssembler::SmiToIndex(Register dst, Register src, int shift) { if (SmiValuesAre32Bits()) { DCHECK(is_uint6(shift)); // There is a possible optimization if shift is in the range 60-63, but that @@ -1719,7 +1680,7 @@ SmiIndex TurboAssembler::SmiToIndex(Register dst, Register src, int shift) { } } -void TurboAssembler::Switch(Register scratch, Register reg, int case_value_base, +void MacroAssembler::Switch(Register scratch, Register reg, int case_value_base, Label** labels, int num_labels) { Register table = scratch; Label fallthrough, jump_table; @@ -1739,7 +1700,7 @@ void TurboAssembler::Switch(Register scratch, Register reg, int case_value_base, bind(&fallthrough); } -void TurboAssembler::Push(Smi source) { +void MacroAssembler::Push(Smi source) { intptr_t smi = static_cast(source.ptr()); if (is_int32(smi)) { Push(Immediate(static_cast(smi))); @@ -1760,7 +1721,7 @@ void TurboAssembler::Push(Smi source) { // ---------------------------------------------------------------------------- -void TurboAssembler::Move(Register dst, Smi source) { +void MacroAssembler::Move(Register dst, Smi source) { static_assert(kSmiTag == 0); int value = source.value(); if (value == 0) { @@ -1773,7 +1734,7 @@ void TurboAssembler::Move(Register dst, Smi source) { } } -void TurboAssembler::Move(Operand dst, intptr_t x) { +void MacroAssembler::Move(Operand dst, intptr_t x) { if (is_int32(x)) { movq(dst, Immediate(static_cast(x))); } else { @@ -1782,7 +1743,7 @@ void TurboAssembler::Move(Operand dst, intptr_t x) { } } -void TurboAssembler::Move(Register dst, ExternalReference ext) { +void MacroAssembler::Move(Register dst, ExternalReference ext) { // TODO(jgruber,v8:8887): Also consider a root-relative load when generating // non-isolate-independent code. In many cases it might be cheaper than // embedding the relocatable value. 
@@ -1793,14 +1754,14 @@ void TurboAssembler::Move(Register dst, ExternalReference ext) { movq(dst, Immediate64(ext.address(), RelocInfo::EXTERNAL_REFERENCE)); } -void TurboAssembler::Move(Register dst, Register src) { +void MacroAssembler::Move(Register dst, Register src) { if (dst != src) { movq(dst, src); } } -void TurboAssembler::Move(Register dst, Operand src) { movq(dst, src); } -void TurboAssembler::Move(Register dst, Immediate src) { +void MacroAssembler::Move(Register dst, Operand src) { movq(dst, src); } +void MacroAssembler::Move(Register dst, Immediate src) { if (src.rmode() == RelocInfo::Mode::NO_INFO) { Move(dst, src.value()); } else { @@ -1808,13 +1769,13 @@ void TurboAssembler::Move(Register dst, Immediate src) { } } -void TurboAssembler::Move(XMMRegister dst, XMMRegister src) { +void MacroAssembler::Move(XMMRegister dst, XMMRegister src) { if (dst != src) { Movaps(dst, src); } } -void TurboAssembler::MovePair(Register dst0, Register src0, Register dst1, +void MacroAssembler::MovePair(Register dst0, Register src0, Register dst1, Register src1) { if (dst0 != src1) { // Normal case: Writing to dst0 does not destroy src1. @@ -1833,7 +1794,7 @@ void TurboAssembler::MovePair(Register dst0, Register src0, Register dst1, } } -void TurboAssembler::MoveNumber(Register dst, double value) { +void MacroAssembler::MoveNumber(Register dst, double value) { int32_t smi; if (DoubleToSmiInteger(value, &smi)) { Move(dst, Smi::FromInt(smi)); @@ -1842,7 +1803,7 @@ void TurboAssembler::MoveNumber(Register dst, double value) { } } -void TurboAssembler::Move(XMMRegister dst, uint32_t src) { +void MacroAssembler::Move(XMMRegister dst, uint32_t src) { if (src == 0) { Xorps(dst, dst); } else { @@ -1861,7 +1822,7 @@ void TurboAssembler::Move(XMMRegister dst, uint32_t src) { } } -void TurboAssembler::Move(XMMRegister dst, uint64_t src) { +void MacroAssembler::Move(XMMRegister dst, uint64_t src) { if (src == 0) { Xorpd(dst, dst); } else { @@ -1886,7 +1847,7 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t src) { } } -void TurboAssembler::Move(XMMRegister dst, uint64_t high, uint64_t low) { +void MacroAssembler::Move(XMMRegister dst, uint64_t high, uint64_t low) { if (high == low) { Move(dst, low); Punpcklqdq(dst, dst); @@ -1967,12 +1928,12 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit, j(below_equal, on_in_range, near_jump); } -void TurboAssembler::Push(Handle source) { +void MacroAssembler::Push(Handle source) { Move(kScratchRegister, source); Push(kScratchRegister); } -void TurboAssembler::PushArray(Register array, Register size, Register scratch, +void MacroAssembler::PushArray(Register array, Register size, Register scratch, PushArrayOrder order) { DCHECK(!AreAliased(array, size, scratch)); Register counter = scratch; @@ -1997,7 +1958,7 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch, } } -void TurboAssembler::Move(Register result, Handle object, +void MacroAssembler::Move(Register result, Handle object, RelocInfo::Mode rmode) { // TODO(jgruber,v8:8887): Also consider a root-relative load when generating // non-isolate-independent code. 
In many cases it might be cheaper than @@ -2016,7 +1977,7 @@ void TurboAssembler::Move(Register result, Handle object, } } -void TurboAssembler::Move(Operand dst, Handle object, +void MacroAssembler::Move(Operand dst, Handle object, RelocInfo::Mode rmode) { Move(kScratchRegister, object, rmode); movq(dst, kScratchRegister); @@ -2041,7 +2002,7 @@ void MacroAssembler::DropUnderReturnAddress(int stack_elements, PushReturnAddressFrom(scratch); } -void TurboAssembler::DropArguments(Register count, ArgumentsCountType type, +void MacroAssembler::DropArguments(Register count, ArgumentsCountType type, ArgumentsCountMode mode) { int receiver_bytes = (mode == kCountExcludesReceiver) ? kSystemPointerSize : 0; @@ -2066,7 +2027,7 @@ void TurboAssembler::DropArguments(Register count, ArgumentsCountType type, } } -void TurboAssembler::DropArguments(Register count, Register scratch, +void MacroAssembler::DropArguments(Register count, Register scratch, ArgumentsCountType type, ArgumentsCountMode mode) { DCHECK(!AreAliased(count, scratch)); @@ -2075,7 +2036,7 @@ void TurboAssembler::DropArguments(Register count, Register scratch, PushReturnAddressFrom(scratch); } -void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, +void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc, Register receiver, Register scratch, ArgumentsCountType type, @@ -2087,7 +2048,7 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, PushReturnAddressFrom(scratch); } -void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, +void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc, Operand receiver, Register scratch, ArgumentsCountType type, @@ -2100,13 +2061,13 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, PushReturnAddressFrom(scratch); } -void TurboAssembler::Push(Register src) { pushq(src); } +void MacroAssembler::Push(Register src) { pushq(src); } -void TurboAssembler::Push(Operand src) { pushq(src); } +void MacroAssembler::Push(Operand src) { pushq(src); } void MacroAssembler::PushQuad(Operand src) { pushq(src); } -void TurboAssembler::Push(Immediate value) { pushq(value); } +void MacroAssembler::Push(Immediate value) { pushq(value); } void MacroAssembler::PushImm32(int32_t imm32) { pushq_imm32(imm32); } @@ -2116,27 +2077,27 @@ void MacroAssembler::Pop(Operand dst) { popq(dst); } void MacroAssembler::PopQuad(Operand dst) { popq(dst); } -void TurboAssembler::Jump(const ExternalReference& reference) { +void MacroAssembler::Jump(const ExternalReference& reference) { DCHECK(root_array_available()); jmp(Operand(kRootRegister, RootRegisterOffsetForExternalReferenceTableEntry( isolate(), reference))); } -void TurboAssembler::Jump(Operand op) { jmp(op); } +void MacroAssembler::Jump(Operand op) { jmp(op); } -void TurboAssembler::Jump(Operand op, Condition cc) { +void MacroAssembler::Jump(Operand op, Condition cc) { Label skip; j(NegateCondition(cc), &skip, Label::kNear); Jump(op); bind(&skip); } -void TurboAssembler::Jump(Address destination, RelocInfo::Mode rmode) { +void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) { Move(kScratchRegister, destination, rmode); jmp(kScratchRegister); } -void TurboAssembler::Jump(Address destination, RelocInfo::Mode rmode, +void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode, Condition cc) { Label skip; j(NegateCondition(cc), &skip, Label::kNear); @@ -2144,7 +2105,7 @@ void TurboAssembler::Jump(Address destination, RelocInfo::Mode rmode, bind(&skip); } -void 
TurboAssembler::Jump(Handle code_object, RelocInfo::Mode rmode) { +void MacroAssembler::Jump(Handle code_object, RelocInfo::Mode rmode) { DCHECK_IMPLIES(options().isolate_independent_code, Builtins::IsIsolateIndependentBuiltin(*code_object)); Builtin builtin = Builtin::kNoBuiltinId; @@ -2156,7 +2117,7 @@ void TurboAssembler::Jump(Handle code_object, RelocInfo::Mode rmode) { jmp(code_object, rmode); } -void TurboAssembler::Jump(Handle code_object, RelocInfo::Mode rmode, +void MacroAssembler::Jump(Handle code_object, RelocInfo::Mode rmode, Condition cc) { DCHECK_IMPLIES(options().isolate_independent_code, Builtins::IsIsolateIndependentBuiltin(*code_object)); @@ -2174,12 +2135,12 @@ void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) { jmp(kOffHeapTrampolineRegister); } -void TurboAssembler::Call(ExternalReference ext) { +void MacroAssembler::Call(ExternalReference ext) { LoadAddress(kScratchRegister, ext); call(kScratchRegister); } -void TurboAssembler::Call(Operand op) { +void MacroAssembler::Call(Operand op) { if (!CpuFeatures::IsSupported(INTEL_ATOM)) { call(op); } else { @@ -2188,12 +2149,12 @@ void TurboAssembler::Call(Operand op) { } } -void TurboAssembler::Call(Address destination, RelocInfo::Mode rmode) { +void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) { Move(kScratchRegister, destination, rmode); call(kScratchRegister); } -void TurboAssembler::Call(Handle code_object, RelocInfo::Mode rmode) { +void MacroAssembler::Call(Handle code_object, RelocInfo::Mode rmode) { DCHECK_IMPLIES(options().isolate_independent_code, Builtins::IsIsolateIndependentBuiltin(*code_object)); Builtin builtin = Builtin::kNoBuiltinId; @@ -2205,12 +2166,12 @@ void TurboAssembler::Call(Handle code_object, RelocInfo::Mode rmode) { call(code_object, rmode); } -Operand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { +Operand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) { DCHECK(root_array_available()); return Operand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(builtin)); } -Operand TurboAssembler::EntryFromBuiltinIndexAsOperand(Register builtin_index) { +Operand MacroAssembler::EntryFromBuiltinIndexAsOperand(Register builtin_index) { if (SmiValuesAre32Bits()) { // The builtin_index register contains the builtin index as a Smi. 
SmiUntagUnsigned(builtin_index); @@ -2227,11 +2188,11 @@ Operand TurboAssembler::EntryFromBuiltinIndexAsOperand(Register builtin_index) { } } -void TurboAssembler::CallBuiltinByIndex(Register builtin_index) { +void MacroAssembler::CallBuiltinByIndex(Register builtin_index) { Call(EntryFromBuiltinIndexAsOperand(builtin_index)); } -void TurboAssembler::CallBuiltin(Builtin builtin) { +void MacroAssembler::CallBuiltin(Builtin builtin) { ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin)); switch (options().builtin_call_jump_mode) { case BuiltinCallJumpMode::kAbsolute: @@ -2251,7 +2212,7 @@ void TurboAssembler::CallBuiltin(Builtin builtin) { } } -void TurboAssembler::TailCallBuiltin(Builtin builtin) { +void MacroAssembler::TailCallBuiltin(Builtin builtin) { ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("tail call", builtin)); switch (options().builtin_call_jump_mode) { @@ -2272,7 +2233,7 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) { } } -void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cc) { +void MacroAssembler::TailCallBuiltin(Builtin builtin, Condition cc) { ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("tail call", builtin)); switch (options().builtin_call_jump_mode) { @@ -2293,12 +2254,12 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin, Condition cc) { } } -void TurboAssembler::LoadCodeEntry(Register destination, Register code_object) { +void MacroAssembler::LoadCodeEntry(Register destination, Register code_object) { ASM_CODE_COMMENT(this); movq(destination, FieldOperand(code_object, Code::kCodeEntryPointOffset)); } -void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, +void MacroAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, Register code_object) { ASM_CODE_COMMENT(this); // Compute the InstructionStream object pointer from the code entry point. 
@@ -2306,12 +2267,12 @@ void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination, subq(destination, Immediate(InstructionStream::kHeaderSize - kHeapObjectTag)); } -void TurboAssembler::CallCodeObject(Register code_object) { +void MacroAssembler::CallCodeObject(Register code_object) { LoadCodeEntry(code_object, code_object); call(code_object); } -void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { +void MacroAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { LoadCodeEntry(code_object, code_object); switch (jump_mode) { case JumpMode::kJump: @@ -2324,7 +2285,7 @@ void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { } } -void TurboAssembler::PextrdPreSse41(Register dst, XMMRegister src, +void MacroAssembler::PextrdPreSse41(Register dst, XMMRegister src, uint8_t imm8) { if (imm8 == 0) { Movd(dst, src); @@ -2337,42 +2298,42 @@ void TurboAssembler::PextrdPreSse41(Register dst, XMMRegister src, namespace { template -void PinsrdPreSse41Helper(TurboAssembler* tasm, XMMRegister dst, Op src, +void PinsrdPreSse41Helper(MacroAssembler* masm, XMMRegister dst, Op src, uint8_t imm8, uint32_t* load_pc_offset) { - tasm->Movd(kScratchDoubleReg, src); - if (load_pc_offset) *load_pc_offset = tasm->pc_offset(); + masm->Movd(kScratchDoubleReg, src); + if (load_pc_offset) *load_pc_offset = masm->pc_offset(); if (imm8 == 1) { - tasm->punpckldq(dst, kScratchDoubleReg); + masm->punpckldq(dst, kScratchDoubleReg); } else { DCHECK_EQ(0, imm8); - tasm->Movss(dst, kScratchDoubleReg); + masm->Movss(dst, kScratchDoubleReg); } } } // namespace -void TurboAssembler::PinsrdPreSse41(XMMRegister dst, Register src, uint8_t imm8, +void MacroAssembler::PinsrdPreSse41(XMMRegister dst, Register src, uint8_t imm8, uint32_t* load_pc_offset) { PinsrdPreSse41Helper(this, dst, src, imm8, load_pc_offset); } -void TurboAssembler::PinsrdPreSse41(XMMRegister dst, Operand src, uint8_t imm8, +void MacroAssembler::PinsrdPreSse41(XMMRegister dst, Operand src, uint8_t imm8, uint32_t* load_pc_offset) { PinsrdPreSse41Helper(this, dst, src, imm8, load_pc_offset); } -void TurboAssembler::Pinsrq(XMMRegister dst, XMMRegister src1, Register src2, +void MacroAssembler::Pinsrq(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8, uint32_t* load_pc_offset) { PinsrHelper(this, &Assembler::vpinsrq, &Assembler::pinsrq, dst, src1, src2, imm8, load_pc_offset, {SSE4_1}); } -void TurboAssembler::Pinsrq(XMMRegister dst, XMMRegister src1, Operand src2, +void MacroAssembler::Pinsrq(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8, uint32_t* load_pc_offset) { PinsrHelper(this, &Assembler::vpinsrq, &Assembler::pinsrq, dst, src1, src2, imm8, load_pc_offset, {SSE4_1}); } -void TurboAssembler::Lzcntl(Register dst, Register src) { +void MacroAssembler::Lzcntl(Register dst, Register src) { if (CpuFeatures::IsSupported(LZCNT)) { CpuFeatureScope scope(this, LZCNT); lzcntl(dst, src); @@ -2386,7 +2347,7 @@ void TurboAssembler::Lzcntl(Register dst, Register src) { xorl(dst, Immediate(31)); // for x in [0..31], 31^x == 31 - x } -void TurboAssembler::Lzcntl(Register dst, Operand src) { +void MacroAssembler::Lzcntl(Register dst, Operand src) { if (CpuFeatures::IsSupported(LZCNT)) { CpuFeatureScope scope(this, LZCNT); lzcntl(dst, src); @@ -2400,7 +2361,7 @@ void TurboAssembler::Lzcntl(Register dst, Operand src) { xorl(dst, Immediate(31)); // for x in [0..31], 31^x == 31 - x } -void TurboAssembler::Lzcntq(Register dst, Register src) { +void 
MacroAssembler::Lzcntq(Register dst, Register src) { if (CpuFeatures::IsSupported(LZCNT)) { CpuFeatureScope scope(this, LZCNT); lzcntq(dst, src); @@ -2414,7 +2375,7 @@ void TurboAssembler::Lzcntq(Register dst, Register src) { xorl(dst, Immediate(63)); // for x in [0..63], 63^x == 63 - x } -void TurboAssembler::Lzcntq(Register dst, Operand src) { +void MacroAssembler::Lzcntq(Register dst, Operand src) { if (CpuFeatures::IsSupported(LZCNT)) { CpuFeatureScope scope(this, LZCNT); lzcntq(dst, src); @@ -2428,7 +2389,7 @@ void TurboAssembler::Lzcntq(Register dst, Operand src) { xorl(dst, Immediate(63)); // for x in [0..63], 63^x == 63 - x } -void TurboAssembler::Tzcntq(Register dst, Register src) { +void MacroAssembler::Tzcntq(Register dst, Register src) { if (CpuFeatures::IsSupported(BMI1)) { CpuFeatureScope scope(this, BMI1); tzcntq(dst, src); @@ -2442,7 +2403,7 @@ void TurboAssembler::Tzcntq(Register dst, Register src) { bind(&not_zero_src); } -void TurboAssembler::Tzcntq(Register dst, Operand src) { +void MacroAssembler::Tzcntq(Register dst, Operand src) { if (CpuFeatures::IsSupported(BMI1)) { CpuFeatureScope scope(this, BMI1); tzcntq(dst, src); @@ -2456,7 +2417,7 @@ void TurboAssembler::Tzcntq(Register dst, Operand src) { bind(&not_zero_src); } -void TurboAssembler::Tzcntl(Register dst, Register src) { +void MacroAssembler::Tzcntl(Register dst, Register src) { if (CpuFeatures::IsSupported(BMI1)) { CpuFeatureScope scope(this, BMI1); tzcntl(dst, src); @@ -2469,7 +2430,7 @@ void TurboAssembler::Tzcntl(Register dst, Register src) { bind(&not_zero_src); } -void TurboAssembler::Tzcntl(Register dst, Operand src) { +void MacroAssembler::Tzcntl(Register dst, Operand src) { if (CpuFeatures::IsSupported(BMI1)) { CpuFeatureScope scope(this, BMI1); tzcntl(dst, src); @@ -2482,7 +2443,7 @@ void TurboAssembler::Tzcntl(Register dst, Operand src) { bind(&not_zero_src); } -void TurboAssembler::Popcntl(Register dst, Register src) { +void MacroAssembler::Popcntl(Register dst, Register src) { if (CpuFeatures::IsSupported(POPCNT)) { CpuFeatureScope scope(this, POPCNT); popcntl(dst, src); @@ -2491,7 +2452,7 @@ void TurboAssembler::Popcntl(Register dst, Register src) { UNREACHABLE(); } -void TurboAssembler::Popcntl(Register dst, Operand src) { +void MacroAssembler::Popcntl(Register dst, Operand src) { if (CpuFeatures::IsSupported(POPCNT)) { CpuFeatureScope scope(this, POPCNT); popcntl(dst, src); @@ -2500,7 +2461,7 @@ void TurboAssembler::Popcntl(Register dst, Operand src) { UNREACHABLE(); } -void TurboAssembler::Popcntq(Register dst, Register src) { +void MacroAssembler::Popcntq(Register dst, Register src) { if (CpuFeatures::IsSupported(POPCNT)) { CpuFeatureScope scope(this, POPCNT); popcntq(dst, src); @@ -2509,7 +2470,7 @@ void TurboAssembler::Popcntq(Register dst, Register src) { UNREACHABLE(); } -void TurboAssembler::Popcntq(Register dst, Operand src) { +void MacroAssembler::Popcntq(Register dst, Operand src) { if (CpuFeatures::IsSupported(POPCNT)) { CpuFeatureScope scope(this, POPCNT); popcntq(dst, src); @@ -2542,9 +2503,9 @@ void MacroAssembler::PopStackHandler() { addq(rsp, Immediate(StackHandlerConstants::kSize - kSystemPointerSize)); } -void TurboAssembler::Ret() { ret(0); } +void MacroAssembler::Ret() { ret(0); } -void TurboAssembler::Ret(int bytes_dropped, Register scratch) { +void MacroAssembler::Ret(int bytes_dropped, Register scratch) { if (is_uint16(bytes_dropped)) { ret(bytes_dropped); } else { @@ -2555,7 +2516,7 @@ void TurboAssembler::Ret(int bytes_dropped, Register scratch) { } } -void
TurboAssembler::IncsspqIfSupported(Register number_of_words, +void MacroAssembler::IncsspqIfSupported(Register number_of_words, Register scratch) { // Optimized code can validate at runtime whether the cpu supports the // incsspq instruction, so it shouldn't use this method. @@ -2578,7 +2539,7 @@ void MacroAssembler::CmpObjectType(Register heap_object, InstanceType type, CmpInstanceType(map, type); } -void TurboAssembler::CmpInstanceType(Register map, InstanceType type) { +void MacroAssembler::CmpInstanceType(Register map, InstanceType type) { cmpw(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type)); } @@ -2602,28 +2563,28 @@ Immediate MacroAssembler::ClearedValue() const { } #ifdef V8_ENABLE_DEBUG_CODE -void TurboAssembler::AssertNotSmi(Register object) { +void MacroAssembler::AssertNotSmi(Register object) { if (!v8_flags.debug_code) return; ASM_CODE_COMMENT(this); Condition is_smi = CheckSmi(object); Check(NegateCondition(is_smi), AbortReason::kOperandIsASmi); } -void TurboAssembler::AssertSmi(Register object) { +void MacroAssembler::AssertSmi(Register object) { if (!v8_flags.debug_code) return; ASM_CODE_COMMENT(this); Condition is_smi = CheckSmi(object); Check(is_smi, AbortReason::kOperandIsNotASmi); } -void TurboAssembler::AssertSmi(Operand object) { +void MacroAssembler::AssertSmi(Operand object) { if (!v8_flags.debug_code) return; ASM_CODE_COMMENT(this); Condition is_smi = CheckSmi(object); Check(is_smi, AbortReason::kOperandIsNotASmi); } -void TurboAssembler::AssertZeroExtended(Register int32_register) { +void MacroAssembler::AssertZeroExtended(Register int32_register) { if (!v8_flags.debug_code) return; ASM_CODE_COMMENT(this); DCHECK_NE(int32_register, kScratchRegister); @@ -2632,7 +2593,7 @@ void TurboAssembler::AssertZeroExtended(Register int32_register) { Check(below_equal, AbortReason::k32BitValueInRegisterIsNotZeroExtended); } -void TurboAssembler::AssertSignedBitOfSmiIsZero(Register smi_register) { +void MacroAssembler::AssertSignedBitOfSmiIsZero(Register smi_register) { if (!v8_flags.debug_code) return; ASM_CODE_COMMENT(this); DCHECK(COMPRESS_POINTERS_BOOL); @@ -2640,7 +2601,7 @@ void TurboAssembler::AssertSignedBitOfSmiIsZero(Register smi_register) { Check(zero, AbortReason::kSignedBitOfSmiIsNotZero); } -void TurboAssembler::AssertMap(Register object) { +void MacroAssembler::AssertMap(Register object) { if (!v8_flags.debug_code) return; ASM_CODE_COMMENT(this); testb(object, Immediate(kSmiTagMask)); @@ -2652,7 +2613,7 @@ void TurboAssembler::AssertMap(Register object) { Check(equal, AbortReason::kOperandIsNotAMap); } -void TurboAssembler::AssertCode(Register object) { +void MacroAssembler::AssertCode(Register object) { if (!v8_flags.debug_code) return; ASM_CODE_COMMENT(this); testb(object, Immediate(kSmiTagMask)); @@ -2749,11 +2710,11 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) { bind(&done_checking); } -void TurboAssembler::Assert(Condition cc, AbortReason reason) { +void MacroAssembler::Assert(Condition cc, AbortReason reason) { if (v8_flags.debug_code) Check(cc, reason); } -void TurboAssembler::AssertUnreachable(AbortReason reason) { +void MacroAssembler::AssertUnreachable(AbortReason reason) { if (v8_flags.debug_code) Abort(reason); } #endif // V8_ENABLE_DEBUG_CODE @@ -2803,7 +2764,7 @@ void MacroAssembler::InvokeFunction(Register function, Register new_target, Register actual_parameter_count, InvokeType type) { ASM_CODE_COMMENT(this); - LoadTaggedPointerField( + LoadTaggedField( rbx, FieldOperand(function, 
JSFunction::kSharedFunctionInfoOffset)); movzxwq(rbx, FieldOperand(rbx, SharedFunctionInfo::kFormalParameterCountOffset)); @@ -2816,8 +2777,7 @@ void MacroAssembler::InvokeFunction(Register function, Register new_target, Register actual_parameter_count, InvokeType type) { DCHECK_EQ(function, rdi); - LoadTaggedPointerField(rsi, - FieldOperand(function, JSFunction::kContextOffset)); + LoadTaggedField(rsi, FieldOperand(function, JSFunction::kContextOffset)); InvokeFunctionCode(rdi, new_target, expected_parameter_count, actual_parameter_count, type); } @@ -2857,7 +2817,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, // allow recompilation to take effect without changing any of the // call sites. static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch"); - LoadTaggedPointerField(rcx, FieldOperand(function, JSFunction::kCodeOffset)); + LoadTaggedField(rcx, FieldOperand(function, JSFunction::kCodeOffset)); switch (type) { case InvokeType::kCall: CallCodeObject(rcx); @@ -2884,10 +2844,10 @@ Operand MacroAssembler::StackLimitAsOperand(StackLimitKind kind) { kind == StackLimitKind::kRealStackLimit ? ExternalReference::address_of_real_jslimit(isolate) : ExternalReference::address_of_jslimit(isolate); - DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit)); + DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit)); intptr_t offset = - TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit); + MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit); CHECK(is_int32(offset)); return Operand(kRootRegister, static_cast(offset)); } @@ -3015,14 +2975,14 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target, SmiUntag(expected_parameter_count); } -void TurboAssembler::StubPrologue(StackFrame::Type type) { +void MacroAssembler::StubPrologue(StackFrame::Type type) { ASM_CODE_COMMENT(this); pushq(rbp); // Caller's frame pointer. movq(rbp, rsp); Push(Immediate(StackFrame::TypeToMarker(type))); } -void TurboAssembler::Prologue() { +void MacroAssembler::Prologue() { ASM_CODE_COMMENT(this); pushq(rbp); // Caller's frame pointer. movq(rbp, rsp); @@ -3031,7 +2991,7 @@ void TurboAssembler::Prologue() { Push(kJavaScriptCallArgCountRegister); // Actual argument count. } -void TurboAssembler::EnterFrame(StackFrame::Type type) { +void MacroAssembler::EnterFrame(StackFrame::Type type) { ASM_CODE_COMMENT(this); pushq(rbp); movq(rbp, rsp); @@ -3043,7 +3003,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) { #endif // V8_ENABLE_WEBASSEMBLY } -void TurboAssembler::LeaveFrame(StackFrame::Type type) { +void MacroAssembler::LeaveFrame(StackFrame::Type type) { ASM_CODE_COMMENT(this); // TODO(v8:11429): Consider passing BASELINE instead, and checking for // IsJSFrame or similar. 
Could then unify with manual frame leaves in the @@ -3058,7 +3018,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) { } #if defined(V8_TARGET_OS_WIN) || defined(V8_TARGET_OS_MACOS) -void TurboAssembler::AllocateStackSpace(Register bytes_scratch) { +void MacroAssembler::AllocateStackSpace(Register bytes_scratch) { ASM_CODE_COMMENT(this); // On Windows and on macOS, we cannot increment the stack size by more than // one page (minimum page size is 4KB) without accessing at least one byte on @@ -3080,7 +3040,7 @@ void TurboAssembler::AllocateStackSpace(Register bytes_scratch) { subq(rsp, bytes_scratch); } -void TurboAssembler::AllocateStackSpace(int bytes) { +void MacroAssembler::AllocateStackSpace(int bytes) { ASM_CODE_COMMENT(this); DCHECK_GE(bytes, 0); while (bytes >= kStackPageSize) { @@ -3227,11 +3187,11 @@ void MacroAssembler::LoadNativeContextSlot(Register dst, int index) { ASM_CODE_COMMENT(this); // Load native context. LoadMap(dst, rsi); - LoadTaggedPointerField( + LoadTaggedField( dst, FieldOperand(dst, Map::kConstructorOrBackPointerOrNativeContextOffset)); // Load value from native context. - LoadTaggedPointerField(dst, Operand(dst, Context::SlotOffset(index))); + LoadTaggedField(dst, Operand(dst, Context::SlotOffset(index))); } void MacroAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result, @@ -3240,7 +3200,7 @@ void MacroAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result, Label* on_result, Label::Distance distance) { Label fallthrough; - LoadTaggedPointerField( + LoadTaggedField( scratch_and_result, FieldOperand(feedback_vector, FeedbackVector::OffsetOfElementAt(slot.ToInt()))); @@ -3260,7 +3220,7 @@ void MacroAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result, Move(scratch_and_result, 0); } -int TurboAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) { +int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) { // On Windows 64 stack slots are reserved by the caller for all arguments // including the ones passed in registers, and space is always allocated for // the four register arguments even if the function takes fewer than four @@ -3278,7 +3238,7 @@ int TurboAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) { #endif } -void TurboAssembler::PrepareCallCFunction(int num_arguments) { +void MacroAssembler::PrepareCallCFunction(int num_arguments) { ASM_CODE_COMMENT(this); int frame_alignment = base::OS::ActivationFrameAlignment(); DCHECK_NE(frame_alignment, 0); @@ -3295,14 +3255,14 @@ void TurboAssembler::PrepareCallCFunction(int num_arguments) { kScratchRegister); } -void TurboAssembler::CallCFunction(ExternalReference function, +void MacroAssembler::CallCFunction(ExternalReference function, int num_arguments) { ASM_CODE_COMMENT(this); LoadAddress(rax, function); CallCFunction(rax, num_arguments); } -void TurboAssembler::CallCFunction(Register function, int num_arguments) { +void MacroAssembler::CallCFunction(Register function, int num_arguments) { ASM_CODE_COMMENT(this); DCHECK_LE(num_arguments, kMaxCParameters); DCHECK(has_frame()); @@ -3376,7 +3336,7 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) { movq(rsp, Operand(rsp, argument_slots_on_stack * kSystemPointerSize)); } -void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask, +void MacroAssembler::CheckPageFlag(Register object, Register scratch, int mask, Condition cc, Label* condition_met, Label::Distance condition_met_distance) { ASM_CODE_COMMENT(this); @@ -3396,7 +3356,7 @@ void 
TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask, j(cc, condition_met, condition_met_distance); } -void TurboAssembler::ComputeCodeStartAddress(Register dst) { +void MacroAssembler::ComputeCodeStartAddress(Register dst) { Label current; bind(&current); int pc = pc_offset(); @@ -3411,22 +3371,21 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) { // the flags in the referenced {Code} object; // 2. test kMarkedForDeoptimizationBit in those flags; and // 3. if it is not zero then it jumps to the builtin. -void TurboAssembler::BailoutIfDeoptimized(Register scratch) { +void MacroAssembler::BailoutIfDeoptimized(Register scratch) { int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize; - LoadTaggedPointerField(scratch, - Operand(kJavaScriptCallCodeStartRegister, offset)); + LoadTaggedField(scratch, Operand(kJavaScriptCallCodeStartRegister, offset)); testl(FieldOperand(scratch, Code::kKindSpecificFlagsOffset), Immediate(1 << InstructionStream::kMarkedForDeoptimizationBit)); Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode), RelocInfo::CODE_TARGET, not_zero); } -void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, +void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit, DeoptimizeKind kind, Label* ret, Label*) { ASM_CODE_COMMENT(this); // Note: Assembler::call is used here on purpose to guarantee fixed-size - // exits even on Atom CPUs; see TurboAssembler::Call for Atom-specific + // exits even on Atom CPUs; see MacroAssembler::Call for Atom-specific // performance tuning which emits a different instruction sequence. call(EntryFromBuiltinAsOperand(target)); DCHECK_EQ(SizeOfCodeGeneratedSince(exit), @@ -3434,8 +3393,8 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, : Deoptimizer::kEagerDeoptExitSize); } -void TurboAssembler::Trap() { int3(); } -void TurboAssembler::DebugBreak() { int3(); } +void MacroAssembler::Trap() { int3(); } +void MacroAssembler::DebugBreak() { int3(); } } // namespace internal } // namespace v8 diff --git a/src/codegen/x64/macro-assembler-x64.h b/src/codegen/x64/macro-assembler-x64.h index 5003555b00..f0cd23083d 100644 --- a/src/codegen/x64/macro-assembler-x64.h +++ b/src/codegen/x64/macro-assembler-x64.h @@ -55,10 +55,10 @@ class StackArgumentsAccessor { DISALLOW_IMPLICIT_CONSTRUCTORS(StackArgumentsAccessor); }; -class V8_EXPORT_PRIVATE TurboAssembler - : public SharedTurboAssemblerBase<TurboAssembler> { +class V8_EXPORT_PRIVATE MacroAssembler + : public SharedMacroAssembler<MacroAssembler> { public: - using SharedTurboAssemblerBase<TurboAssembler>::SharedTurboAssemblerBase; + using SharedMacroAssembler<MacroAssembler>::SharedMacroAssembler; void PushReturnAddressFrom(Register src) { pushq(src); } void PopReturnAddressTo(Register dst) { popq(dst); } @@ -583,35 +583,21 @@ class V8_EXPORT_PRIVATE TurboAssembler // --------------------------------------------------------------------------- // Pointer compression support - // Loads a field containing a HeapObject and decompresses it if pointer - // compression is enabled. - void LoadTaggedPointerField(Register destination, Operand field_operand); + // Loads a field containing any tagged value and decompresses it if necessary. + void LoadTaggedField(Register destination, Operand field_operand); - // Loads a field containing a HeapObject but does not decompress it when + // Loads a field containing any tagged value but does not decompress it when // pointer compression is enabled.
- void LoadTaggedPointerField(TaggedRegister destination, - Operand field_operand); + void LoadTaggedField(TaggedRegister destination, Operand field_operand); // Loads a field containing a Smi and decompresses it if pointer compression // is enabled. void LoadTaggedSignedField(Register destination, Operand field_operand); - // Loads a field containing any tagged value and decompresses it if necessary. - void LoadAnyTaggedField(Register destination, Operand field_operand); - - // Loads a field containing any tagged value but does not decompress it when - // pointer compression is enabled. - void LoadAnyTaggedField(TaggedRegister destination, Operand field_operand); - - // Loads a field containing a HeapObject, decompresses it if necessary and - // pushes full pointer to the stack. When pointer compression is enabled, - // uses |scratch| to decompress the value. - void PushTaggedPointerField(Operand field_operand, Register scratch); - // Loads a field containing any tagged value, decompresses it if necessary and // pushes the full pointer to the stack. When pointer compression is enabled, // uses |scratch| to decompress the value. - void PushTaggedAnyField(Operand field_operand, Register scratch); + void PushTaggedField(Operand field_operand, Register scratch); // Loads a field containing smi value and untags it. void SmiUntagField(Register dst, Operand src); @@ -626,10 +612,9 @@ class V8_EXPORT_PRIVATE TurboAssembler // The following macros work even when pointer compression is not enabled. void DecompressTaggedSigned(Register destination, Operand field_operand); - void DecompressTaggedPointer(Register destination, Operand field_operand); - void DecompressTaggedPointer(Register destination, Register source); - void DecompressTaggedPointer(Register destination, Tagged_t immediate); - void DecompressAnyTagged(Register destination, Operand field_operand); + void DecompressTagged(Register destination, Operand field_operand); + void DecompressTagged(Register destination, Register source); + void DecompressTagged(Register destination, Tagged_t immediate); // --------------------------------------------------------------------------- // V8 Sandbox support @@ -653,23 +638,6 @@ class V8_EXPORT_PRIVATE TurboAssembler IsolateRootLocation isolateRootLocation = IsolateRootLocation::kInRootRegister); - protected: - static const int kSmiShift = kSmiTagSize + kSmiShiftSize; - - // Returns a register holding the smi value. The register MUST NOT be - // modified. It may be the "smi 1 constant" register. - Register GetSmiConstant(Smi value); - - // Drops arguments assuming that the return address was already popped. - void DropArguments(Register count, ArgumentsCountType type = kCountIsInteger, - ArgumentsCountMode mode = kCountExcludesReceiver); -}; - -// MacroAssembler implements a collection of frequently used macros. -class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { - public: - using TurboAssembler::TurboAssembler; - // Loads and stores the value of an external reference. // Special case code for load and store to take advantage of // load_rax/store_rax if possible/necessary. @@ -781,7 +749,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // --------------------------------------------------------------------------- // Macro instructions. - using TurboAssembler::Cmp; void Cmp(Register dst, Handle source); void Cmp(Operand dst, Handle source); @@ -945,6 +912,17 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // In-place weak references. 
void LoadWeakValue(Register in_out, Label* target_if_cleared); + protected: + static const int kSmiShift = kSmiTagSize + kSmiShiftSize; + + // Returns a register holding the smi value. The register MUST NOT be + // modified. It may be the "smi 1 constant" register. + Register GetSmiConstant(Smi value); + + // Drops arguments assuming that the return address was already popped. + void DropArguments(Register count, ArgumentsCountType type = kCountIsInteger, + ArgumentsCountMode mode = kCountExcludesReceiver); + private: // Helper functions for generating invokes. void InvokePrologue(Register expected_parameter_count, diff --git a/src/common/globals.h b/src/common/globals.h index d379cbfc61..50153945a7 100644 --- a/src/common/globals.h +++ b/src/common/globals.h @@ -2035,7 +2035,8 @@ enum IsolateAddressId { V(TrapNullDereference) \ V(TrapIllegalCast) \ V(TrapArrayOutOfBounds) \ - V(TrapArrayTooLarge) + V(TrapArrayTooLarge) \ + V(TrapStringOffsetOutOfBounds) enum KeyedAccessLoadMode { STANDARD_LOAD, diff --git a/src/common/ptr-compr-inl.h b/src/common/ptr-compr-inl.h index a6965c3671..f4590ef374 100644 --- a/src/common/ptr-compr-inl.h +++ b/src/common/ptr-compr-inl.h @@ -69,8 +69,8 @@ Address V8HeapCompressionScheme::DecompressTaggedSigned(Tagged_t raw_value) { // static template -Address V8HeapCompressionScheme::DecompressTaggedPointer( - TOnHeapAddress on_heap_addr, Tagged_t raw_value) { +Address V8HeapCompressionScheme::DecompressTagged(TOnHeapAddress on_heap_addr, + Tagged_t raw_value) { #if defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE) && \ !defined(V8_COMPRESS_POINTERS_DONT_USE_GLOBAL_BASE) V8_ASSUME((base_ & kPtrComprCageBaseMask) == base_); @@ -79,19 +79,15 @@ Address V8HeapCompressionScheme::DecompressTaggedPointer( // For V8_ASSUME_ALIGNED to be considered for optimizations the following // addition has to happen on a pointer type. Address result = reinterpret_cast
<Address>(cage_base + raw_value); - V8_ASSUME(static_cast<Tagged_t>(result) == raw_value); - return result; #else Address cage_base = GetPtrComprCageBaseAddress(on_heap_addr); - return cage_base + static_cast<Address>(raw_value); + Address result = cage_base + static_cast<Address>
(raw_value); #endif -} - -// static -template <typename TOnHeapAddress> -Address V8HeapCompressionScheme::DecompressTaggedAny( - TOnHeapAddress on_heap_addr, Tagged_t raw_value) { - return DecompressTaggedPointer(on_heap_addr, raw_value); + // Allows to remove compress(decompress(...)) + V8_ASSUME(static_cast<Tagged_t>(result) == raw_value); + // Allows to remove SMI checks when the result is compared against a constant. + V8_ASSUME(HAS_SMI_TAG(result) == HAS_SMI_TAG(raw_value)); + return result; } // static @@ -102,10 +98,10 @@ void V8HeapCompressionScheme::ProcessIntermediatePointers( // If pointer compression is enabled, we may have random compressed pointers // on the stack that may be used for subsequent operations. // Extract, decompress and trace both halfwords. - Address decompressed_low = V8HeapCompressionScheme::DecompressTaggedPointer( + Address decompressed_low = V8HeapCompressionScheme::DecompressTagged( cage_base, static_cast<Tagged_t>(raw_value)); callback(decompressed_low); - Address decompressed_high = V8HeapCompressionScheme::DecompressTaggedPointer( + Address decompressed_high = V8HeapCompressionScheme::DecompressTagged( cage_base, static_cast<Tagged_t>(raw_value >> (sizeof(Tagged_t) * CHAR_BIT))); callback(decompressed_high); @@ -162,7 +158,7 @@ Address ExternalCodeCompressionScheme::DecompressTaggedSigned( // static template <typename TOnHeapAddress> -Address ExternalCodeCompressionScheme::DecompressTaggedPointer( +Address ExternalCodeCompressionScheme::DecompressTagged( TOnHeapAddress on_heap_addr, Tagged_t raw_value) { #if defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE) && \ !defined(V8_COMPRESS_POINTERS_DONT_USE_GLOBAL_BASE) @@ -172,19 +168,15 @@ Address ExternalCodeCompressionScheme::DecompressTaggedPointer( // For V8_ASSUME_ALIGNED to be considered for optimizations the following // addition has to happen on a pointer type. Address result = reinterpret_cast
<Address>(cage_base + raw_value); - V8_ASSUME(static_cast<Tagged_t>(result) == raw_value); - return result; #else Address cage_base = GetPtrComprCageBaseAddress(on_heap_addr); - return cage_base + static_cast<Address>(raw_value); + Address result = cage_base + static_cast<Address>
(raw_value); #endif -} - -// static -template -Address ExternalCodeCompressionScheme::DecompressTaggedAny( - TOnHeapAddress on_heap_addr, Tagged_t raw_value) { - return DecompressTaggedPointer(on_heap_addr, raw_value); + // Allows to remove compress(decompress(...)) + V8_ASSUME(static_cast(result) == raw_value); + // Allows to remove SMI checks when the result is compared against a constant. + V8_ASSUME(HAS_SMI_TAG(result) == HAS_SMI_TAG(raw_value)); + return result; } #endif // V8_EXTERNAL_CODE_SPACE @@ -223,15 +215,8 @@ Address V8HeapCompressionScheme::DecompressTaggedSigned(Tagged_t raw_value) { // static template -Address V8HeapCompressionScheme::DecompressTaggedPointer( - TOnHeapAddress on_heap_addr, Tagged_t raw_value) { - UNREACHABLE(); -} - -// static -template -Address V8HeapCompressionScheme::DecompressTaggedAny( - TOnHeapAddress on_heap_addr, Tagged_t raw_value) { +Address V8HeapCompressionScheme::DecompressTagged(TOnHeapAddress on_heap_addr, + Tagged_t raw_value) { UNREACHABLE(); } diff --git a/src/common/ptr-compr.h b/src/common/ptr-compr.h index a58ac448c9..9483994885 100644 --- a/src/common/ptr-compr.h +++ b/src/common/ptr-compr.h @@ -29,15 +29,10 @@ class V8HeapCompressionScheme { // Decompresses smi value. V8_INLINE static Address DecompressTaggedSigned(Tagged_t raw_value); - // Decompresses weak or strong heap object pointer or forwarding pointer, - // preserving both weak- and smi- tags. - template - V8_INLINE static Address DecompressTaggedPointer(TOnHeapAddress on_heap_addr, - Tagged_t raw_value); // Decompresses any tagged value, preserving both weak- and smi- tags. template - V8_INLINE static Address DecompressTaggedAny(TOnHeapAddress on_heap_addr, - Tagged_t raw_value); + V8_INLINE static Address DecompressTagged(TOnHeapAddress on_heap_addr, + Tagged_t raw_value); // Given a 64bit raw value, found on the stack, calls the callback function // with all possible pointers that may be "contained" in compressed form in @@ -82,15 +77,10 @@ class ExternalCodeCompressionScheme { // Decompresses smi value. V8_INLINE static Address DecompressTaggedSigned(Tagged_t raw_value); - // Decompresses weak or strong heap object pointer or forwarding pointer, - // preserving both weak- and smi- tags. - template - V8_INLINE static Address DecompressTaggedPointer(TOnHeapAddress on_heap_addr, - Tagged_t raw_value); // Decompresses any tagged value, preserving both weak- and smi- tags. template - V8_INLINE static Address DecompressTaggedAny(TOnHeapAddress on_heap_addr, - Tagged_t raw_value); + V8_INLINE static Address DecompressTagged(TOnHeapAddress on_heap_addr, + Tagged_t raw_value); #ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE // Process-wide cage base value used for decompression. diff --git a/src/compiler/backend/arm/code-generator-arm.cc b/src/compiler/backend/arm/code-generator-arm.cc index 4d7ab00bce..66ed969eb7 100644 --- a/src/compiler/backend/arm/code-generator-arm.cc +++ b/src/compiler/backend/arm/code-generator-arm.cc @@ -29,7 +29,7 @@ namespace v8 { namespace internal { namespace compiler { -#define __ tasm()-> +#define __ masm()-> // Adds Arm-specific methods to convert InstructionOperands. class ArmOperandConverter final : public InstructionOperandConverter { @@ -415,7 +415,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) { do { \ /* TODO(bmeurer): We should really get rid of this special instruction, */ \ /* and generate a CallAddress instruction instead. 
*/ \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ __ PrepareCallCFunction(0, 2); \ __ MovToFloatParameters(i.InputDoubleRegister(0), \ i.InputDoubleRegister(1)); \ @@ -429,7 +429,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) { do { \ /* TODO(bmeurer): We should really get rid of this special instruction, */ \ /* and generate a CallAddress instruction instead. */ \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ __ PrepareCallCFunction(0, 1); \ __ MovToFloatParameter(i.InputDoubleRegister(0)); \ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \ @@ -473,7 +473,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) { if (instr->InputAt(1)->IsImmediate()) { \ __ asm_imm(dt, dst, src, i.InputInt##width(1)); \ } else { \ - UseScratchRegisterScope temps(tasm()); \ + UseScratchRegisterScope temps(masm()); \ Simd128Register tmp = temps.AcquireQ(); \ Register shift = temps.Acquire(); \ constexpr int mask = (1 << width) - 1; \ @@ -493,7 +493,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) { if (instr->InputAt(1)->IsImmediate()) { \ __ asm_imm(dt, dst, src, i.InputInt##width(1)); \ } else { \ - UseScratchRegisterScope temps(tasm()); \ + UseScratchRegisterScope temps(masm()); \ Simd128Register tmp = temps.AcquireQ(); \ Register shift = temps.Acquire(); \ constexpr int mask = (1 << width) - 1; \ @@ -518,20 +518,20 @@ void CodeGenerator::AssemblePrepareTailCall() { namespace { -void FlushPendingPushRegisters(TurboAssembler* tasm, +void FlushPendingPushRegisters(MacroAssembler* masm, FrameAccessState* frame_access_state, ZoneVector* pending_pushes) { switch (pending_pushes->size()) { case 0: break; case 1: - tasm->push((*pending_pushes)[0]); + masm->push((*pending_pushes)[0]); break; case 2: - tasm->Push((*pending_pushes)[0], (*pending_pushes)[1]); + masm->Push((*pending_pushes)[0], (*pending_pushes)[1]); break; case 3: - tasm->Push((*pending_pushes)[0], (*pending_pushes)[1], + masm->Push((*pending_pushes)[0], (*pending_pushes)[1], (*pending_pushes)[2]); break; default: @@ -542,7 +542,7 @@ void FlushPendingPushRegisters(TurboAssembler* tasm, } void AdjustStackPointerForTailCall( - TurboAssembler* tasm, FrameAccessState* state, int new_slot_above_sp, + MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp, ZoneVector* pending_pushes = nullptr, bool allow_shrinkage = true) { int current_sp_offset = state->GetSPToFPSlotCount() + @@ -550,15 +550,15 @@ void AdjustStackPointerForTailCall( int stack_slot_delta = new_slot_above_sp - current_sp_offset; if (stack_slot_delta > 0) { if (pending_pushes != nullptr) { - FlushPendingPushRegisters(tasm, state, pending_pushes); + FlushPendingPushRegisters(masm, state, pending_pushes); } - tasm->AllocateStackSpace(stack_slot_delta * kSystemPointerSize); + masm->AllocateStackSpace(stack_slot_delta * kSystemPointerSize); state->IncreaseSPDelta(stack_slot_delta); } else if (allow_shrinkage && stack_slot_delta < 0) { if (pending_pushes != nullptr) { - FlushPendingPushRegisters(tasm, state, pending_pushes); + FlushPendingPushRegisters(masm, state, pending_pushes); } - tasm->add(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize)); + masm->add(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize)); state->IncreaseSPDelta(stack_slot_delta); } } @@ -601,7 +601,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, LocationOperand::cast(move->destination())); 
InstructionOperand source(move->source()); AdjustStackPointerForTailCall( - tasm(), frame_access_state(), + masm(), frame_access_state(), destination_location.index() - pending_pushes.size(), &pending_pushes); // Pushes of non-register data types are not supported. @@ -611,26 +611,26 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, // TODO(arm): We can push more than 3 registers at once. Add support in // the macro-assembler for pushing a list of registers. if (pending_pushes.size() == 3) { - FlushPendingPushRegisters(tasm(), frame_access_state(), + FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes); } move->Eliminate(); } - FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes); + FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes); } - AdjustStackPointerForTailCall(tasm(), frame_access_state(), + AdjustStackPointerForTailCall(masm(), frame_access_state(), first_unused_slot_offset, nullptr, false); } void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr, int first_unused_slot_offset) { - AdjustStackPointerForTailCall(tasm(), frame_access_state(), + AdjustStackPointerForTailCall(masm(), frame_access_state(), first_unused_slot_offset); } // Check that {kJavaScriptCallCodeStartRegister} is correct. void CodeGenerator::AssembleCodeStartRegisterCheck() { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ ComputeCodeStartAddress(scratch); __ cmp(scratch, kJavaScriptCallCodeStartRegister); @@ -645,7 +645,7 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() { // 2. test kMarkedForDeoptimizationBit in those flags; and // 3. if it is not zero then it jumps to the builtin. void CodeGenerator::BailoutIfDeoptimized() { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize; __ ldr(scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset)); @@ -747,7 +747,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArchCallJSFunction: { Register func = i.InputRegister(0); if (v8_flags.debug_code) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); // Check the function's context matches the context argument. __ ldr(scratch, FieldMemOperand(func, JSFunction::kContextOffset)); @@ -858,7 +858,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( { // We don't actually want to generate a pile of code for this, so just // claim there is a stack frame, without generating one. 
- FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE); + FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE); __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck), RelocInfo::CODE_TARGET); } @@ -1069,7 +1069,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( i.InputRegister(2), i.OutputSBit()); break; case kArmMls: { - CpuFeatureScope scope(tasm(), ARMv7); + CpuFeatureScope scope(masm(), ARMv7); __ mls(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1), i.InputRegister(2)); DCHECK_EQ(LeaveCC, i.OutputSBit()); @@ -1093,13 +1093,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( i.InputRegister(1), i.OutputSBit()); break; case kArmSdiv: { - CpuFeatureScope scope(tasm(), SUDIV); + CpuFeatureScope scope(masm(), SUDIV); __ sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); DCHECK_EQ(LeaveCC, i.OutputSBit()); break; } case kArmUdiv: { - CpuFeatureScope scope(tasm(), SUDIV); + CpuFeatureScope scope(masm(), SUDIV); __ udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); DCHECK_EQ(LeaveCC, i.OutputSBit()); break; @@ -1127,20 +1127,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( i.OutputSBit()); break; case kArmBfc: { - CpuFeatureScope scope(tasm(), ARMv7); + CpuFeatureScope scope(masm(), ARMv7); __ bfc(i.OutputRegister(), i.InputInt8(1), i.InputInt8(2)); DCHECK_EQ(LeaveCC, i.OutputSBit()); break; } case kArmUbfx: { - CpuFeatureScope scope(tasm(), ARMv7); + CpuFeatureScope scope(masm(), ARMv7); __ ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1), i.InputInt8(2)); DCHECK_EQ(LeaveCC, i.OutputSBit()); break; } case kArmSbfx: { - CpuFeatureScope scope(tasm(), ARMv7); + CpuFeatureScope scope(masm(), ARMv7); __ sbfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1), i.InputInt8(2)); DCHECK_EQ(LeaveCC, i.OutputSBit()); @@ -1183,7 +1183,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( DCHECK_EQ(LeaveCC, i.OutputSBit()); break; case kArmRbit: { - CpuFeatureScope scope(tasm(), ARMv7); + CpuFeatureScope scope(masm(), ARMv7); __ rbit(i.OutputRegister(), i.InputRegister(0)); DCHECK_EQ(LeaveCC, i.OutputSBit()); break; @@ -1378,7 +1378,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArmVmodF64: { // TODO(bmeurer): We should really get rid of this special instruction, // and generate a CallAddress instruction instead. 
- FrameScope scope(tasm(), StackFrame::MANUAL); + FrameScope scope(masm(), StackFrame::MANUAL); __ PrepareCallCFunction(0, 2); __ MovToFloatParameters(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); @@ -1398,7 +1398,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); break; case kArmVrintmF32: { - CpuFeatureScope scope(tasm(), ARMv8); + CpuFeatureScope scope(masm(), ARMv8); if (instr->InputAt(0)->IsSimd128Register()) { __ vrintm(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0)); @@ -1408,12 +1408,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmVrintmF64: { - CpuFeatureScope scope(tasm(), ARMv8); + CpuFeatureScope scope(masm(), ARMv8); __ vrintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); break; } case kArmVrintpF32: { - CpuFeatureScope scope(tasm(), ARMv8); + CpuFeatureScope scope(masm(), ARMv8); if (instr->InputAt(0)->IsSimd128Register()) { __ vrintp(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0)); @@ -1423,12 +1423,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmVrintpF64: { - CpuFeatureScope scope(tasm(), ARMv8); + CpuFeatureScope scope(masm(), ARMv8); __ vrintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); break; } case kArmVrintzF32: { - CpuFeatureScope scope(tasm(), ARMv8); + CpuFeatureScope scope(masm(), ARMv8); if (instr->InputAt(0)->IsSimd128Register()) { __ vrintz(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0)); @@ -1438,17 +1438,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmVrintzF64: { - CpuFeatureScope scope(tasm(), ARMv8); + CpuFeatureScope scope(masm(), ARMv8); __ vrintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); break; } case kArmVrintaF64: { - CpuFeatureScope scope(tasm(), ARMv8); + CpuFeatureScope scope(masm(), ARMv8); __ vrinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); break; } case kArmVrintnF32: { - CpuFeatureScope scope(tasm(), ARMv8); + CpuFeatureScope scope(masm(), ARMv8); if (instr->InputAt(0)->IsSimd128Register()) { __ vrintn(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0)); @@ -1458,7 +1458,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmVrintnF64: { - CpuFeatureScope scope(tasm(), ARMv8); + CpuFeatureScope scope(masm(), ARMv8); __ vrintn(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); break; } @@ -1473,7 +1473,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmVcvtF32S32: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); SwVfpRegister scratch = temps.AcquireS(); __ vmov(scratch, i.InputRegister(0)); __ vcvt_f32_s32(i.OutputFloatRegister(), scratch); @@ -1481,7 +1481,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmVcvtF32U32: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); SwVfpRegister scratch = temps.AcquireS(); __ vmov(scratch, i.InputRegister(0)); __ vcvt_f32_u32(i.OutputFloatRegister(), scratch); @@ -1489,7 +1489,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmVcvtF64S32: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); SwVfpRegister scratch = temps.AcquireS(); __ vmov(scratch, i.InputRegister(0)); __ vcvt_f64_s32(i.OutputDoubleRegister(), scratch); @@ -1497,7 +1497,7 
@@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmVcvtF64U32: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); SwVfpRegister scratch = temps.AcquireS(); __ vmov(scratch, i.InputRegister(0)); __ vcvt_f64_u32(i.OutputDoubleRegister(), scratch); @@ -1505,7 +1505,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmVcvtS32F32: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); SwVfpRegister scratch = temps.AcquireS(); __ vcvt_s32_f32(scratch, i.InputFloatRegister(0)); __ vmov(i.OutputRegister(), scratch); @@ -1520,7 +1520,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmVcvtU32F32: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); SwVfpRegister scratch = temps.AcquireS(); __ vcvt_u32_f32(scratch, i.InputFloatRegister(0)); __ vmov(i.OutputRegister(), scratch); @@ -1535,7 +1535,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmVcvtS32F64: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); SwVfpRegister scratch = temps.AcquireS(); __ vcvt_s32_f64(scratch, i.InputDoubleRegister(0)); __ vmov(i.OutputRegister(), scratch); @@ -1543,7 +1543,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmVcvtU32F64: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); SwVfpRegister scratch = temps.AcquireS(); __ vcvt_u32_f64(scratch, i.InputDoubleRegister(0)); __ vmov(i.OutputRegister(), scratch); @@ -1762,7 +1762,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ vldr(i.OutputFloatRegister(), MemOperand(fp, offset)); } else { DCHECK_EQ(MachineRepresentation::kSimd128, op->representation()); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ add(scratch, fp, Operand(offset)); __ vld1(Neon8, NeonListOperand(i.OutputSimd128Register()), @@ -1899,7 +1899,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } #undef ASSEMBLE_F64X2_ARITHMETIC_BINOP case kArmF64x2Eq: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ mov(scratch, Operand(0)); __ VFPCompareAndSetFlags(i.InputSimd128Register(0).low(), @@ -1915,7 +1915,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmF64x2Ne: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ mov(scratch, Operand(0)); __ VFPCompareAndSetFlags(i.InputSimd128Register(0).low(), @@ -1931,7 +1931,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmF64x2Lt: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ VFPCompareAndSetFlags(i.InputSimd128Register(0).low(), i.InputSimd128Register(1).low()); @@ -1947,7 +1947,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmF64x2Le: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ VFPCompareAndSetFlags(i.InputSimd128Register(0).low(), i.InputSimd128Register(1).low()); @@ -1989,7 +1989,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case 
kArmF64x2Ceil: { - CpuFeatureScope scope(tasm(), ARMv8); + CpuFeatureScope scope(masm(), ARMv8); Simd128Register dst = i.OutputSimd128Register(); Simd128Register src = i.InputSimd128Register(0); __ vrintp(dst.low(), src.low()); @@ -1997,7 +1997,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmF64x2Floor: { - CpuFeatureScope scope(tasm(), ARMv8); + CpuFeatureScope scope(masm(), ARMv8); Simd128Register dst = i.OutputSimd128Register(); Simd128Register src = i.InputSimd128Register(0); __ vrintm(dst.low(), src.low()); @@ -2005,7 +2005,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmF64x2Trunc: { - CpuFeatureScope scope(tasm(), ARMv8); + CpuFeatureScope scope(masm(), ARMv8); Simd128Register dst = i.OutputSimd128Register(); Simd128Register src = i.InputSimd128Register(0); __ vrintz(dst.low(), src.low()); @@ -2013,7 +2013,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmF64x2NearestInt: { - CpuFeatureScope scope(tasm(), ARMv8); + CpuFeatureScope scope(masm(), ARMv8); Simd128Register dst = i.OutputSimd128Register(); Simd128Register src = i.InputSimd128Register(0); __ vrintn(dst.low(), src.low()); @@ -2060,7 +2060,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmI64x2Mul: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); QwNeonRegister dst = i.OutputSimd128Register(); QwNeonRegister left = i.InputSimd128Register(0); QwNeonRegister right = i.InputSimd128Register(1); @@ -2447,7 +2447,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArmI32x4BitMask: { Register dst = i.OutputRegister(); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Simd128Register src = i.InputSimd128Register(0); Simd128Register tmp = temps.AcquireQ(); Simd128Register mask = i.TempSimd128Register(0); @@ -2468,7 +2468,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Simd128Register lhs = i.InputSimd128Register(0); Simd128Register rhs = i.InputSimd128Register(1); Simd128Register tmp1 = i.TempSimd128Register(0); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Simd128Register scratch = temps.AcquireQ(); __ vmull(NeonS16, tmp1, lhs.low(), rhs.low()); __ vmull(NeonS16, scratch, lhs.high(), rhs.high()); @@ -2650,7 +2650,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmI16x8BitMask: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register dst = i.OutputRegister(); Simd128Register src = i.InputSimd128Register(0); Simd128Register tmp = temps.AcquireQ(); @@ -2805,7 +2805,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmI8x16BitMask: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register dst = i.OutputRegister(); Simd128Register src = i.InputSimd128Register(0); Simd128Register tmp = temps.AcquireQ(); @@ -2906,7 +2906,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Simd128Register dst = i.OutputSimd128Register(), src1 = i.InputSimd128Register(1); DCHECK(dst == i.InputSimd128Register(0)); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Simd128Register scratch = temps.AcquireQ(); // src0 = [0, 1, 2, 3], src1 = [4, 5, 6, 7] __ vmov(scratch, src1); @@ -2917,7 +2917,7 @@ CodeGenerator::CodeGenResult 
CodeGenerator::AssembleArchInstruction( Simd128Register dst = i.OutputSimd128Register(), src1 = i.InputSimd128Register(1); DCHECK(dst == i.InputSimd128Register(0)); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Simd128Register scratch = temps.AcquireQ(); // src0 = [4, 5, 6, 7], src1 = [0, 1, 2, 3] (flipped from UnzipLeft). __ vmov(scratch, src1); @@ -2928,7 +2928,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Simd128Register dst = i.OutputSimd128Register(), src1 = i.InputSimd128Register(1); DCHECK(dst == i.InputSimd128Register(0)); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Simd128Register scratch = temps.AcquireQ(); // src0 = [0, 1, 2, 3], src1 = [4, 5, 6, 7] __ vmov(scratch, src1); @@ -2961,7 +2961,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArmS32x4TransposeRight: { Simd128Register dst = i.OutputSimd128Register(), src1 = i.InputSimd128Register(1); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Simd128Register scratch = temps.AcquireQ(); DCHECK(dst == i.InputSimd128Register(0)); // src0 = [4, 5, 6, 7], src1 = [0, 1, 2, 3] (flipped from TransposeLeft). @@ -2990,7 +2990,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArmS16x8UnzipLeft: { Simd128Register dst = i.OutputSimd128Register(), src1 = i.InputSimd128Register(1); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Simd128Register scratch = temps.AcquireQ(); DCHECK(dst == i.InputSimd128Register(0)); // src0 = [0, 1, 2, 3, ... 7], src1 = [8, 9, 10, 11, ... 15] @@ -3001,7 +3001,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArmS16x8UnzipRight: { Simd128Register dst = i.OutputSimd128Register(), src1 = i.InputSimd128Register(1); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Simd128Register scratch = temps.AcquireQ(); DCHECK(dst == i.InputSimd128Register(0)); // src0 = [8, 9, 10, 11, ... 15], src1 = [0, 1, 2, 3, ... 7] (flipped). @@ -3012,7 +3012,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArmS16x8TransposeLeft: { Simd128Register dst = i.OutputSimd128Register(), src1 = i.InputSimd128Register(1); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Simd128Register scratch = temps.AcquireQ(); DCHECK(dst == i.InputSimd128Register(0)); // src0 = [0, 1, 2, 3, ... 7], src1 = [8, 9, 10, 11, ... 15] @@ -3023,7 +3023,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArmS16x8TransposeRight: { Simd128Register dst = i.OutputSimd128Register(), src1 = i.InputSimd128Register(1); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Simd128Register scratch = temps.AcquireQ(); DCHECK(dst == i.InputSimd128Register(0)); // src0 = [8, 9, 10, 11, ... 15], src1 = [0, 1, 2, 3, ... 7] (flipped). @@ -3052,7 +3052,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArmS8x16UnzipLeft: { Simd128Register dst = i.OutputSimd128Register(), src1 = i.InputSimd128Register(1); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Simd128Register scratch = temps.AcquireQ(); DCHECK(dst == i.InputSimd128Register(0)); // src0 = [0, 1, 2, 3, ... 15], src1 = [16, 17, 18, 19, ... 
31] @@ -3063,7 +3063,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArmS8x16UnzipRight: { Simd128Register dst = i.OutputSimd128Register(), src1 = i.InputSimd128Register(1); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Simd128Register scratch = temps.AcquireQ(); DCHECK(dst == i.InputSimd128Register(0)); // src0 = [16, 17, 18, 19, ... 31], src1 = [0, 1, 2, 3, ... 15] (flipped). @@ -3074,7 +3074,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArmS8x16TransposeLeft: { Simd128Register dst = i.OutputSimd128Register(), src1 = i.InputSimd128Register(1); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Simd128Register scratch = temps.AcquireQ(); DCHECK(dst == i.InputSimd128Register(0)); // src0 = [0, 1, 2, 3, ... 15], src1 = [16, 17, 18, 19, ... 31] @@ -3085,7 +3085,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArmS8x16TransposeRight: { Simd128Register dst = i.OutputSimd128Register(), src1 = i.InputSimd128Register(1); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Simd128Register scratch = temps.AcquireQ(); DCHECK(dst == i.InputSimd128Register(0)); // src0 = [16, 17, 18, 19, ... 31], src1 = [0, 1, 2, 3, ... 15] (flipped). @@ -3112,7 +3112,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); DwVfpRegister table_base = src0.low(); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Simd128Register scratch = temps.AcquireQ(); // If unary shuffle, table is src0 (2 d-registers), otherwise src0 and // src1. They must be consecutive. @@ -3163,7 +3163,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArmV128AnyTrue: { const QwNeonRegister& src = i.InputSimd128Register(0); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); DwVfpRegister scratch = temps.AcquireD(); __ vpmax(NeonU32, scratch, src.low(), src.high()); __ vpmax(NeonU32, scratch, scratch, scratch); @@ -3178,7 +3178,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArmI32x4AllTrue: { const QwNeonRegister& src = i.InputSimd128Register(0); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); DwVfpRegister scratch = temps.AcquireD(); __ vpmin(NeonU32, scratch, src.low(), src.high()); __ vpmin(NeonU32, scratch, scratch, scratch); @@ -3189,7 +3189,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArmI16x8AllTrue: { const QwNeonRegister& src = i.InputSimd128Register(0); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); DwVfpRegister scratch = temps.AcquireD(); __ vpmin(NeonU16, scratch, src.low(), src.high()); __ vpmin(NeonU16, scratch, scratch, scratch); @@ -3201,7 +3201,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArmI8x16AllTrue: { const QwNeonRegister& src = i.InputSimd128Register(0); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); DwVfpRegister scratch = temps.AcquireD(); __ vpmin(NeonU8, scratch, src.low(), src.high()); __ vpmin(NeonU8, scratch, scratch, scratch); @@ -3747,7 +3747,7 @@ void CodeGenerator::AssembleConstructFrame() { // exception unconditionally. Thereby we can avoid the integer overflow // check in the condition code. 
if (required_slots * kSystemPointerSize < v8_flags.stack_size * KB) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ ldr(scratch, FieldMemOperand( kWasmInstanceRegister, @@ -3873,8 +3873,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) { __ cmp(argc_reg, Operand(parameter_slots)); __ mov(argc_reg, Operand(parameter_slots), LeaveCC, lt); } - __ DropArguments(argc_reg, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(argc_reg, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } else if (additional_pop_count->IsImmediate()) { DCHECK_EQ(Constant::kInt32, g.ToConstant(additional_pop_count).type()); int additional_count = g.ToConstant(additional_pop_count).ToInt32(); @@ -3944,7 +3944,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, } else if (source->IsDoubleRegister()) { __ vstr(g.ToDoubleRegister(source), dst); } else { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register temp = temps.Acquire(); QwNeonRegister src = g.ToSimd128Register(source); __ add(temp, dst.rn(), Operand(dst.offset())); @@ -3965,7 +3965,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, } else if (source->IsDoubleStackSlot()) { __ vldr(g.ToDoubleRegister(destination), src); } else { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register temp = temps.Acquire(); QwNeonRegister dst = g.ToSimd128Register(destination); __ add(temp, src.rn(), Operand(src.offset())); @@ -3976,7 +3976,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, case MoveType::kStackToStack: { MemOperand src = g.ToMemOperand(source); MemOperand dst = g.ToMemOperand(destination); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); if (source->IsStackSlot() || source->IsFloatStackSlot()) { SwVfpRegister temp = temps.AcquireS(); __ vldr(temp, src); @@ -4014,27 +4014,27 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, Constant src = g.ToConstant(source); MemOperand dst = g.ToMemOperand(destination); if (destination->IsStackSlot()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); // Acquire a S register instead of a general purpose register in case // `vstr` needs one to compute the address of `dst`. SwVfpRegister s_temp = temps.AcquireS(); { // TODO(arm): This sequence could be optimized further if necessary by // writing the constant directly into `s_temp`. - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register temp = temps.Acquire(); MoveConstantToRegister(temp, src); __ vmov(s_temp, temp); } __ vstr(s_temp, dst); } else if (destination->IsFloatStackSlot()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); SwVfpRegister temp = temps.AcquireS(); __ vmov(temp, Float32::FromBits(src.ToFloat32AsInt())); __ vstr(temp, dst); } else { DCHECK(destination->IsDoubleStackSlot()); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); DwVfpRegister temp = temps.AcquireD(); // TODO(arm): Look into optimizing this further if possible. Supporting // the NEON version of VMOV may help. 
@@ -4060,7 +4060,7 @@ AllocatedOperand CodeGenerator::Push(InstructionOperand* source) { __ push(g.ToRegister(source)); frame_access_state()->IncreaseSPDelta(new_slots); } else if (source->IsStackSlot()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ ldr(scratch, g.ToMemOperand(source)); __ push(scratch); @@ -4083,7 +4083,7 @@ void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) { if (dest->IsRegister()) { __ pop(g.ToRegister(dest)); } else if (dest->IsStackSlot()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ pop(scratch); __ str(scratch, g.ToMemOperand(dest)); @@ -4110,7 +4110,7 @@ void CodeGenerator::PopTempStackSlots() { void CodeGenerator::MoveToTempLocation(InstructionOperand* source, MachineRepresentation rep) { // Must be kept in sync with {MoveTempLocationTo}. - move_cycle_.temps.emplace(tasm()); + move_cycle_.temps.emplace(masm()); auto& temps = *move_cycle_.temps; // Temporarily exclude the reserved scratch registers while we pick a // location to resolve the cycle. Re-include them immediately afterwards so @@ -4184,7 +4184,7 @@ void CodeGenerator::SetPendingMove(MoveOperands* move) { InstructionOperand& destination = move->destination(); MoveType::Type move_type = MoveType::InferMove(&move->source(), &move->destination()); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); if (move_type == MoveType::kStackToStack) { if (source.IsStackSlot() || source.IsFloatStackSlot()) { SwVfpRegister temp = temps.AcquireS(); @@ -4224,7 +4224,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source, DCHECK(destination->IsFloatRegister()); // GapResolver may give us reg codes that don't map to actual // s-registers. Generate code to work around those cases. 
- UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); LowDwVfpRegister temp = temps.AcquireLowD(); int src_code = LocationOperand::cast(source)->register_code(); int dst_code = LocationOperand::cast(destination)->register_code(); @@ -4241,20 +4241,20 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source, MemOperand dst = g.ToMemOperand(destination); if (source->IsRegister()) { Register src = g.ToRegister(source); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); SwVfpRegister temp = temps.AcquireS(); __ vmov(temp, src); __ ldr(src, dst); __ vstr(temp, dst); } else if (source->IsFloatRegister()) { int src_code = LocationOperand::cast(source)->register_code(); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); LowDwVfpRegister temp = temps.AcquireLowD(); __ VmovExtended(temp.low().code(), src_code); __ VmovExtended(src_code, dst); __ vstr(temp.low(), dst); } else if (source->IsDoubleRegister()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); DwVfpRegister temp = temps.AcquireD(); DwVfpRegister src = g.ToDoubleRegister(source); __ Move(temp, src); @@ -4262,7 +4262,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source, __ vstr(temp, dst); } else { QwNeonRegister src = g.ToSimd128Register(source); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register temp = temps.Acquire(); QwNeonRegister temp_q = temps.AcquireQ(); __ Move(temp_q, src); @@ -4276,7 +4276,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source, MemOperand src = g.ToMemOperand(source); MemOperand dst = g.ToMemOperand(destination); if (source->IsStackSlot() || source->IsFloatStackSlot()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); SwVfpRegister temp_0 = temps.AcquireS(); SwVfpRegister temp_1 = temps.AcquireS(); __ vldr(temp_0, dst); @@ -4284,7 +4284,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source, __ vstr(temp_0, src); __ vstr(temp_1, dst); } else if (source->IsDoubleStackSlot()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); LowDwVfpRegister temp = temps.AcquireLowD(); if (temps.CanAcquireD()) { DwVfpRegister temp_0 = temp; @@ -4317,7 +4317,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source, MemOperand dst0 = dst; MemOperand src1(src.rn(), src.offset() + kDoubleSize); MemOperand dst1(dst.rn(), dst.offset() + kDoubleSize); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); DwVfpRegister temp_0 = temps.AcquireD(); DwVfpRegister temp_1 = temps.AcquireD(); __ vldr(temp_0, dst0); diff --git a/src/compiler/backend/arm/instruction-selector-arm.cc b/src/compiler/backend/arm/instruction-selector-arm.cc index 8733aff787..48e649051b 100644 --- a/src/compiler/backend/arm/instruction-selector-arm.cc +++ b/src/compiler/backend/arm/instruction-selector-arm.cc @@ -397,7 +397,7 @@ void EmitLoad(InstructionSelector* selector, InstructionCode opcode, if (int_matcher.HasResolvedValue()) { ptrdiff_t const delta = int_matcher.ResolvedValue() + - TurboAssemblerBase::RootRegisterOffsetForExternalReference( + MacroAssemblerBase::RootRegisterOffsetForExternalReference( selector->isolate(), m.ResolvedValue()); input_count = 1; inputs[0] = g.UseImmediate(static_cast(delta)); @@ -753,7 +753,7 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node, if (int_matcher.HasResolvedValue()) { ptrdiff_t const delta = 
int_matcher.ResolvedValue() + - TurboAssemblerBase::RootRegisterOffsetForExternalReference( + MacroAssemblerBase::RootRegisterOffsetForExternalReference( selector->isolate(), m.ResolvedValue()); int input_count = 2; InstructionOperand inputs[2]; diff --git a/src/compiler/backend/arm64/code-generator-arm64.cc b/src/compiler/backend/arm64/code-generator-arm64.cc index 764309f677..4254859b1a 100644 --- a/src/compiler/backend/arm64/code-generator-arm64.cc +++ b/src/compiler/backend/arm64/code-generator-arm64.cc @@ -24,7 +24,7 @@ namespace v8 { namespace internal { namespace compiler { -#define __ tasm()-> +#define __ masm()-> // Adds Arm64-specific methods to convert InstructionOperands. class Arm64OperandConverter final : public InstructionOperandConverter { @@ -238,13 +238,13 @@ class Arm64OperandConverter final : public InstructionOperandConverter { UNREACHABLE(); } - MemOperand ToMemOperand(InstructionOperand* op, TurboAssembler* tasm) const { + MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const { DCHECK_NOT_NULL(op); DCHECK(op->IsStackSlot() || op->IsFPStackSlot()); - return SlotToMemOperand(AllocatedOperand::cast(op)->index(), tasm); + return SlotToMemOperand(AllocatedOperand::cast(op)->index(), masm); } - MemOperand SlotToMemOperand(int slot, TurboAssembler* tasm) const { + MemOperand SlotToMemOperand(int slot, MacroAssembler* masm) const { FrameOffset offset = frame_access_state()->GetFrameOffset(slot); if (offset.from_frame_pointer()) { int from_sp = offset.offset() + frame_access_state()->GetSPToFPOffset(); @@ -284,7 +284,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode { void Generate() final { if (COMPRESS_POINTERS_BOOL) { - __ DecompressTaggedPointer(value_, value_); + __ DecompressTagged(value_, value_); } __ CheckPageFlag( value_, MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask, @@ -294,7 +294,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode { : SaveFPRegsMode::kIgnore; if (must_save_lr_) { // We need to save and restore lr if the frame was elided. - __ Push(lr, padreg); + __ Push(lr, padreg); unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset(), sp); } if (mode_ == RecordWriteMode::kValueIsEphemeronKey) { @@ -311,7 +311,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode { __ CallRecordWriteStubSaveRegisters(object_, offset_, save_fp_mode); } if (must_save_lr_) { - __ Pop(padreg, lr); + __ Pop(padreg, lr); unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset()); } } @@ -459,14 +459,14 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen, // Handles unary ops that work for float (scalar), double (scalar), or NEON. template -void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr, +void EmitFpOrNeonUnop(MacroAssembler* masm, Fn fn, Instruction* instr, Arm64OperandConverter i, VectorFormat scalar, VectorFormat vector) { VectorFormat f = instr->InputAt(0)->IsSimd128Register() ? 
vector : scalar; VRegister output = VRegister::Create(i.OutputDoubleRegister().code(), f); VRegister input = VRegister::Create(i.InputDoubleRegister(0).code(), f); - (tasm->*fn)(output, input); + (masm->*fn)(output, input); } } // namespace @@ -539,13 +539,13 @@ void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr, #define ASSEMBLE_IEEE754_BINOP(name) \ do { \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \ } while (0) #define ASSEMBLE_IEEE754_UNOP(name) \ do { \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \ } while (0) @@ -558,7 +558,7 @@ void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr, __ asm_imm(i.OutputSimd128Register().format(), \ i.InputSimd128Register(0).format(), i.InputInt##width(1)); \ } else { \ - UseScratchRegisterScope temps(tasm()); \ + UseScratchRegisterScope temps(masm()); \ VRegister tmp = temps.AcquireQ(); \ Register shift = temps.Acquire##gp(); \ constexpr int mask = (1 << width) - 1; \ @@ -578,7 +578,7 @@ void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr, __ asm_imm(i.OutputSimd128Register().format(), \ i.InputSimd128Register(0).format(), i.InputInt##width(1)); \ } else { \ - UseScratchRegisterScope temps(tasm()); \ + UseScratchRegisterScope temps(masm()); \ VRegister tmp = temps.AcquireQ(); \ Register shift = temps.Acquire##gp(); \ constexpr int mask = (1 << width) - 1; \ @@ -592,7 +592,7 @@ void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr, void CodeGenerator::AssembleDeconstructFrame() { __ Mov(sp, fp); - __ Pop(fp, lr); + __ Pop(fp, lr); unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset()); } @@ -606,7 +606,7 @@ void CodeGenerator::AssemblePrepareTailCall() { namespace { -void AdjustStackPointerForTailCall(TurboAssembler* tasm, +void AdjustStackPointerForTailCall(MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp, bool allow_shrinkage = true) { @@ -615,10 +615,10 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm, int stack_slot_delta = new_slot_above_sp - current_sp_offset; DCHECK_EQ(stack_slot_delta % 2, 0); if (stack_slot_delta > 0) { - tasm->Claim(stack_slot_delta); + masm->Claim(stack_slot_delta); state->IncreaseSPDelta(stack_slot_delta); } else if (allow_shrinkage && stack_slot_delta < 0) { - tasm->Drop(-stack_slot_delta); + masm->Drop(-stack_slot_delta); state->IncreaseSPDelta(stack_slot_delta); } } @@ -627,14 +627,14 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm, void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, int first_unused_slot_offset) { - AdjustStackPointerForTailCall(tasm(), frame_access_state(), + AdjustStackPointerForTailCall(masm(), frame_access_state(), first_unused_slot_offset, false); } void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr, int first_unused_slot_offset) { DCHECK_EQ(first_unused_slot_offset % 2, 0); - AdjustStackPointerForTailCall(tasm(), frame_access_state(), + AdjustStackPointerForTailCall(masm(), frame_access_state(), first_unused_slot_offset); DCHECK(instr->IsTailCall()); InstructionOperandConverter g(this, instr); @@ -646,7 +646,7 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr, // Check that {kJavaScriptCallCodeStartRegister} is correct. 
void CodeGenerator::AssembleCodeStartRegisterCheck() { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.AcquireX(); __ ComputeCodeStartAddress(scratch); __ cmp(scratch, kJavaScriptCallCodeStartRegister); @@ -705,7 +705,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ Jump(wasm_code, constant.rmode()); } else { Register target = i.InputRegister(0); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); temps.Exclude(x17); __ Mov(x17, target); __ Jump(x17); @@ -737,7 +737,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( DCHECK_IMPLIES( instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister), reg == kJavaScriptCallCodeStartRegister); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); temps.Exclude(x17); __ Mov(x17, reg); __ Jump(x17); @@ -750,16 +750,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Register func = i.InputRegister(0); if (v8_flags.debug_code) { // Check the function's context matches the context argument. - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); Register temp = scope.AcquireX(); - __ LoadTaggedPointerField( - temp, FieldMemOperand(func, JSFunction::kContextOffset)); + __ LoadTaggedField(temp, + FieldMemOperand(func, JSFunction::kContextOffset)); __ cmp(cp, temp); __ Assert(eq, AbortReason::kWrongFunctionContext); } static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch"); - __ LoadTaggedPointerField(x2, - FieldMemOperand(func, JSFunction::kCodeOffset)); + __ LoadTaggedField(x2, FieldMemOperand(func, JSFunction::kCodeOffset)); __ CallCodeObject(x2); RecordCallPosition(instr); frame_access_state()->ClearSPDelta(); @@ -860,7 +859,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( { // We don't actually want to generate a pile of code for this, so just // claim there is a stack frame, without generating one. 
- FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE); + FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE); __ Call(BUILTIN_CODE(isolate(), AbortCSADcheck), RelocInfo::CODE_TARGET); } @@ -1051,39 +1050,39 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ASSEMBLE_IEEE754_UNOP(tanh); break; case kArm64Float32RoundDown: - EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintm, instr, i, kFormatS, + EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintm, instr, i, kFormatS, kFormat4S); break; case kArm64Float64RoundDown: - EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintm, instr, i, kFormatD, + EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintm, instr, i, kFormatD, kFormat2D); break; case kArm64Float32RoundUp: - EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintp, instr, i, kFormatS, + EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintp, instr, i, kFormatS, kFormat4S); break; case kArm64Float64RoundUp: - EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintp, instr, i, kFormatD, + EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintp, instr, i, kFormatD, kFormat2D); break; case kArm64Float64RoundTiesAway: - EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frinta, instr, i, kFormatD, + EmitFpOrNeonUnop(masm(), &MacroAssembler::Frinta, instr, i, kFormatD, kFormat2D); break; case kArm64Float32RoundTruncate: - EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintz, instr, i, kFormatS, + EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintz, instr, i, kFormatS, kFormat4S); break; case kArm64Float64RoundTruncate: - EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintz, instr, i, kFormatD, + EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintz, instr, i, kFormatD, kFormat2D); break; case kArm64Float32RoundTiesEven: - EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintn, instr, i, kFormatS, + EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintn, instr, i, kFormatS, kFormat4S); break; case kArm64Float64RoundTiesEven: - EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintn, instr, i, kFormatD, + EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintn, instr, i, kFormatD, kFormat2D); break; case kArm64Add: @@ -1314,14 +1313,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ Udiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1)); break; case kArm64Imod: { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); Register temp = scope.AcquireX(); __ Sdiv(temp, i.InputRegister(0), i.InputRegister(1)); __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0)); break; } case kArm64Imod32: { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); Register temp = scope.AcquireW(); __ Sdiv(temp, i.InputRegister32(0), i.InputRegister32(1)); __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1), @@ -1329,14 +1328,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArm64Umod: { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); Register temp = scope.AcquireX(); __ Udiv(temp, i.InputRegister(0), i.InputRegister(1)); __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0)); break; } case kArm64Umod32: { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); Register temp = scope.AcquireW(); __ Udiv(temp, i.InputRegister32(0), i.InputRegister32(1)); __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1), @@ -1650,7 +1649,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; case kArm64Float64Mod: { // TODO(turbofan): 
implement directly. - FrameScope scope(tasm(), StackFrame::MANUAL); + FrameScope scope(masm(), StackFrame::MANUAL); DCHECK_EQ(d0, i.InputDoubleRegister(0)); DCHECK_EQ(d1, i.InputDoubleRegister(1)); DCHECK_EQ(d0, i.OutputDoubleRegister()); @@ -1890,23 +1889,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArm64LdrDecompressTaggedSigned: __ DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand()); break; - case kArm64LdrDecompressTaggedPointer: - __ DecompressTaggedPointer(i.OutputRegister(), i.MemoryOperand()); - break; - case kArm64LdrDecompressAnyTagged: - __ DecompressAnyTagged(i.OutputRegister(), i.MemoryOperand()); + case kArm64LdrDecompressTagged: + __ DecompressTagged(i.OutputRegister(), i.MemoryOperand()); break; case kArm64LdarDecompressTaggedSigned: __ AtomicDecompressTaggedSigned(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1), i.TempRegister(0)); break; - case kArm64LdarDecompressTaggedPointer: - __ AtomicDecompressTaggedPointer(i.OutputRegister(), i.InputRegister(0), - i.InputRegister(1), i.TempRegister(0)); - break; - case kArm64LdarDecompressAnyTagged: - __ AtomicDecompressAnyTagged(i.OutputRegister(), i.InputRegister(0), - i.InputRegister(1), i.TempRegister(0)); + case kArm64LdarDecompressTagged: + __ AtomicDecompressTagged(i.OutputRegister(), i.InputRegister(0), + i.InputRegister(1), i.TempRegister(0)); break; case kArm64LdrDecodeSandboxedPointer: __ LoadSandboxedPointerField(i.OutputRegister(), i.MemoryOperand()); @@ -2369,7 +2361,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( SIMD_BINOP_LANE_SIZE_CASE(kArm64IAdd, Add); SIMD_BINOP_LANE_SIZE_CASE(kArm64ISub, Sub); case kArm64I64x2Mul: { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); VRegister dst = i.OutputSimd128Register(); VRegister src1 = i.InputSimd128Register(0); VRegister src2 = i.InputSimd128Register(1); @@ -2470,7 +2462,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( SIMD_BINOP_LANE_SIZE_CASE(kArm64IGtU, Cmhi); SIMD_BINOP_LANE_SIZE_CASE(kArm64IGeU, Cmhs); case kArm64I32x4BitMask: { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); Register dst = i.OutputRegister32(); VRegister src = i.InputSimd128Register(0); VRegister tmp = scope.AcquireQ(); @@ -2486,7 +2478,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArm64I32x4DotI16x8S: { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); VRegister lhs = i.InputSimd128Register(0); VRegister rhs = i.InputSimd128Register(1); VRegister tmp1 = scope.AcquireV(kFormat4S); @@ -2497,7 +2489,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArm64I16x8DotI8x16S: { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); VRegister lhs = i.InputSimd128Register(0); VRegister rhs = i.InputSimd128Register(1); VRegister tmp1 = scope.AcquireV(kFormat8H); @@ -2515,7 +2507,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( i.InputSimd128Register(1).V16B()); } else { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); VRegister lhs = i.InputSimd128Register(0); VRegister rhs = i.InputSimd128Register(1); VRegister tmp1 = scope.AcquireV(kFormat8H); @@ -2553,7 +2545,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( VRegister dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); - 
UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); VRegister temp = scope.AcquireV(kFormat4S); if (dst == src1) { __ Mov(temp, src1.V4S()); @@ -2574,7 +2566,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( VRegister dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); VRegister temp = scope.AcquireV(kFormat4S); if (dst == src1) { __ Mov(temp, src1.V4S()); @@ -2588,7 +2580,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( SIMD_BINOP_LANE_SIZE_CASE(kArm64ISubSatU, Uqsub); SIMD_BINOP_CASE(kArm64I16x8Q15MulRSatS, Sqrdmulh, 8H); case kArm64I16x8BitMask: { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); Register dst = i.OutputRegister32(); VRegister src = i.InputSimd128Register(0); VRegister tmp = scope.AcquireQ(); @@ -2615,7 +2607,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( VRegister dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); VRegister temp = scope.AcquireV(kFormat8H); if (dst == src1) { __ Mov(temp, src1.V8H()); @@ -2633,7 +2625,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( VRegister dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); VRegister temp = scope.AcquireV(kFormat8H); if (dst == src1) { __ Mov(temp, src1.V8H()); @@ -2644,7 +2636,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArm64I8x16BitMask: { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); Register dst = i.OutputRegister32(); VRegister src = i.InputSimd128Register(0); VRegister tmp = scope.AcquireQ(); @@ -2733,7 +2725,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( src1 = i.InputSimd128Register(1).V4S(); // Check for in-place shuffles. // If dst == src0 == src1, then the shuffle is unary and we only use src0. - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); VRegister temp = scope.AcquireV(kFormat4S); if (dst == src0) { __ Mov(temp, src0); @@ -2799,7 +2791,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( DCHECK_EQ(0, (imm1 | imm2) & (src0 == src1 ? 0xF0F0F0F0F0F0F0F0 : 0xE0E0E0E0E0E0E0E0)); - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); VRegister temp = scope.AcquireV(kFormat16B); __ Movi(temp, imm2, imm1); @@ -2878,7 +2870,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArm64V128AnyTrue: { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); // For AnyTrue, the format does not matter; also, we would like to avoid // an expensive horizontal reduction. 
VRegister temp = scope.AcquireV(kFormat4S); @@ -2891,7 +2883,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } #define SIMD_REDUCE_OP_CASE(Op, Instr, format, FORMAT) \ case Op: { \ - UseScratchRegisterScope scope(tasm()); \ + UseScratchRegisterScope scope(masm()); \ VRegister temp = scope.AcquireV(format); \ __ Instr(temp, i.InputSimd128Register(0).V##FORMAT()); \ __ Umov(i.OutputRegister32(), temp, 0); \ @@ -3045,7 +3037,7 @@ void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) { void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) { Arm64OperandConverter i(this, instr); - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); Register input = i.InputRegister32(0); Register temp = scope.AcquireX(); size_t const case_count = instr->InputCount() - 2; @@ -3066,7 +3058,7 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) { { const size_t instruction_count = case_count * instructions_per_case + instructions_per_jump_target; - TurboAssembler::BlockPoolsScope block_pools(tasm(), + MacroAssembler::BlockPoolsScope block_pools(masm(), instruction_count * kInstrSize); __ Bind(&table); for (size_t index = 0; index < case_count; ++index) { @@ -3125,10 +3117,10 @@ void CodeGenerator::AssembleConstructFrame() { DCHECK_EQ(required_slots % 2, 1); __ Prologue(); // Update required_slots count since we have just claimed one extra slot. - static_assert(TurboAssembler::kExtraSlotClaimedByPrologue == 1); - required_slots -= TurboAssembler::kExtraSlotClaimedByPrologue; + static_assert(MacroAssembler::kExtraSlotClaimedByPrologue == 1); + required_slots -= MacroAssembler::kExtraSlotClaimedByPrologue; } else { - __ Push(lr, fp); + __ Push(lr, fp); __ Mov(fp, sp); } unwinding_info_writer_.MarkFrameConstructed(__ pc_offset()); @@ -3151,7 +3143,7 @@ void CodeGenerator::AssembleConstructFrame() { // One unoptimized frame slot has already been claimed when the actual // arguments count was pushed. required_slots -= - unoptimized_frame_slots - TurboAssembler::kExtraSlotClaimedByPrologue; + unoptimized_frame_slots - MacroAssembler::kExtraSlotClaimedByPrologue; } #if V8_ENABLE_WEBASSEMBLY @@ -3165,7 +3157,7 @@ void CodeGenerator::AssembleConstructFrame() { // exception unconditionally. Thereby we can avoid the integer overflow // check in the condition code. if (required_slots * kSystemPointerSize < v8_flags.stack_size * KB) { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); Register scratch = scope.AcquireX(); __ Ldr(scratch, FieldMemOperand( kWasmInstanceRegister, @@ -3178,7 +3170,7 @@ void CodeGenerator::AssembleConstructFrame() { { // Finish the frame that hasn't been fully built yet. 
- UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.AcquireX(); __ Mov(scratch, StackFrame::TypeToMarker(info()->GetOutputStackFrameType())); @@ -3209,7 +3201,7 @@ void CodeGenerator::AssembleConstructFrame() { __ Claim(required_slots); break; case CallDescriptor::kCallCodeObject: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.AcquireX(); __ Mov(scratch, StackFrame::TypeToMarker(info()->GetOutputStackFrameType())); @@ -3225,7 +3217,7 @@ void CodeGenerator::AssembleConstructFrame() { } #if V8_ENABLE_WEBASSEMBLY case CallDescriptor::kCallWasmFunction: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.AcquireX(); __ Mov(scratch, StackFrame::TypeToMarker(info()->GetOutputStackFrameType())); @@ -3235,7 +3227,7 @@ void CodeGenerator::AssembleConstructFrame() { } case CallDescriptor::kCallWasmImportWrapper: case CallDescriptor::kCallWasmCapiFunction: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.AcquireX(); __ Mov(scratch, StackFrame::TypeToMarker(info()->GetOutputStackFrameType())); @@ -3254,7 +3246,7 @@ void CodeGenerator::AssembleConstructFrame() { case CallDescriptor::kCallAddress: #if V8_ENABLE_WEBASSEMBLY if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.AcquireX(); __ Mov(scratch, StackFrame::TypeToMarker(StackFrame::C_WASM_ENTRY)); __ Push(scratch, padreg); @@ -3392,7 +3384,7 @@ void CodeGenerator::PrepareForDeoptimizationExits( } // Emit the jumps to deoptimization entries. - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); Register scratch = scope.AcquireX(); static_assert(static_cast(kFirstDeoptimizeKind) == 0); for (int i = 0; i < kDeoptimizeKindCount; i++) { @@ -3417,9 +3409,9 @@ AllocatedOperand CodeGenerator::Push(InstructionOperand* source) { __ Push(padreg, g.ToRegister(source)); frame_access_state()->IncreaseSPDelta(new_slots); } else if (source->IsStackSlot()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.AcquireX(); - __ Ldr(scratch, g.ToMemOperand(source, tasm())); + __ Ldr(scratch, g.ToMemOperand(source, masm())); __ Push(padreg, scratch); frame_access_state()->IncreaseSPDelta(new_slots); } else { @@ -3440,10 +3432,10 @@ void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) { if (dest->IsRegister()) { __ Pop(g.ToRegister(dest), padreg); } else if (dest->IsStackSlot()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.AcquireX(); __ Pop(scratch, padreg); - __ Str(scratch, g.ToMemOperand(dest, tasm())); + __ Str(scratch, g.ToMemOperand(dest, masm())); } else { int last_frame_slot_id = frame_access_state_->frame()->GetTotalFrameSlotCount() - 1; @@ -3468,7 +3460,7 @@ void CodeGenerator::MoveToTempLocation(InstructionOperand* source, MachineRepresentation rep) { // Must be kept in sync with {MoveTempLocationTo}. DCHECK(!source->IsImmediate()); - move_cycle_.temps.emplace(tasm()); + move_cycle_.temps.emplace(masm()); auto& temps = *move_cycle_.temps; // Temporarily exclude the reserved scratch registers while we pick one to // resolve the move cycle. 
Re-include them immediately afterwards as they @@ -3506,7 +3498,7 @@ void CodeGenerator::MoveToTempLocation(InstructionOperand* source, scratch_reg.code()); Arm64OperandConverter g(this, nullptr); if (source->IsStackSlot()) { - __ Ldr(g.ToDoubleRegister(&scratch), g.ToMemOperand(source, tasm())); + __ Ldr(g.ToDoubleRegister(&scratch), g.ToMemOperand(source, masm())); } else { DCHECK(source->IsRegister()); __ fmov(g.ToDoubleRegister(&scratch), g.ToRegister(source)); @@ -3535,7 +3527,7 @@ void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest, move_cycle_.scratch_reg->code()); Arm64OperandConverter g(this, nullptr); if (dest->IsStackSlot()) { - __ Str(g.ToDoubleRegister(&scratch), g.ToMemOperand(dest, tasm())); + __ Str(g.ToDoubleRegister(&scratch), g.ToMemOperand(dest, masm())); } else { DCHECK(dest->IsRegister()); __ fmov(g.ToRegister(dest), g.ToDoubleRegister(&scratch)); @@ -3557,9 +3549,9 @@ void CodeGenerator::SetPendingMove(MoveOperands* move) { auto move_type = MoveType::InferMove(&move->source(), &move->destination()); if (move_type == MoveType::kStackToStack) { Arm64OperandConverter g(this, nullptr); - MemOperand src = g.ToMemOperand(&move->source(), tasm()); - MemOperand dst = g.ToMemOperand(&move->destination(), tasm()); - UseScratchRegisterScope temps(tasm()); + MemOperand src = g.ToMemOperand(&move->source(), masm()); + MemOperand dst = g.ToMemOperand(&move->destination(), masm()); + UseScratchRegisterScope temps(masm()); if (move->source().IsSimd128StackSlot()) { VRegister temp = temps.AcquireQ(); move_cycle_.scratch_fp_regs.set(temp); @@ -3574,11 +3566,11 @@ void CodeGenerator::SetPendingMove(MoveOperands* move) { // Offset doesn't fit into the immediate field so the assembler will emit // two instructions and use a second temp register. 
if ((src.IsImmediateOffset() && - !tasm()->IsImmLSScaled(src_offset, src_size) && - !tasm()->IsImmLSUnscaled(src_offset)) || + !masm()->IsImmLSScaled(src_offset, src_size) && + !masm()->IsImmLSUnscaled(src_offset)) || (dst.IsImmediateOffset() && - !tasm()->IsImmLSScaled(dst_offset, dst_size) && - !tasm()->IsImmLSUnscaled(dst_offset))) { + !masm()->IsImmLSScaled(dst_offset, dst_size) && + !masm()->IsImmLSUnscaled(dst_offset))) { Register temp = temps.AcquireX(); move_cycle_.scratch_regs.set(temp); } @@ -3627,7 +3619,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, } return; case MoveType::kRegisterToStack: { - MemOperand dst = g.ToMemOperand(destination, tasm()); + MemOperand dst = g.ToMemOperand(destination, masm()); if (source->IsRegister()) { __ Str(g.ToRegister(source), dst); } else { @@ -3642,7 +3634,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, return; } case MoveType::kStackToRegister: { - MemOperand src = g.ToMemOperand(source, tasm()); + MemOperand src = g.ToMemOperand(source, masm()); if (destination->IsRegister()) { __ Ldr(g.ToRegister(destination), src); } else { @@ -3657,15 +3649,15 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, return; } case MoveType::kStackToStack: { - MemOperand src = g.ToMemOperand(source, tasm()); - MemOperand dst = g.ToMemOperand(destination, tasm()); + MemOperand src = g.ToMemOperand(source, masm()); + MemOperand dst = g.ToMemOperand(destination, masm()); if (source->IsSimd128StackSlot()) { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); VRegister temp = scope.AcquireQ(); __ Ldr(temp, src); __ Str(temp, dst); } else { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); Register temp = scope.AcquireX(); __ Ldr(temp, src); __ Str(temp, dst); @@ -3689,9 +3681,9 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, } case MoveType::kConstantToStack: { Constant src = g.ToConstant(source); - MemOperand dst = g.ToMemOperand(destination, tasm()); + MemOperand dst = g.ToMemOperand(destination, masm()); if (destination->IsStackSlot()) { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); Register temp = scope.AcquireX(); MoveConstantToRegister(temp, src); __ Str(temp, dst); @@ -3699,7 +3691,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, if (base::bit_cast(src.ToFloat32()) == 0) { __ Str(wzr, dst); } else { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); VRegister temp = scope.AcquireS(); __ Fmov(temp, src.ToFloat32()); __ Str(temp, dst); @@ -3709,7 +3701,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, if (src.ToFloat64().AsUint64() == 0) { __ Str(xzr, dst); } else { - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); VRegister temp = scope.AcquireD(); __ Fmov(temp, src.ToFloat64().value()); __ Str(temp, dst); @@ -3740,8 +3732,8 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source, } return; case MoveType::kRegisterToStack: { - UseScratchRegisterScope scope(tasm()); - MemOperand dst = g.ToMemOperand(destination, tasm()); + UseScratchRegisterScope scope(masm()); + MemOperand dst = g.ToMemOperand(destination, masm()); if (source->IsRegister()) { Register temp = scope.AcquireX(); Register src = g.ToRegister(source); @@ -3749,7 +3741,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source, __ Ldr(src, dst); __ Str(temp, dst); } else { - UseScratchRegisterScope scope(tasm()); + 
UseScratchRegisterScope scope(masm()); VRegister src = g.ToDoubleRegister(source); if (source->IsFloatRegister() || source->IsDoubleRegister()) { VRegister temp = scope.AcquireD(); @@ -3767,9 +3759,9 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source, return; } case MoveType::kStackToStack: { - UseScratchRegisterScope scope(tasm()); - MemOperand src = g.ToMemOperand(source, tasm()); - MemOperand dst = g.ToMemOperand(destination, tasm()); + UseScratchRegisterScope scope(masm()); + MemOperand src = g.ToMemOperand(source, masm()); + MemOperand dst = g.ToMemOperand(destination, masm()); VRegister temp_0 = scope.AcquireD(); VRegister temp_1 = scope.AcquireD(); if (source->IsSimd128StackSlot()) { diff --git a/src/compiler/backend/arm64/instruction-codes-arm64.h b/src/compiler/backend/arm64/instruction-codes-arm64.h index 6c4eafa1d9..05e2d8a509 100644 --- a/src/compiler/backend/arm64/instruction-codes-arm64.h +++ b/src/compiler/backend/arm64/instruction-codes-arm64.h @@ -199,11 +199,9 @@ namespace compiler { V(Arm64Float64MoveU64) \ V(Arm64U64MoveFloat64) \ V(Arm64LdrDecompressTaggedSigned) \ - V(Arm64LdrDecompressTaggedPointer) \ - V(Arm64LdrDecompressAnyTagged) \ + V(Arm64LdrDecompressTagged) \ V(Arm64LdarDecompressTaggedSigned) \ - V(Arm64LdarDecompressTaggedPointer) \ - V(Arm64LdarDecompressAnyTagged) \ + V(Arm64LdarDecompressTagged) \ V(Arm64StrCompressTagged) \ V(Arm64StlrCompressTagged) \ V(Arm64LdrDecodeSandboxedPointer) \ diff --git a/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/src/compiler/backend/arm64/instruction-scheduler-arm64.cc index eba6cdf75e..dcfb0151f3 100644 --- a/src/compiler/backend/arm64/instruction-scheduler-arm64.cc +++ b/src/compiler/backend/arm64/instruction-scheduler-arm64.cc @@ -315,11 +315,9 @@ int InstructionScheduler::GetTargetInstructionFlags( case kArm64LdrW: case kArm64Ldr: case kArm64LdrDecompressTaggedSigned: - case kArm64LdrDecompressTaggedPointer: - case kArm64LdrDecompressAnyTagged: + case kArm64LdrDecompressTagged: case kArm64LdarDecompressTaggedSigned: - case kArm64LdarDecompressTaggedPointer: - case kArm64LdarDecompressAnyTagged: + case kArm64LdarDecompressTagged: case kArm64LdrDecodeSandboxedPointer: case kArm64Peek: case kArm64LoadSplat: @@ -431,8 +429,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) { return 1; case kArm64LdrDecompressTaggedSigned: - case kArm64LdrDecompressTaggedPointer: - case kArm64LdrDecompressAnyTagged: + case kArm64LdrDecompressTagged: case kArm64Ldr: case kArm64LdrD: case kArm64LdrS: diff --git a/src/compiler/backend/arm64/instruction-selector-arm64.cc b/src/compiler/backend/arm64/instruction-selector-arm64.cc index 5c0c6415c5..a68536fb3e 100644 --- a/src/compiler/backend/arm64/instruction-selector-arm64.cc +++ b/src/compiler/backend/arm64/instruction-selector-arm64.cc @@ -623,7 +623,7 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode, selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) { ptrdiff_t const delta = g.GetIntegerConstantValue(index) + - TurboAssemblerBase::RootRegisterOffsetForExternalReference( + MacroAssemblerBase::RootRegisterOffsetForExternalReference( selector->isolate(), m.ResolvedValue()); input_count = 1; // Check that the delta is a 32-bit integer due to the limitations of @@ -843,11 +843,8 @@ void InstructionSelector::VisitLoad(Node* node) { immediate_mode = kLoadStoreImm32; break; case MachineRepresentation::kTaggedPointer: - opcode = kArm64LdrDecompressTaggedPointer; - immediate_mode = kLoadStoreImm32; 
- break; case MachineRepresentation::kTagged: - opcode = kArm64LdrDecompressAnyTagged; + opcode = kArm64LdrDecompressTagged; immediate_mode = kLoadStoreImm32; break; #else @@ -988,7 +985,7 @@ void InstructionSelector::VisitStore(Node* node) { CanAddressRelativeToRootsRegister(m.ResolvedValue())) { ptrdiff_t const delta = g.GetIntegerConstantValue(index) + - TurboAssemblerBase::RootRegisterOffsetForExternalReference( + MacroAssemblerBase::RootRegisterOffsetForExternalReference( isolate(), m.ResolvedValue()); if (is_int32(delta)) { input_count = 2; @@ -2773,10 +2770,10 @@ void VisitAtomicLoad(InstructionSelector* selector, Node* node, code = kArm64LdarDecompressTaggedSigned; break; case MachineRepresentation::kTaggedPointer: - code = kArm64LdarDecompressTaggedPointer; + code = kArm64LdarDecompressTagged; break; case MachineRepresentation::kTagged: - code = kArm64LdarDecompressAnyTagged; + code = kArm64LdarDecompressTagged; break; #else case MachineRepresentation::kTaggedSigned: // Fall through. diff --git a/src/compiler/backend/code-generator-impl.h b/src/compiler/backend/code-generator-impl.h index b8238a36a7..4a2c770b88 100644 --- a/src/compiler/backend/code-generator-impl.h +++ b/src/compiler/backend/code-generator-impl.h @@ -266,14 +266,14 @@ class OutOfLineCode : public ZoneObject { Label* entry() { return &entry_; } Label* exit() { return &exit_; } const Frame* frame() const { return frame_; } - TurboAssembler* tasm() { return tasm_; } + MacroAssembler* masm() { return masm_; } OutOfLineCode* next() const { return next_; } private: Label entry_; Label exit_; const Frame* const frame_; - TurboAssembler* const tasm_; + MacroAssembler* const masm_; OutOfLineCode* const next_; }; diff --git a/src/compiler/backend/code-generator.cc b/src/compiler/backend/code-generator.cc index 4d747d7fc5..ee75ca27bc 100644 --- a/src/compiler/backend/code-generator.cc +++ b/src/compiler/backend/code-generator.cc @@ -64,7 +64,7 @@ CodeGenerator::CodeGenerator( current_block_(RpoNumber::Invalid()), start_source_position_(start_source_position), current_source_position_(SourcePosition::Unknown()), - tasm_(isolate, options, CodeObjectRequired::kNo, + masm_(isolate, options, CodeObjectRequired::kNo, #if V8_ENABLE_WEBASSEMBLY buffer_cache ? 
buffer_cache->GetAssemblerBuffer( AssemblerBase::kDefaultBufferSize) @@ -98,15 +98,15 @@ CodeGenerator::CodeGenerator( } CreateFrameAccessState(frame); CHECK_EQ(info->is_osr(), osr_helper_.has_value()); - tasm_.set_jump_optimization_info(jump_opt); + masm_.set_jump_optimization_info(jump_opt); CodeKind code_kind = info->code_kind(); if (code_kind == CodeKind::WASM_FUNCTION || code_kind == CodeKind::WASM_TO_CAPI_FUNCTION || code_kind == CodeKind::WASM_TO_JS_FUNCTION || code_kind == CodeKind::JS_TO_WASM_FUNCTION) { - tasm_.set_abort_hard(true); + masm_.set_abort_hard(true); } - tasm_.set_builtin(builtin); + masm_.set_builtin(builtin); } bool CodeGenerator::wasm_runtime_exception_support() const { @@ -173,19 +173,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall( Label* jump_deoptimization_entry_label = &jump_deoptimization_entry_labels_[static_cast(deopt_kind)]; if (info()->source_positions()) { - tasm()->RecordDeoptReason(deoptimization_reason, exit->node_id(), + masm()->RecordDeoptReason(deoptimization_reason, exit->node_id(), exit->pos(), deoptimization_id); } if (deopt_kind == DeoptimizeKind::kLazy) { ++lazy_deopt_count_; - tasm()->BindExceptionHandler(exit->label()); + masm()->BindExceptionHandler(exit->label()); } else { ++eager_deopt_count_; - tasm()->bind(exit->label()); + masm()->bind(exit->label()); } Builtin target = Deoptimizer::GetDeoptimizationEntry(deopt_kind); - tasm()->CallForDeoptimization(target, deoptimization_id, exit->label(), + masm()->CallForDeoptimization(target, deoptimization_id, exit->label(), deopt_kind, exit->continue_label(), jump_deoptimization_entry_label); @@ -195,7 +195,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall( } void CodeGenerator::MaybeEmitOutOfLineConstantPool() { - tasm()->MaybeEmitOutOfLineConstantPool(); + masm()->MaybeEmitOutOfLineConstantPool(); } void CodeGenerator::AssembleCode() { @@ -204,27 +204,27 @@ void CodeGenerator::AssembleCode() { // Open a frame scope to indicate that there is a frame on the stack. The // MANUAL indicates that the scope shouldn't actually generate code to set up // the frame (that is done in AssemblePrologue). - FrameScope frame_scope(tasm(), StackFrame::MANUAL); + FrameScope frame_scope(masm(), StackFrame::MANUAL); if (info->source_positions()) { AssembleSourcePosition(start_source_position()); } - offsets_info_.code_start_register_check = tasm()->pc_offset(); + offsets_info_.code_start_register_check = masm()->pc_offset(); - tasm()->CodeEntry(); + masm()->CodeEntry(); // Check that {kJavaScriptCallCodeStartRegister} has been set correctly. if (v8_flags.debug_code && info->called_with_code_start_register()) { - tasm()->RecordComment("-- Prologue: check code start register --"); + masm()->RecordComment("-- Prologue: check code start register --"); AssembleCodeStartRegisterCheck(); } - offsets_info_.deopt_check = tasm()->pc_offset(); + offsets_info_.deopt_check = masm()->pc_offset(); // We want to bailout only from JS functions, which are the only ones // that are optimized. if (info->IsOptimizing()) { DCHECK(linkage()->GetIncomingDescriptor()->IsJSFunctionCall()); - tasm()->RecordComment("-- Prologue: check for deoptimization --"); + masm()->RecordComment("-- Prologue: check for deoptimization --"); BailoutIfDeoptimized(); } @@ -258,22 +258,22 @@ void CodeGenerator::AssembleCode() { instr_starts_.assign(instructions()->instructions().size(), {}); } // Assemble instructions in assembly order. 
- offsets_info_.blocks_start = tasm()->pc_offset(); + offsets_info_.blocks_start = masm()->pc_offset(); for (const InstructionBlock* block : instructions()->ao_blocks()) { // Align loop headers on vendor recommended boundaries. - if (!tasm()->jump_optimization_info()) { + if (!masm()->jump_optimization_info()) { if (block->ShouldAlignLoopHeader()) { - tasm()->LoopHeaderAlign(); + masm()->LoopHeaderAlign(); } else if (block->ShouldAlignCodeTarget()) { - tasm()->CodeTargetAlign(); + masm()->CodeTargetAlign(); } } if (info->trace_turbo_json()) { - block_starts_[block->rpo_number().ToInt()] = tasm()->pc_offset(); + block_starts_[block->rpo_number().ToInt()] = masm()->pc_offset(); } // Bind a label for a block. current_block_ = block->rpo_number(); - unwinding_info_writer_.BeginInstructionBlock(tasm()->pc_offset(), block); + unwinding_info_writer_.BeginInstructionBlock(masm()->pc_offset(), block); if (v8_flags.code_comments) { std::ostringstream buffer; buffer << "-- B" << block->rpo_number().ToInt() << " start"; @@ -289,12 +289,12 @@ void CodeGenerator::AssembleCode() { buffer << " (in loop " << block->loop_header().ToInt() << ")"; } buffer << " --"; - tasm()->RecordComment(buffer.str().c_str()); + masm()->RecordComment(buffer.str().c_str()); } frame_access_state()->MarkHasFrame(block->needs_frame()); - tasm()->bind(GetLabel(current_block_)); + masm()->bind(GetLabel(current_block_)); if (block->must_construct_frame()) { AssembleConstructFrame(); @@ -303,7 +303,7 @@ void CodeGenerator::AssembleCode() { // using the roots. // TODO(mtrofin): investigate how we can avoid doing this repeatedly. if (linkage()->GetIncomingDescriptor()->InitializeRootRegister()) { - tasm()->InitializeRootRegister(); + masm()->InitializeRootRegister(); } } #ifdef V8_TARGET_ARCH_RISCV64 @@ -312,10 +312,10 @@ void CodeGenerator::AssembleCode() { // back between blocks. the Rvv instruction may get an incorrect vtype. so // here VectorUnit needs to be cleared to ensure that the vtype is correct // within the block. - tasm()->VU.clear(); + masm()->VU.clear(); #endif if (V8_EMBEDDED_CONSTANT_POOL_BOOL && !block->needs_frame()) { - ConstantPoolUnavailableScope constant_pool_unavailable(tasm()); + ConstantPoolUnavailableScope constant_pool_unavailable(masm()); result_ = AssembleBlock(block); } else { result_ = AssembleBlock(block); @@ -325,29 +325,29 @@ void CodeGenerator::AssembleCode() { } // Assemble all out-of-line code. - offsets_info_.out_of_line_code = tasm()->pc_offset(); + offsets_info_.out_of_line_code = masm()->pc_offset(); if (ools_) { - tasm()->RecordComment("-- Out of line code --"); + masm()->RecordComment("-- Out of line code --"); for (OutOfLineCode* ool = ools_; ool; ool = ool->next()) { - tasm()->bind(ool->entry()); + masm()->bind(ool->entry()); ool->Generate(); - if (ool->exit()->is_bound()) tasm()->jmp(ool->exit()); + if (ool->exit()->is_bound()) masm()->jmp(ool->exit()); } } // This nop operation is needed to ensure that the trampoline is not // confused with the pc of the call before deoptimization. // The test regress/regress-259 is an example of where we need it. - tasm()->nop(); + masm()->nop(); // For some targets, we must make sure that constant and veneer pools are // emitted before emitting the deoptimization exits. PrepareForDeoptimizationExits(&deoptimization_exits_); - deopt_exit_start_offset_ = tasm()->pc_offset(); + deopt_exit_start_offset_ = masm()->pc_offset(); // Assemble deoptimization exits. 
- offsets_info_.deoptimization_exits = tasm()->pc_offset(); + offsets_info_.deoptimization_exits = masm()->pc_offset(); int last_updated = 0; // We sort the deoptimization exits here so that the lazy ones will be visited // last. We need this as lazy deopts might need additional instructions. @@ -367,7 +367,7 @@ void CodeGenerator::AssembleCode() { { #ifdef V8_TARGET_ARCH_PPC64 v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool( - tasm()); + masm()); #endif for (DeoptimizationExit* exit : deoptimization_exits_) { if (exit->emitted()) continue; @@ -388,19 +388,19 @@ void CodeGenerator::AssembleCode() { } } - offsets_info_.pools = tasm()->pc_offset(); + offsets_info_.pools = masm()->pc_offset(); // TODO(jgruber): Move all inlined metadata generation into a new, // architecture-independent version of FinishCode. Currently, this includes // the safepoint table, handler table, constant pool, and code comments, in // that order. FinishCode(); - offsets_info_.jump_tables = tasm()->pc_offset(); + offsets_info_.jump_tables = masm()->pc_offset(); // Emit the jump tables. if (jump_tables_) { - tasm()->Align(kSystemPointerSize); + masm()->Align(kSystemPointerSize); for (JumpTable* table = jump_tables_; table; table = table->next()) { - tasm()->bind(table->label()); + masm()->bind(table->label()); AssembleJumpTable(table->targets(), table->target_count()); } } @@ -408,34 +408,35 @@ void CodeGenerator::AssembleCode() { // The LinuxPerfJitLogger logs code up until here, excluding the safepoint // table. Resolve the unwinding info now so it is aware of the same code // size as reported by perf. - unwinding_info_writer_.Finish(tasm()->pc_offset()); + unwinding_info_writer_.Finish(masm()->pc_offset()); // Final alignment before starting on the metadata section. - tasm()->Align(InstructionStream::kMetadataAlignment); + masm()->Align(InstructionStream::kMetadataAlignment); - safepoints()->Emit(tasm(), frame()->GetTotalFrameSlotCount()); + safepoints()->Emit(masm(), frame()->GetTotalFrameSlotCount()); // Emit the exception handler table. 
   if (!handlers_.empty()) {
-    handler_table_offset_ = HandlerTable::EmitReturnTableStart(tasm());
+    handler_table_offset_ = HandlerTable::EmitReturnTableStart(masm());
     for (size_t i = 0; i < handlers_.size(); ++i) {
-      HandlerTable::EmitReturnEntry(tasm(), handlers_[i].pc_offset,
+      HandlerTable::EmitReturnEntry(masm(), handlers_[i].pc_offset,
                                     handlers_[i].handler->pos());
     }
   }
-  tasm()->MaybeEmitOutOfLineConstantPool();
-  tasm()->FinalizeJumpOptimizationInfo();
+  masm()->MaybeEmitOutOfLineConstantPool();
+  masm()->FinalizeJumpOptimizationInfo();
   result_ = kSuccess;
 }
+#ifndef V8_TARGET_ARCH_X64
 void CodeGenerator::AssembleArchBinarySearchSwitchRange(
     Register input, RpoNumber def_block, std::pair<int32_t, Label*>* begin,
     std::pair<int32_t, Label*>* end) {
   if (end - begin < kBinarySearchSwitchMinimalCases) {
     while (begin != end) {
-      tasm()->JumpIfEqual(input, begin->first, begin->second);
+      masm()->JumpIfEqual(input, begin->first, begin->second);
       ++begin;
     }
     AssembleArchJumpRegardlessOfAssemblyOrder(def_block);
@@ -443,11 +444,12 @@ void CodeGenerator::AssembleArchBinarySearchSwitchRange(
   }
   auto middle = begin + (end - begin) / 2;
   Label less_label;
-  tasm()->JumpIfLessThan(input, middle->first, &less_label);
+  masm()->JumpIfLessThan(input, middle->first, &less_label);
   AssembleArchBinarySearchSwitchRange(input, def_block, middle, end);
-  tasm()->bind(&less_label);
+  masm()->bind(&less_label);
   AssembleArchBinarySearchSwitchRange(input, def_block, begin, middle);
 }
+#endif  // V8_TARGET_ARCH_X64
 void CodeGenerator::AssembleArchJump(RpoNumber target) {
   if (!IsNextInAssemblyOrder(target))
@@ -469,7 +471,7 @@ base::OwnedVector CodeGenerator::GetProtectedInstructionsData() {
 MaybeHandle<Code> CodeGenerator::FinalizeCode() {
   if (result_ != kSuccess) {
-    tasm()->AbortedCodeGeneration();
+    masm()->AbortedCodeGeneration();
     return {};
   }
@@ -482,11 +484,11 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() {
   // Allocate and install the code.
   CodeDesc desc;
-  tasm()->GetCode(isolate(), &desc, safepoints(), handler_table_offset_);
+  masm()->GetCode(isolate(), &desc, safepoints(), handler_table_offset_);
 #if defined(V8_OS_WIN64)
   if (Builtins::IsBuiltinId(info_->builtin())) {
-    isolate_->SetBuiltinUnwindData(info_->builtin(), tasm()->GetUnwindInfo());
+    isolate_->SetBuiltinUnwindData(info_->builtin(), masm()->GetUnwindInfo());
   }
 #endif  // V8_OS_WIN64
@@ -508,7 +510,7 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() {
   Handle<Code> code;
   if (!maybe_code.ToHandle(&code)) {
-    tasm()->AbortedCodeGeneration();
+    masm()->AbortedCodeGeneration();
     return {};
   }
@@ -527,7 +529,7 @@ bool CodeGenerator::IsNextInAssemblyOrder(RpoNumber block) const {
 }
 void CodeGenerator::RecordSafepoint(ReferenceMap* references) {
-  auto safepoint = safepoints()->DefineSafepoint(tasm());
+  auto safepoint = safepoints()->DefineSafepoint(masm());
   int frame_header_offset = frame()->GetFixedSlotCount();
   for (const InstructionOperand& operand : references->reference_operands()) {
     if (operand.IsStackSlot()) {
@@ -558,7 +560,7 @@ bool CodeGenerator::IsMaterializableFromRoot(Handle<HeapObject> object,
 CodeGenerator::CodeGenResult CodeGenerator::AssembleBlock(
     const InstructionBlock* block) {
   if (block->IsHandler()) {
-    tasm()->ExceptionHandler();
+    masm()->ExceptionHandler();
   }
   for (int i = block->code_start(); i < block->code_end(); ++i) {
     CodeGenResult result = AssembleInstruction(i, block);
@@ -718,7 +720,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
     int instruction_index, const InstructionBlock* block) {
   Instruction* instr = instructions()->InstructionAt(instruction_index);
   if (info()->trace_turbo_json()) {
-    instr_starts_[instruction_index].gap_pc_offset = tasm()->pc_offset();
+    instr_starts_[instruction_index].gap_pc_offset = masm()->pc_offset();
   }
   int first_unused_stack_slot;
   FlagsMode mode = FlagsModeField::decode(instr->opcode());
@@ -738,14 +740,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
     AssembleDeconstructFrame();
   }
   if (info()->trace_turbo_json()) {
-    instr_starts_[instruction_index].arch_instr_pc_offset = tasm()->pc_offset();
+    instr_starts_[instruction_index].arch_instr_pc_offset = masm()->pc_offset();
   }
   // Assemble architecture-specific code for the instruction.
CodeGenResult result = AssembleArchInstruction(instr); if (result != kSuccess) return result; if (info()->trace_turbo_json()) { - instr_starts_[instruction_index].condition_pc_offset = tasm()->pc_offset(); + instr_starts_[instruction_index].condition_pc_offset = masm()->pc_offset(); } FlagsCondition condition = FlagsConditionField::decode(instr->opcode()); @@ -779,7 +781,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction( branch.false_label = exit->continue_label(); branch.fallthru = true; AssembleArchDeoptBranch(instr, &branch); - tasm()->bind(exit->continue_label()); + masm()->bind(exit->continue_label()); break; } case kFlags_set: { @@ -818,7 +820,7 @@ void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) { if (source_position == current_source_position_) return; current_source_position_ = source_position; if (!source_position.IsKnown()) return; - source_position_table_builder_.AddPosition(tasm()->pc_offset(), + source_position_table_builder_.AddPosition(masm()->pc_offset(), source_position, false); if (v8_flags.code_comments) { OptimizedCompilationInfo* info = this->info(); @@ -833,8 +835,8 @@ void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) { buffer << "-- "; // Turbolizer only needs the source position, as it can reconstruct // the inlining stack from other information. - if (info->trace_turbo_json() || !tasm()->isolate() || - tasm()->isolate()->concurrent_recompilation_enabled()) { + if (info->trace_turbo_json() || !masm()->isolate() || + masm()->isolate()->concurrent_recompilation_enabled()) { buffer << source_position; } else { AllowGarbageCollection allocation; @@ -843,7 +845,7 @@ void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) { buffer << source_position.InliningStack(info); } buffer << " --"; - tasm()->RecordComment(buffer.str().c_str()); + masm()->RecordComment(buffer.str().c_str()); } } @@ -981,7 +983,7 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) { RpoNumber handler_rpo = i.InputRpo(instr->InputCount() - 1); DCHECK(instructions()->InstructionBlockAt(handler_rpo)->IsHandler()); handlers_.push_back( - {GetLabel(handler_rpo), tasm()->pc_offset_for_safepoint()}); + {GetLabel(handler_rpo), masm()->pc_offset_for_safepoint()}); } if (needs_frame_state) { @@ -991,7 +993,7 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) { size_t frame_state_offset = 1; FrameStateDescriptor* descriptor = GetDeoptimizationEntry(instr, frame_state_offset).descriptor(); - int pc_offset = tasm()->pc_offset_for_safepoint(); + int pc_offset = masm()->pc_offset_for_safepoint(); BuildTranslation(instr, pc_offset, frame_state_offset, 0, descriptor->state_combine()); } @@ -1325,7 +1327,7 @@ void CodeGenerator::AddTranslationForOperand(Instruction* instr, } void CodeGenerator::MarkLazyDeoptSite() { - last_lazy_deopt_pc_ = tasm()->pc_offset(); + last_lazy_deopt_pc_ = masm()->pc_offset(); } DeoptimizationExit* CodeGenerator::AddDeoptimizationExit( @@ -1336,7 +1338,7 @@ DeoptimizationExit* CodeGenerator::AddDeoptimizationExit( } OutOfLineCode::OutOfLineCode(CodeGenerator* gen) - : frame_(gen->frame()), tasm_(gen->tasm()), next_(gen->ools_) { + : frame_(gen->frame()), masm_(gen->masm()), next_(gen->ools_) { gen->ools_ = this; } diff --git a/src/compiler/backend/code-generator.h b/src/compiler/backend/code-generator.h index 288d67f4df..20cc045b11 100644 --- a/src/compiler/backend/code-generator.h +++ b/src/compiler/backend/code-generator.h @@ -188,7 +188,7 @@ class V8_EXPORT_PRIVATE CodeGenerator 
final : public GapResolver::Assembler {
   void RecordSafepoint(ReferenceMap* references);
   Zone* zone() const { return zone_; }
-  TurboAssembler* tasm() { return &tasm_; }
+  MacroAssembler* masm() { return &masm_; }
   SafepointTableBuilder* safepoint_table_builder() { return &safepoints_; }
   size_t handler_table_offset() const { return handler_table_offset_; }
@@ -278,9 +278,15 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
 #if V8_ENABLE_WEBASSEMBLY
   void AssembleArchTrap(Instruction* instr, FlagsCondition condition);
 #endif  // V8_ENABLE_WEBASSEMBLY
+#if V8_TARGET_ARCH_X64
+  void AssembleArchBinarySearchSwitchRange(
+      Register input, RpoNumber def_block, std::pair<int32_t, Label*>* begin,
+      std::pair<int32_t, Label*>* end, base::Optional<int32_t>& last_cmp_value);
+#else
   void AssembleArchBinarySearchSwitchRange(Register input, RpoNumber def_block,
                                            std::pair<int32_t, Label*>* begin,
                                            std::pair<int32_t, Label*>* end);
+#endif  // V8_TARGET_ARCH_X64
   void AssembleArchBinarySearchSwitch(Instruction* instr);
   void AssembleArchTableSwitch(Instruction* instr);
@@ -448,7 +454,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
   RpoNumber current_block_;
   SourcePosition start_source_position_;
   SourcePosition current_source_position_;
-  TurboAssembler tasm_;
+  MacroAssembler masm_;
   GapResolver resolver_;
   SafepointTableBuilder safepoints_;
   ZoneVector<HandlerInfo> handlers_;
diff --git a/src/compiler/backend/ia32/code-generator-ia32.cc b/src/compiler/backend/ia32/code-generator-ia32.cc
index 4c9724a3a8..5ced2002c1 100644
--- a/src/compiler/backend/ia32/code-generator-ia32.cc
+++ b/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -29,7 +29,7 @@ namespace v8 {
 namespace internal {
 namespace compiler {
-#define __ tasm()->
+#define __ masm()->
 #define kScratchDoubleReg xmm0
@@ -202,11 +202,11 @@ class IA32OperandConverter : public InstructionOperandConverter {
   void MoveInstructionOperandToRegister(Register destination,
                                         InstructionOperand* op) {
     if (op->IsImmediate() || op->IsConstant()) {
-      gen_->tasm()->mov(destination, ToImmediate(op));
+      gen_->masm()->mov(destination, ToImmediate(op));
     } else if (op->IsRegister()) {
-      gen_->tasm()->Move(destination, ToRegister(op));
+      gen_->masm()->Move(destination, ToRegister(op));
     } else {
-      gen_->tasm()->mov(destination, ToOperand(op));
+      gen_->masm()->mov(destination, ToOperand(op));
     }
   }
 };
@@ -475,7 +475,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
     XMMRegister src0 = i.InputSimd128Register(0);                     \
     Operand src1 = i.InputOperand(instr->InputCount() == 2 ?
1 : 0); \ if (CpuFeatures::IsSupported(AVX)) { \ - CpuFeatureScope avx_scope(tasm(), AVX); \ + CpuFeatureScope avx_scope(masm(), AVX); \ __ v##opcode(i.OutputSimd128Register(), src0, src1); \ } else { \ DCHECK_EQ(i.OutputSimd128Register(), src0); \ @@ -485,11 +485,11 @@ class OutOfLineRecordWrite final : public OutOfLineCode { #define ASSEMBLE_SIMD_IMM_SHUFFLE(opcode, SSELevel, imm) \ if (CpuFeatures::IsSupported(AVX)) { \ - CpuFeatureScope avx_scope(tasm(), AVX); \ + CpuFeatureScope avx_scope(masm(), AVX); \ __ v##opcode(i.OutputSimd128Register(), i.InputSimd128Register(0), \ i.InputOperand(1), imm); \ } else { \ - CpuFeatureScope sse_scope(tasm(), SSELevel); \ + CpuFeatureScope sse_scope(masm(), SSELevel); \ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); \ __ opcode(i.OutputSimd128Register(), i.InputOperand(1), imm); \ } @@ -532,26 +532,25 @@ class OutOfLineRecordWrite final : public OutOfLineCode { int8_t laneidx = i.InputInt8(1); \ if (HasAddressingMode(instr)) { \ if (CpuFeatures::IsSupported(AVX)) { \ - CpuFeatureScope avx_scope(tasm(), AVX); \ + CpuFeatureScope avx_scope(masm(), AVX); \ __ v##OPCODE(dst, src, i.MemoryOperand(2), laneidx); \ } else { \ DCHECK_EQ(dst, src); \ - CpuFeatureScope sse_scope(tasm(), CPU_FEATURE); \ + CpuFeatureScope sse_scope(masm(), CPU_FEATURE); \ __ OPCODE(dst, i.MemoryOperand(2), laneidx); \ } \ } else { \ if (CpuFeatures::IsSupported(AVX)) { \ - CpuFeatureScope avx_scope(tasm(), AVX); \ + CpuFeatureScope avx_scope(masm(), AVX); \ __ v##OPCODE(dst, src, i.InputOperand(2), laneidx); \ } else { \ DCHECK_EQ(dst, src); \ - CpuFeatureScope sse_scope(tasm(), CPU_FEATURE); \ + CpuFeatureScope sse_scope(masm(), CPU_FEATURE); \ __ OPCODE(dst, i.InputOperand(2), laneidx); \ } \ } \ } while (false) - void CodeGenerator::AssembleDeconstructFrame() { __ mov(esp, ebp); __ pop(ebp); @@ -566,7 +565,7 @@ void CodeGenerator::AssemblePrepareTailCall() { namespace { -void AdjustStackPointerForTailCall(TurboAssembler* tasm, +void AdjustStackPointerForTailCall(MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp, bool allow_shrinkage = true) { @@ -574,10 +573,10 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm, StandardFrameConstants::kFixedSlotCountAboveFp; int stack_slot_delta = new_slot_above_sp - current_sp_offset; if (stack_slot_delta > 0) { - tasm->AllocateStackSpace(stack_slot_delta * kSystemPointerSize); + masm->AllocateStackSpace(stack_slot_delta * kSystemPointerSize); state->IncreaseSPDelta(stack_slot_delta); } else if (allow_shrinkage && stack_slot_delta < 0) { - tasm->add(esp, Immediate(-stack_slot_delta * kSystemPointerSize)); + masm->add(esp, Immediate(-stack_slot_delta * kSystemPointerSize)); state->IncreaseSPDelta(stack_slot_delta); } } @@ -617,7 +616,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, LocationOperand destination_location( LocationOperand::cast(move->destination())); InstructionOperand source(move->source()); - AdjustStackPointerForTailCall(tasm(), frame_access_state(), + AdjustStackPointerForTailCall(masm(), frame_access_state(), destination_location.index()); if (source.IsStackSlot()) { LocationOperand source_location(LocationOperand::cast(source)); @@ -635,13 +634,13 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, move->Eliminate(); } } - AdjustStackPointerForTailCall(tasm(), frame_access_state(), + AdjustStackPointerForTailCall(masm(), frame_access_state(), first_unused_slot_offset, false); } void CodeGenerator::AssembleTailCallAfterGap(Instruction* 
instr, int first_unused_slot_offset) { - AdjustStackPointerForTailCall(tasm(), frame_access_state(), + AdjustStackPointerForTailCall(masm(), frame_access_state(), first_unused_slot_offset); } @@ -884,7 +883,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( { // We don't actually want to generate a pile of code for this, so just // claim there is a stack frame, without generating one. - FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE); + FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE); __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck), RelocInfo::CODE_TARGET); } @@ -1262,7 +1261,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ Sqrtss(i.OutputDoubleRegister(), i.InputOperand(0)); break; case kIA32Float32Round: { - CpuFeatureScope sse_scope(tasm(), SSE4_1); + CpuFeatureScope sse_scope(masm(), SSE4_1); RoundingMode const mode = static_cast(MiscField::decode(instr->opcode())); __ Roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode); @@ -2112,12 +2111,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kIA32Insertps: { if (CpuFeatures::IsSupported(AVX)) { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); __ vinsertps(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputOperand(2), i.InputInt8(1) << 4); } else { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); - CpuFeatureScope sse_scope(tasm(), SSE4_1); + CpuFeatureScope sse_scope(masm(), SSE4_1); __ insertps(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1) << 4); } @@ -2315,12 +2314,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( XMMRegister src1 = i.InputSimd128Register(0); XMMRegister src2 = i.InputSimd128Register(1); if (CpuFeatures::IsSupported(AVX)) { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); __ vpminsd(kScratchDoubleReg, src1, src2); __ vpcmpeqd(dst, kScratchDoubleReg, src2); } else { DCHECK_EQ(dst, src1); - CpuFeatureScope sse_scope(tasm(), SSE4_1); + CpuFeatureScope sse_scope(masm(), SSE4_1); __ pminsd(dst, src2); __ pcmpeqd(dst, src2); } @@ -2328,7 +2327,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kSSEI32x4UConvertF32x4: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); - CpuFeatureScope sse_scope(tasm(), SSE4_1); + CpuFeatureScope sse_scope(masm(), SSE4_1); XMMRegister dst = i.OutputSimd128Register(); XMMRegister tmp = i.TempSimd128Register(0); // NAN->0, negative->0 @@ -2356,7 +2355,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kAVXI32x4UConvertF32x4: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); XMMRegister dst = i.OutputSimd128Register(); XMMRegister tmp = i.TempSimd128Register(0); // NAN->0, negative->0 @@ -2406,7 +2405,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kSSEI32x4GtU: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); - CpuFeatureScope sse_scope(tasm(), SSE4_1); + CpuFeatureScope sse_scope(masm(), SSE4_1); XMMRegister dst = i.OutputSimd128Register(); Operand src = i.InputOperand(1); __ pmaxud(dst, src); @@ -2416,7 +2415,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kAVXI32x4GtU: { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); XMMRegister dst = 
i.OutputSimd128Register(); XMMRegister src1 = i.InputSimd128Register(0); Operand src2 = i.InputOperand(1); @@ -2428,7 +2427,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kSSEI32x4GeU: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); - CpuFeatureScope sse_scope(tasm(), SSE4_1); + CpuFeatureScope sse_scope(masm(), SSE4_1); XMMRegister dst = i.OutputSimd128Register(); Operand src = i.InputOperand(1); __ pminud(dst, src); @@ -2436,7 +2435,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kAVXI32x4GeU: { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); XMMRegister src1 = i.InputSimd128Register(0); Operand src2 = i.InputOperand(1); __ vpminud(kScratchDoubleReg, src1, src2); @@ -2552,7 +2551,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kAVXI16x8Ne: { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); __ vpcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputOperand(1)); __ vpcmpeqw(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg); @@ -2574,7 +2573,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kAVXI16x8GeS: { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); XMMRegister src1 = i.InputSimd128Register(0); Operand src2 = i.InputOperand(1); __ vpminsw(kScratchDoubleReg, src1, src2); @@ -2621,7 +2620,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kSSEI16x8GtU: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); - CpuFeatureScope sse_scope(tasm(), SSE4_1); + CpuFeatureScope sse_scope(masm(), SSE4_1); XMMRegister dst = i.OutputSimd128Register(); Operand src = i.InputOperand(1); __ pmaxuw(dst, src); @@ -2631,7 +2630,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kAVXI16x8GtU: { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); XMMRegister dst = i.OutputSimd128Register(); XMMRegister src1 = i.InputSimd128Register(0); Operand src2 = i.InputOperand(1); @@ -2643,7 +2642,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kSSEI16x8GeU: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); - CpuFeatureScope sse_scope(tasm(), SSE4_1); + CpuFeatureScope sse_scope(masm(), SSE4_1); XMMRegister dst = i.OutputSimd128Register(); Operand src = i.InputOperand(1); __ pminuw(dst, src); @@ -2651,7 +2650,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kAVXI16x8GeU: { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); XMMRegister src1 = i.InputSimd128Register(0); Operand src2 = i.InputOperand(1); __ vpminuw(kScratchDoubleReg, src1, src2); @@ -2844,7 +2843,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kAVXI8x16Ne: { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); __ vpcmpeqb(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputOperand(1)); __ vpcmpeqb(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg); @@ -2859,7 +2858,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kSSEI8x16GeS: { DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); - CpuFeatureScope sse_scope(tasm(), SSE4_1); + CpuFeatureScope sse_scope(masm(), SSE4_1); XMMRegister dst = 
i.OutputSimd128Register(); Operand src = i.InputOperand(1); __ pminsb(dst, src); @@ -2867,7 +2866,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kAVXI8x16GeS: { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); XMMRegister src1 = i.InputSimd128Register(0); Operand src2 = i.InputOperand(1); __ vpminsb(kScratchDoubleReg, src1, src2); @@ -2925,7 +2924,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kAVXI8x16GtU: { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); XMMRegister dst = i.OutputSimd128Register(); XMMRegister src1 = i.InputSimd128Register(0); Operand src2 = i.InputOperand(1); @@ -2944,7 +2943,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kAVXI8x16GeU: { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); XMMRegister src1 = i.InputSimd128Register(0); Operand src2 = i.InputOperand(1); __ vpminub(kScratchDoubleReg, src1, src2); @@ -3183,7 +3182,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( XMMRegister src = i.InputSimd128Register(0); uint8_t lane = i.InputUint8(1) & 0xf; if (CpuFeatures::IsSupported(AVX)) { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); if (lane < 8) { __ vpunpcklbw(dst, src, src); } else { @@ -3234,7 +3233,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpcklbw); break; case kSSES16x8UnzipHigh: { - CpuFeatureScope sse_scope(tasm(), SSE4_1); + CpuFeatureScope sse_scope(masm(), SSE4_1); XMMRegister dst = i.OutputSimd128Register(); XMMRegister src2 = dst; DCHECK_EQ(dst, i.InputSimd128Register(0)); @@ -3248,7 +3247,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kAVXS16x8UnzipHigh: { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); XMMRegister dst = i.OutputSimd128Register(); XMMRegister src2 = dst; if (instr->InputCount() == 2) { @@ -3260,7 +3259,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kSSES16x8UnzipLow: { - CpuFeatureScope sse_scope(tasm(), SSE4_1); + CpuFeatureScope sse_scope(masm(), SSE4_1); XMMRegister dst = i.OutputSimd128Register(); XMMRegister src2 = dst; DCHECK_EQ(dst, i.InputSimd128Register(0)); @@ -3274,7 +3273,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kAVXS16x8UnzipLow: { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); XMMRegister dst = i.OutputSimd128Register(); XMMRegister src2 = dst; __ vpxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg); @@ -3301,7 +3300,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kAVXS8x16UnzipHigh: { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); XMMRegister dst = i.OutputSimd128Register(); XMMRegister src2 = dst; if (instr->InputCount() == 2) { @@ -3328,7 +3327,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kAVXS8x16UnzipLow: { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); XMMRegister dst = i.OutputSimd128Register(); XMMRegister src2 = dst; if (instr->InputCount() == 2) { @@ -3357,7 +3356,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kAVXS8x16TransposeLow: { - CpuFeatureScope 
avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); XMMRegister dst = i.OutputSimd128Register(); if (instr->InputCount() == 1) { __ vpsllw(kScratchDoubleReg, i.InputSimd128Register(0), 8); @@ -3387,7 +3386,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kAVXS8x16TransposeHigh: { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); XMMRegister dst = i.OutputSimd128Register(); if (instr->InputCount() == 1) { __ vpsrlw(dst, i.InputSimd128Register(0), 8); @@ -3423,7 +3422,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kAVXS8x4Reverse: case kAVXS8x8Reverse: { DCHECK_EQ(1, instr->InputCount()); - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); XMMRegister dst = i.OutputSimd128Register(); XMMRegister src = dst; if (arch_opcode != kAVXS8x2Reverse) { @@ -4205,8 +4204,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) { __ j(greater, &mismatch_return, Label::kNear); __ Ret(parameter_slots * kSystemPointerSize, scratch_reg); __ bind(&mismatch_return); - __ DropArguments(argc_reg, scratch_reg, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(argc_reg, scratch_reg, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); // We use a return instead of a jump for better return address prediction. __ Ret(); } else if (additional_pop_count->IsImmediate()) { diff --git a/src/compiler/backend/ia32/instruction-selector-ia32.cc b/src/compiler/backend/ia32/instruction-selector-ia32.cc index 0be9186615..44b24fa829 100644 --- a/src/compiler/backend/ia32/instruction-selector-ia32.cc +++ b/src/compiler/backend/ia32/instruction-selector-ia32.cc @@ -18,7 +18,7 @@ #include "src/codegen/ia32/assembler-ia32.h" #include "src/codegen/ia32/register-ia32.h" #include "src/codegen/machine-type.h" -#include "src/codegen/turbo-assembler.h" +#include "src/codegen/macro-assembler-base.h" #include "src/common/globals.h" #include "src/compiler/backend/instruction-codes.h" #include "src/compiler/backend/instruction-selector-impl.h" @@ -208,7 +208,7 @@ class IA32OperandGenerator final : public OperandGenerator { m.object().ResolvedValue())) { ptrdiff_t const delta = m.index().ResolvedValue() + - TurboAssemblerBase::RootRegisterOffsetForExternalReference( + MacroAssemblerBase::RootRegisterOffsetForExternalReference( selector()->isolate(), m.object().ResolvedValue()); if (is_int32(delta)) { inputs[(*input_count)++] = TempImmediate(static_cast(delta)); diff --git a/src/compiler/backend/instruction-selector.cc b/src/compiler/backend/instruction-selector.cc index 2aa074ba72..0a2c18dc5f 100644 --- a/src/compiler/backend/instruction-selector.cc +++ b/src/compiler/backend/instruction-selector.cc @@ -451,7 +451,7 @@ bool InstructionSelector::CanAddressRelativeToRootsRegister( // 3. IsAddressableThroughRootRegister: Is the target address guaranteed to // have a fixed root-relative offset? If so, we can ignore 2. 
const bool this_root_relative_offset_is_constant = - TurboAssemblerBase::IsAddressableThroughRootRegister(isolate(), + MacroAssemblerBase::IsAddressableThroughRootRegister(isolate(), reference); return this_root_relative_offset_is_constant; } diff --git a/src/compiler/backend/loong64/code-generator-loong64.cc b/src/compiler/backend/loong64/code-generator-loong64.cc index cf08ad96d3..a9944cf866 100644 --- a/src/compiler/backend/loong64/code-generator-loong64.cc +++ b/src/compiler/backend/loong64/code-generator-loong64.cc @@ -23,7 +23,7 @@ namespace v8 { namespace internal { namespace compiler { -#define __ tasm()-> +#define __ masm()-> // TODO(LOONG_dev): consider renaming these macros. #define TRACE_MSG(msg) \ @@ -450,8 +450,8 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, #define ASSEMBLE_IEEE754_BINOP(name) \ do { \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ - UseScratchRegisterScope temps(tasm()); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ + UseScratchRegisterScope temps(masm()); \ Register scratch = temps.Acquire(); \ __ PrepareCallCFunction(0, 2, scratch); \ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \ @@ -459,8 +459,8 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, #define ASSEMBLE_IEEE754_UNOP(name) \ do { \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ - UseScratchRegisterScope temps(tasm()); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ + UseScratchRegisterScope temps(masm()); \ Register scratch = temps.Acquire(); \ __ PrepareCallCFunction(0, 1, scratch); \ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \ @@ -487,7 +487,7 @@ void CodeGenerator::AssemblePrepareTailCall() { namespace { -void AdjustStackPointerForTailCall(TurboAssembler* tasm, +void AdjustStackPointerForTailCall(MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp, bool allow_shrinkage = true) { @@ -495,10 +495,10 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm, StandardFrameConstants::kFixedSlotCountAboveFp; int stack_slot_delta = new_slot_above_sp - current_sp_offset; if (stack_slot_delta > 0) { - tasm->Sub_d(sp, sp, stack_slot_delta * kSystemPointerSize); + masm->Sub_d(sp, sp, stack_slot_delta * kSystemPointerSize); state->IncreaseSPDelta(stack_slot_delta); } else if (allow_shrinkage && stack_slot_delta < 0) { - tasm->Add_d(sp, sp, -stack_slot_delta * kSystemPointerSize); + masm->Add_d(sp, sp, -stack_slot_delta * kSystemPointerSize); state->IncreaseSPDelta(stack_slot_delta); } } @@ -507,19 +507,19 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm, void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, int first_unused_slot_offset) { - AdjustStackPointerForTailCall(tasm(), frame_access_state(), + AdjustStackPointerForTailCall(masm(), frame_access_state(), first_unused_slot_offset, false); } void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr, int first_unused_slot_offset) { - AdjustStackPointerForTailCall(tasm(), frame_access_state(), + AdjustStackPointerForTailCall(masm(), frame_access_state(), first_unused_slot_offset); } // Check that {kJavaScriptCallCodeStartRegister} is correct. void CodeGenerator::AssembleCodeStartRegisterCheck() { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ ComputeCodeStartAddress(scratch); __ Assert(eq, AbortReason::kWrongFunctionCodeStart, @@ -534,7 +534,7 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() { // 2. 
test kMarkedForDeoptimizationBit in those flags; and // 3. if it is not zero then it jumps to the builtin. void CodeGenerator::BailoutIfDeoptimized() { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize; __ Ld_d(scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset)); @@ -628,7 +628,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArchCallJSFunction: { Register func = i.InputRegister(0); if (v8_flags.debug_code) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); // Check the function's context matches the context argument. __ Ld_d(scratch, FieldMemOperand(func, JSFunction::kContextOffset)); @@ -642,7 +642,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArchPrepareCallCFunction: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); int const num_gp_parameters = ParamField::decode(instr->opcode()); int const num_fp_parameters = FPParamField::decode(instr->opcode()); @@ -749,7 +749,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( { // We don't actually want to generate a pile of code for this, so just // claim there is a stack frame, without generating one. - FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE); + FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE); __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck), RelocInfo::CODE_TARGET); } @@ -829,7 +829,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } else { DCHECK_EQ(kArchAtomicStoreWithWriteBarrier, arch_opcode); DCHECK_EQ(addressing_mode, kMode_MRI); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ Add_d(scratch, object, Operand(i.InputInt64(1))); __ amswap_db_d(zero_reg, value, scratch); @@ -843,7 +843,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArchStackSlot: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); FrameOffset offset = frame_access_state()->GetFrameOffset(i.InputInt32(0)); @@ -1225,8 +1225,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; case kLoong64Float64Mod: { // TODO(turbofan): implement directly. - FrameScope scope(tasm(), StackFrame::MANUAL); - UseScratchRegisterScope temps(tasm()); + FrameScope scope(masm(), StackFrame::MANUAL); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ PrepareCallCFunction(0, 2, scratch); __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2); @@ -1363,7 +1363,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ ftintrz_w_s(scratch_d, i.InputDoubleRegister(0)); __ movfr2gr_s(i.OutputRegister(), scratch_d); if (set_overflow_to_min_i32) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead, // because INT32_MIN allows easier out-of-bounds detection. 
@@ -1392,7 +1392,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kLoong64Float64ToInt64: { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); FPURegister scratch_d = kScratchDoubleReg; @@ -1438,7 +1438,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( bool set_overflow_to_min_i32 = MiscField::decode(instr->opcode()); __ Ftintrz_uw_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch); if (set_overflow_to_min_i32) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); // Avoid UINT32_MAX as an overflow indicator and use 0 instead, // because 0 allows easier out-of-bounds detection. @@ -1863,11 +1863,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( << "\""; \ UNIMPLEMENTED(); -void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm, +void AssembleBranchToLabels(CodeGenerator* gen, MacroAssembler* masm, Instruction* instr, FlagsCondition condition, Label* tlabel, Label* flabel, bool fallthru) { #undef __ -#define __ tasm-> +#define __ masm-> Loong64OperandConverter i(gen, instr); // LOONG64 does not have condition code flags, so compare and branch are @@ -1882,7 +1882,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm, __ Branch(tlabel, cc, t8, Operand(zero_reg)); } else if (instr->arch_opcode() == kLoong64Add_d || instr->arch_opcode() == kLoong64Sub_d) { - UseScratchRegisterScope temps(tasm); + UseScratchRegisterScope temps(masm); Register scratch = temps.Acquire(); Register scratch2 = temps.Acquire(); Condition cc = FlagsConditionToConditionOvf(condition); @@ -1941,7 +1941,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm, } if (!fallthru) __ Branch(flabel); // no fallthru to flabel. #undef __ -#define __ tasm()-> +#define __ masm()-> } // Assembles branches after an instruction. @@ -1949,7 +1949,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) { Label* tlabel = branch->true_label; Label* flabel = branch->false_label; - AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel, + AssembleBranchToLabels(this, masm(), instr, branch->condition, tlabel, flabel, branch->fallthru); } @@ -2014,7 +2014,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr, }; auto ool = zone()->New(this, instr); Label* tlabel = ool->entry(); - AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true); + AssembleBranchToLabels(this, masm(), instr, condition, tlabel, nullptr, true); } #endif // V8_ENABLE_WEBASSEMBLY @@ -2041,7 +2041,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr, return; } else if (instr->arch_opcode() == kLoong64Add_d || instr->arch_opcode() == kLoong64Sub_d) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); Condition cc = FlagsConditionToConditionOvf(condition); // Check for overflow creates 1 or 0 for result. @@ -2289,7 +2289,7 @@ void CodeGenerator::AssembleConstructFrame() { // exception unconditionally. Thereby we can avoid the integer overflow // check in the condition code. 
if (required_slots * kSystemPointerSize < v8_flags.stack_size * KB) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ Ld_d(scratch, FieldMemOperand( kWasmInstanceRegister, @@ -2444,7 +2444,7 @@ AllocatedOperand CodeGenerator::Push(InstructionOperand* source) { __ Push(g.ToRegister(source)); frame_access_state()->IncreaseSPDelta(new_slots); } else if (source->IsStackSlot()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ Ld_d(scratch, g.ToMemOperand(source)); __ Push(scratch); @@ -2467,7 +2467,7 @@ void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) { if (dest->IsRegister()) { __ Pop(g.ToRegister(dest)); } else if (dest->IsStackSlot()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ Pop(scratch); __ St_d(scratch, g.ToMemOperand(dest)); @@ -2495,7 +2495,7 @@ void CodeGenerator::MoveToTempLocation(InstructionOperand* source, MachineRepresentation rep) { // Must be kept in sync with {MoveTempLocationTo}. DCHECK(!source->IsImmediate()); - move_cycle_.temps.emplace(tasm()); + move_cycle_.temps.emplace(masm()); auto& temps = *move_cycle_.temps; // Temporarily exclude the reserved scratch registers while we pick one to // resolve the move cycle. Re-include them immediately afterwards as they @@ -2585,7 +2585,7 @@ void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest, void CodeGenerator::SetPendingMove(MoveOperands* move) { InstructionOperand* src = &move->source(); InstructionOperand* dst = &move->destination(); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); if (src->IsConstant() || (src->IsStackSlot() && dst->IsStackSlot())) { Register temp = temps.Acquire(); move_cycle_.scratch_regs.set(temp); @@ -2642,7 +2642,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, if (destination->IsRegister()) { __ Ld_d(g.ToRegister(destination), src); } else { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ Ld_d(scratch, src); __ St_d(scratch, g.ToMemOperand(destination)); @@ -2650,7 +2650,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, } else if (source->IsConstant()) { Constant src = g.ToConstant(source); if (destination->IsRegister() || destination->IsStackSlot()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); Register dst = destination->IsRegister() ? g.ToRegister(destination) : scratch; @@ -2697,7 +2697,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, if (base::bit_cast(src.ToFloat32()) == 0) { __ St_d(zero_reg, dst); } else { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ li(scratch, Operand(base::bit_cast(src.ToFloat32()))); __ St_d(scratch, dst); @@ -2748,7 +2748,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source, // Dispatch on the source and destination operand kinds. Not all // combinations are possible. if (source->IsRegister()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); // Register-register. 
Register src = g.ToRegister(source); @@ -2770,7 +2770,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source, // Since the Ld instruction may need a scratch reg, // we should not use both of the two scratch registers in // UseScratchRegisterScope here. - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); FPURegister scratch_d = kScratchDoubleReg; MemOperand src = g.ToMemOperand(source); @@ -2796,7 +2796,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source, } } else if (source->IsFPStackSlot()) { DCHECK(destination->IsFPStackSlot()); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); FPURegister scratch_d = kScratchDoubleReg; MemOperand src = g.ToMemOperand(source); diff --git a/src/compiler/backend/loong64/instruction-selector-loong64.cc b/src/compiler/backend/loong64/instruction-selector-loong64.cc index 091c7ad9bc..6f7841be97 100644 --- a/src/compiler/backend/loong64/instruction-selector-loong64.cc +++ b/src/compiler/backend/loong64/instruction-selector-loong64.cc @@ -360,7 +360,7 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode, selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) { ptrdiff_t const delta = g.GetIntegerConstantValue(index) + - TurboAssemblerBase::RootRegisterOffsetForExternalReference( + MacroAssemblerBase::RootRegisterOffsetForExternalReference( selector->isolate(), m.ResolvedValue()); // Check that the delta is a 32-bit integer due to the limitations of // immediate operands. @@ -560,7 +560,7 @@ void InstructionSelector::VisitStore(Node* node) { CanAddressRelativeToRootsRegister(m.ResolvedValue())) { ptrdiff_t const delta = g.GetIntegerConstantValue(index) + - TurboAssemblerBase::RootRegisterOffsetForExternalReference( + MacroAssemblerBase::RootRegisterOffsetForExternalReference( isolate(), m.ResolvedValue()); // Check that the delta is a 32-bit integer due to the limitations of // immediate operands. @@ -1398,21 +1398,33 @@ void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) { } void InstructionSelector::VisitChangeInt32ToInt64(Node* node) { - // On LoongArch64, int32 values should all be sign-extended to 64-bit, so - // no need to sign-extend them here. - // But when call to a host function in simulator, if the function return an - // int32 value, the simulator do not sign-extend to int64, because in - // simulator we do not know the function whether return an int32 or int64. -#ifdef USE_SIMULATOR Node* value = node->InputAt(0); - if (value->opcode() == IrOpcode::kCall) { + if ((value->opcode() == IrOpcode::kLoad || + value->opcode() == IrOpcode::kLoadImmutable) && + CanCover(node, value)) { + // Generate sign-extending load. + LoadRepresentation load_rep = LoadRepresentationOf(value->op()); + InstructionCode opcode = kArchNop; + switch (load_rep.representation()) { + case MachineRepresentation::kBit: // Fall through. + case MachineRepresentation::kWord8: + opcode = load_rep.IsUnsigned() ? kLoong64Ld_bu : kLoong64Ld_b; + break; + case MachineRepresentation::kWord16: + opcode = load_rep.IsUnsigned() ? 
kLoong64Ld_hu : kLoong64Ld_h; + break; + case MachineRepresentation::kWord32: + opcode = kLoong64Ld_w; + break; + default: + UNREACHABLE(); + } + EmitLoad(this, value, opcode, node); + } else { Loong64OperandGenerator g(this); Emit(kLoong64Sll_w, g.DefineAsRegister(node), g.UseRegister(value), g.TempImmediate(0)); - return; } -#endif - EmitIdentity(node); } bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) { diff --git a/src/compiler/backend/mips64/code-generator-mips64.cc b/src/compiler/backend/mips64/code-generator-mips64.cc index 29d1777720..8757782513 100644 --- a/src/compiler/backend/mips64/code-generator-mips64.cc +++ b/src/compiler/backend/mips64/code-generator-mips64.cc @@ -23,7 +23,7 @@ namespace v8 { namespace internal { namespace compiler { -#define __ tasm()-> +#define __ masm()-> // TODO(plind): consider renaming these macros. #define TRACE_MSG(msg) \ @@ -464,7 +464,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, #define ASSEMBLE_IEEE754_BINOP(name) \ do { \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ __ PrepareCallCFunction(0, 2, kScratchReg); \ __ MovToFloatParameters(i.InputDoubleRegister(0), \ i.InputDoubleRegister(1)); \ @@ -475,7 +475,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, #define ASSEMBLE_IEEE754_UNOP(name) \ do { \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ __ PrepareCallCFunction(0, 1, kScratchReg); \ __ MovToFloatParameter(i.InputDoubleRegister(0)); \ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \ @@ -504,7 +504,7 @@ void CodeGenerator::AssemblePrepareTailCall() { namespace { -void AdjustStackPointerForTailCall(TurboAssembler* tasm, +void AdjustStackPointerForTailCall(MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp, bool allow_shrinkage = true) { @@ -512,10 +512,10 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm, StandardFrameConstants::kFixedSlotCountAboveFp; int stack_slot_delta = new_slot_above_sp - current_sp_offset; if (stack_slot_delta > 0) { - tasm->Dsubu(sp, sp, stack_slot_delta * kSystemPointerSize); + masm->Dsubu(sp, sp, stack_slot_delta * kSystemPointerSize); state->IncreaseSPDelta(stack_slot_delta); } else if (allow_shrinkage && stack_slot_delta < 0) { - tasm->Daddu(sp, sp, -stack_slot_delta * kSystemPointerSize); + masm->Daddu(sp, sp, -stack_slot_delta * kSystemPointerSize); state->IncreaseSPDelta(stack_slot_delta); } } @@ -524,13 +524,13 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm, void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, int first_unused_slot_offset) { - AdjustStackPointerForTailCall(tasm(), frame_access_state(), + AdjustStackPointerForTailCall(masm(), frame_access_state(), first_unused_slot_offset, false); } void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr, int first_unused_slot_offset) { - AdjustStackPointerForTailCall(tasm(), frame_access_state(), + AdjustStackPointerForTailCall(masm(), frame_access_state(), first_unused_slot_offset); } @@ -766,7 +766,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( { // We don't actually want to generate a pile of code for this, so just // claim there is a stack frame, without generating one. 
- FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE); + FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE); __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck), RelocInfo::CODE_TARGET); } @@ -1290,7 +1290,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kMips64ModD: { // TODO(bmeurer): We should really get rid of this special instruction, // and generate a CallAddress instruction instead. - FrameScope scope(tasm(), StackFrame::MANUAL); + FrameScope scope(masm(), StackFrame::MANUAL); __ PrepareCallCFunction(0, 2, kScratchReg); __ MovToFloatParameters(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); @@ -1771,7 +1771,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kMips64StoreToStackSlot: { if (instr->InputAt(0)->IsFPRegister()) { if (instr->InputAt(0)->IsSimd128Register()) { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ st_b(i.InputSimd128Register(0), MemOperand(sp, i.InputInt32(1))); } else { __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1))); @@ -1790,13 +1790,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S128LoadSplat: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); auto sz = static_cast(MiscField::decode(instr->opcode())); __ LoadSplat(sz, i.OutputSimd128Register(), i.MemoryOperand()); break; } case kMips64S128Load8x8S: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register scratch = kSimd128ScratchReg; __ Ld(kScratchReg, i.MemoryOperand()); @@ -1806,7 +1806,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S128Load8x8U: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ Ld(kScratchReg, i.MemoryOperand()); @@ -1815,7 +1815,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S128Load16x4S: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register scratch = kSimd128ScratchReg; __ Ld(kScratchReg, i.MemoryOperand()); @@ -1825,7 +1825,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S128Load16x4U: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ Ld(kScratchReg, i.MemoryOperand()); @@ -1834,7 +1834,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S128Load32x2S: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register scratch = kSimd128ScratchReg; __ Ld(kScratchReg, i.MemoryOperand()); @@ -1844,7 +1844,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S128Load32x2U: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ Ld(kScratchReg, 
i.MemoryOperand()); @@ -1853,7 +1853,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S128Load32Zero: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); __ xor_v(dst, dst, dst); __ Lwu(kScratchReg, i.MemoryOperand()); @@ -1861,7 +1861,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S128Load64Zero: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); __ xor_v(dst, dst, dst); __ Ld(kScratchReg, i.MemoryOperand()); @@ -1869,7 +1869,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S128LoadLane: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); DCHECK_EQ(dst, i.InputSimd128Register(0)); auto sz = static_cast(MiscField::decode(instr->opcode())); @@ -1877,7 +1877,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S128StoreLane: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register src = i.InputSimd128Register(0); auto sz = static_cast(MiscField::decode(instr->opcode())); __ StoreLane(sz, src, i.InputUint8(1), i.MemoryOperand(2)); @@ -2055,7 +2055,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( i.InputRegister(0), Operand(i.InputRegister(1))); break; case kMips64S128Const: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); uint64_t imm1 = make_uint64(i.InputUint32(1), i.InputUint32(0)); uint64_t imm2 = make_uint64(i.InputUint32(3), i.InputUint32(2)); @@ -2066,30 +2066,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S128Zero: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); __ xor_v(dst, dst, dst); break; } case kMips64S128AllOnes: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); __ ceq_d(dst, dst, dst); break; } case kMips64I32x4Splat: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fill_w(i.OutputSimd128Register(), i.InputRegister(0)); break; } case kMips64I32x4ExtractLane: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ copy_s_w(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1)); break; } case kMips64I32x4ReplaceLane: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register src = i.InputSimd128Register(0); Simd128Register dst = i.OutputSimd128Register(); if (src != dst) { @@ -2099,54 +2099,54 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I32x4Add: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ addv_w(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I32x4Sub: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ subv_w(i.OutputSimd128Register(), 
i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64F64x2Abs: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ bclri_d(i.OutputSimd128Register(), i.InputSimd128Register(0), 63); break; } case kMips64F64x2Neg: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ bnegi_d(i.OutputSimd128Register(), i.InputSimd128Register(0), 63); break; } case kMips64F64x2Sqrt: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fsqrt_d(i.OutputSimd128Register(), i.InputSimd128Register(0)); break; } case kMips64F64x2Add: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); ASSEMBLE_F64X2_ARITHMETIC_BINOP(fadd_d); break; } case kMips64F64x2Sub: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); ASSEMBLE_F64X2_ARITHMETIC_BINOP(fsub_d); break; } case kMips64F64x2Mul: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); ASSEMBLE_F64X2_ARITHMETIC_BINOP(fmul_d); break; } case kMips64F64x2Div: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); ASSEMBLE_F64X2_ARITHMETIC_BINOP(fdiv_d); break; } case kMips64F64x2Min: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register src0 = i.InputSimd128Register(0); Simd128Register src1 = i.InputSimd128Register(1); @@ -2169,7 +2169,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64F64x2Max: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register src0 = i.InputSimd128Register(0); Simd128Register src1 = i.InputSimd128Register(1); @@ -2192,43 +2192,43 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64F64x2Eq: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fceq_d(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64F64x2Ne: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fcune_d(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64F64x2Lt: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fclt_d(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64F64x2Le: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fcle_d(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64F64x2Splat: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ Move(kScratchReg, i.InputDoubleRegister(0)); __ fill_d(i.OutputSimd128Register(), kScratchReg); break; } case kMips64F64x2ExtractLane: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ copy_s_d(kScratchReg, i.InputSimd128Register(0), i.InputInt8(1)); __ Move(i.OutputDoubleRegister(), kScratchReg); break; } case kMips64F64x2ReplaceLane: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope 
msa_scope(masm(), MIPS_SIMD); Simd128Register src = i.InputSimd128Register(0); Simd128Register dst = i.OutputSimd128Register(); __ Move(kScratchReg, i.InputDoubleRegister(2)); @@ -2239,18 +2239,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I64x2Splat: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fill_d(i.OutputSimd128Register(), i.InputRegister(0)); break; } case kMips64I64x2ExtractLane: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ copy_s_d(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1)); break; } case kMips64F64x2Pmin: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register lhs = i.InputSimd128Register(0); Simd128Register rhs = i.InputSimd128Register(1); @@ -2260,7 +2260,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64F64x2Pmax: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register lhs = i.InputSimd128Register(0); Simd128Register rhs = i.InputSimd128Register(1); @@ -2270,31 +2270,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64F64x2Ceil: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ MSARoundD(i.OutputSimd128Register(), i.InputSimd128Register(0), kRoundToPlusInf); break; } case kMips64F64x2Floor: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ MSARoundD(i.OutputSimd128Register(), i.InputSimd128Register(0), kRoundToMinusInf); break; } case kMips64F64x2Trunc: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ MSARoundD(i.OutputSimd128Register(), i.InputSimd128Register(0), kRoundToZero); break; } case kMips64F64x2NearestInt: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ MSARoundD(i.OutputSimd128Register(), i.InputSimd128Register(0), kRoundToNearest); break; } case kMips64F64x2ConvertLowI32x4S: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ ilvr_w(kSimd128RegZero, kSimd128RegZero, i.InputSimd128Register(0)); __ slli_d(kSimd128RegZero, kSimd128RegZero, 32); @@ -2303,19 +2303,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64F64x2ConvertLowI32x4U: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ ilvr_w(kSimd128RegZero, kSimd128RegZero, i.InputSimd128Register(0)); __ ffint_u_d(i.OutputSimd128Register(), kSimd128RegZero); break; } case kMips64F64x2PromoteLowF32x4: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fexupr_d(i.OutputSimd128Register(), i.InputSimd128Register(0)); break; } case kMips64I64x2ReplaceLane: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register src = i.InputSimd128Register(0); Simd128Register dst = i.OutputSimd128Register(); if (src != dst) { @@ -2325,32 +2325,32 @@ CodeGenerator::CodeGenResult 
CodeGenerator::AssembleArchInstruction( break; } case kMips64I64x2Add: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ addv_d(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I64x2Sub: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ subv_d(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I64x2Mul: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ mulv_d(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I64x2Neg: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ subv_d(i.OutputSimd128Register(), kSimd128RegZero, i.InputSimd128Register(0)); break; } case kMips64I64x2Shl: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); if (instr->InputAt(1)->IsRegister()) { __ fill_d(kSimd128ScratchReg, i.InputRegister(1)); __ sll_d(i.OutputSimd128Register(), i.InputSimd128Register(0), @@ -2362,7 +2362,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I64x2ShrS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); if (instr->InputAt(1)->IsRegister()) { __ fill_d(kSimd128ScratchReg, i.InputRegister(1)); __ sra_d(i.OutputSimd128Register(), i.InputSimd128Register(0), @@ -2374,7 +2374,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I64x2ShrU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); if (instr->InputAt(1)->IsRegister()) { __ fill_d(kSimd128ScratchReg, i.InputRegister(1)); __ srl_d(i.OutputSimd128Register(), i.InputSimd128Register(0), @@ -2386,7 +2386,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I64x2BitMask: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Register dst = i.OutputRegister(); Simd128Register src = i.InputSimd128Register(0); Simd128Register scratch0 = kSimd128RegZero; @@ -2399,13 +2399,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I64x2Eq: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ ceq_d(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I64x2Ne: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ ceq_d(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); __ nor_v(i.OutputSimd128Register(), i.OutputSimd128Register(), @@ -2413,26 +2413,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I64x2GtS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ clt_s_d(i.OutputSimd128Register(), i.InputSimd128Register(1), i.InputSimd128Register(0)); break; } case kMips64I64x2GeS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ cle_s_d(i.OutputSimd128Register(), i.InputSimd128Register(1), i.InputSimd128Register(0)); break; } case kMips64I64x2Abs: { - CpuFeatureScope 
msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ add_a_d(i.OutputSimd128Register(), i.InputSimd128Register(0), kSimd128RegZero); break; } case kMips64I64x2SConvertI32x4Low: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register src = i.InputSimd128Register(0); __ ilvr_w(kSimd128ScratchReg, src, src); @@ -2441,7 +2441,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I64x2SConvertI32x4High: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register src = i.InputSimd128Register(0); __ ilvl_w(kSimd128ScratchReg, src, src); @@ -2450,14 +2450,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I64x2UConvertI32x4Low: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ ilvr_w(i.OutputSimd128Register(), kSimd128RegZero, i.InputSimd128Register(0)); break; } case kMips64I64x2UConvertI32x4High: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ ilvl_w(i.OutputSimd128Register(), kSimd128RegZero, i.InputSimd128Register(0)); @@ -2482,19 +2482,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64F32x4Splat: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ FmoveLow(kScratchReg, i.InputSingleRegister(0)); __ fill_w(i.OutputSimd128Register(), kScratchReg); break; } case kMips64F32x4ExtractLane: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ copy_u_w(kScratchReg, i.InputSimd128Register(0), i.InputInt8(1)); __ FmoveLow(i.OutputSingleRegister(), kScratchReg); break; } case kMips64F32x4ReplaceLane: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register src = i.InputSimd128Register(0); Simd128Register dst = i.OutputSimd128Register(); __ FmoveLow(kScratchReg, i.InputSingleRegister(2)); @@ -2505,48 +2505,48 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64F32x4SConvertI32x4: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ ffint_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); break; } case kMips64F32x4UConvertI32x4: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ ffint_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); break; } case kMips64I32x4Mul: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ mulv_w(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I32x4MaxS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ max_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I32x4MinS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ min_s_w(i.OutputSimd128Register(), 
i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I32x4Eq: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ ceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I32x4Ne: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); __ ceq_w(dst, i.InputSimd128Register(0), i.InputSimd128Register(1)); __ nor_v(dst, dst, dst); break; } case kMips64I32x4Shl: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); if (instr->InputAt(1)->IsRegister()) { __ fill_w(kSimd128ScratchReg, i.InputRegister(1)); __ sll_w(i.OutputSimd128Register(), i.InputSimd128Register(0), @@ -2558,7 +2558,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I32x4ShrS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); if (instr->InputAt(1)->IsRegister()) { __ fill_w(kSimd128ScratchReg, i.InputRegister(1)); __ sra_w(i.OutputSimd128Register(), i.InputSimd128Register(0), @@ -2570,7 +2570,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I32x4ShrU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); if (instr->InputAt(1)->IsRegister()) { __ fill_w(kSimd128ScratchReg, i.InputRegister(1)); __ srl_w(i.OutputSimd128Register(), i.InputSimd128Register(0), @@ -2582,26 +2582,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I32x4MaxU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ max_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I32x4MinU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ min_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64S128Select: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); DCHECK(i.OutputSimd128Register() == i.InputSimd128Register(0)); __ bsel_v(i.OutputSimd128Register(), i.InputSimd128Register(2), i.InputSimd128Register(1)); break; } case kMips64S128AndNot: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register scratch = kSimd128ScratchReg, dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), @@ -2611,41 +2611,41 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64F32x4Abs: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ bclri_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31); break; } case kMips64F32x4Neg: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ bnegi_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31); break; } case kMips64F32x4Add: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fadd_w(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64F32x4Sub: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fsub_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 
i.InputSimd128Register(1)); break; } case kMips64F32x4Mul: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fmul_w(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64F32x4Div: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fdiv_w(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64F32x4Max: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register src0 = i.InputSimd128Register(0); Simd128Register src1 = i.InputSimd128Register(1); @@ -2668,7 +2668,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64F32x4Min: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register src0 = i.InputSimd128Register(0); Simd128Register src1 = i.InputSimd128Register(1); @@ -2691,31 +2691,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64F32x4Eq: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64F32x4Ne: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fcune_w(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64F32x4Lt: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fclt_w(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64F32x4Le: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fcle_w(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64F32x4Pmin: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register lhs = i.InputSimd128Register(0); Simd128Register rhs = i.InputSimd128Register(1); @@ -2725,7 +2725,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64F32x4Pmax: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register lhs = i.InputSimd128Register(0); Simd128Register rhs = i.InputSimd128Register(1); @@ -2735,91 +2735,91 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64F32x4Ceil: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ MSARoundW(i.OutputSimd128Register(), i.InputSimd128Register(0), kRoundToPlusInf); break; } case kMips64F32x4Floor: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ MSARoundW(i.OutputSimd128Register(), i.InputSimd128Register(0), kRoundToMinusInf); break; } case kMips64F32x4Trunc: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ MSARoundW(i.OutputSimd128Register(), i.InputSimd128Register(0), kRoundToZero); break; } case kMips64F32x4NearestInt: { - CpuFeatureScope msa_scope(tasm(), 
MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ MSARoundW(i.OutputSimd128Register(), i.InputSimd128Register(0), kRoundToNearest); break; } case kMips64F32x4DemoteF64x2Zero: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ fexdo_w(i.OutputSimd128Register(), kSimd128RegZero, i.InputSimd128Register(0)); break; } case kMips64I32x4SConvertF32x4: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ ftrunc_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); break; } case kMips64I32x4UConvertF32x4: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ ftrunc_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); break; } case kMips64F32x4Sqrt: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); break; } case kMips64I32x4Neg: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ subv_w(i.OutputSimd128Register(), kSimd128RegZero, i.InputSimd128Register(0)); break; } case kMips64I32x4GtS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ clt_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1), i.InputSimd128Register(0)); break; } case kMips64I32x4GeS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ cle_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1), i.InputSimd128Register(0)); break; } case kMips64I32x4GtU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ clt_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1), i.InputSimd128Register(0)); break; } case kMips64I32x4GeU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ cle_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1), i.InputSimd128Register(0)); break; } case kMips64I32x4Abs: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ asub_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0), kSimd128RegZero); break; } case kMips64I32x4BitMask: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Register dst = i.OutputRegister(); Simd128Register src = i.InputSimd128Register(0); Simd128Register scratch0 = kSimd128RegZero; @@ -2834,13 +2834,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I32x4DotI16x8S: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ dotp_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I32x4TruncSatF64x2SZero: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ ftrunc_s_d(kSimd128ScratchReg, i.InputSimd128Register(0)); __ sat_s_d(kSimd128ScratchReg, kSimd128ScratchReg, 31); @@ -2849,7 +2849,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I32x4TruncSatF64x2UZero: { - CpuFeatureScope 
msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ ftrunc_u_d(kSimd128ScratchReg, i.InputSimd128Register(0)); __ sat_u_d(kSimd128ScratchReg, kSimd128ScratchReg, 31); @@ -2858,24 +2858,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I16x8Splat: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fill_h(i.OutputSimd128Register(), i.InputRegister(0)); break; } case kMips64I16x8ExtractLaneU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ copy_u_h(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1)); break; } case kMips64I16x8ExtractLaneS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ copy_s_h(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1)); break; } case kMips64I16x8ReplaceLane: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register src = i.InputSimd128Register(0); Simd128Register dst = i.OutputSimd128Register(); if (src != dst) { @@ -2885,14 +2885,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I16x8Neg: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ subv_h(i.OutputSimd128Register(), kSimd128RegZero, i.InputSimd128Register(0)); break; } case kMips64I16x8Shl: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); if (instr->InputAt(1)->IsRegister()) { __ fill_h(kSimd128ScratchReg, i.InputRegister(1)); __ sll_h(i.OutputSimd128Register(), i.InputSimd128Register(0), @@ -2904,7 +2904,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I16x8ShrS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); if (instr->InputAt(1)->IsRegister()) { __ fill_h(kSimd128ScratchReg, i.InputRegister(1)); __ sra_h(i.OutputSimd128Register(), i.InputSimd128Register(0), @@ -2916,7 +2916,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I16x8ShrU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); if (instr->InputAt(1)->IsRegister()) { __ fill_h(kSimd128ScratchReg, i.InputRegister(1)); __ srl_h(i.OutputSimd128Register(), i.InputSimd128Register(0), @@ -2928,123 +2928,123 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I16x8Add: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ addv_h(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I16x8AddSatS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ adds_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I16x8Sub: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ subv_h(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I16x8SubSatS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ subs_s_h(i.OutputSimd128Register(), 
i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I16x8Mul: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ mulv_h(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I16x8MaxS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ max_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I16x8MinS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ min_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I16x8Eq: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ ceq_h(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I16x8Ne: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); __ ceq_h(dst, i.InputSimd128Register(0), i.InputSimd128Register(1)); __ nor_v(dst, dst, dst); break; } case kMips64I16x8GtS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ clt_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1), i.InputSimd128Register(0)); break; } case kMips64I16x8GeS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ cle_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1), i.InputSimd128Register(0)); break; } case kMips64I16x8AddSatU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ adds_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I16x8SubSatU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ subs_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I16x8MaxU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ max_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I16x8MinU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ min_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I16x8GtU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ clt_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1), i.InputSimd128Register(0)); break; } case kMips64I16x8GeU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ cle_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1), i.InputSimd128Register(0)); break; } case kMips64I16x8RoundingAverageU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ aver_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1), i.InputSimd128Register(0)); break; } case kMips64I16x8Abs: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ asub_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0), kSimd128RegZero); break; } case kMips64I16x8BitMask: { - 
CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Register dst = i.OutputRegister(); Simd128Register src = i.InputSimd128Register(0); Simd128Register scratch0 = kSimd128RegZero; @@ -3061,30 +3061,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I16x8Q15MulRSatS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ mulr_q_h(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I8x16Splat: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ fill_b(i.OutputSimd128Register(), i.InputRegister(0)); break; } case kMips64I8x16ExtractLaneU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ copy_u_b(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1)); break; } case kMips64I8x16ExtractLaneS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ copy_s_b(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1)); break; } case kMips64I8x16ReplaceLane: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register src = i.InputSimd128Register(0); Simd128Register dst = i.OutputSimd128Register(); if (src != dst) { @@ -3094,14 +3094,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I8x16Neg: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ subv_b(i.OutputSimd128Register(), kSimd128RegZero, i.InputSimd128Register(0)); break; } case kMips64I8x16Shl: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); if (instr->InputAt(1)->IsRegister()) { __ fill_b(kSimd128ScratchReg, i.InputRegister(1)); __ sll_b(i.OutputSimd128Register(), i.InputSimd128Register(0), @@ -3113,7 +3113,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I8x16ShrS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); if (instr->InputAt(1)->IsRegister()) { __ fill_b(kSimd128ScratchReg, i.InputRegister(1)); __ sra_b(i.OutputSimd128Register(), i.InputSimd128Register(0), @@ -3125,68 +3125,68 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I8x16Add: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ addv_b(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I8x16AddSatS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ adds_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I8x16Sub: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ subv_b(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I8x16SubSatS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ subs_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I8x16MaxS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), 
MIPS_SIMD); __ max_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I8x16MinS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ min_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I8x16Eq: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ ceq_b(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I8x16Ne: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); __ ceq_b(dst, i.InputSimd128Register(0), i.InputSimd128Register(1)); __ nor_v(dst, dst, dst); break; } case kMips64I8x16GtS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ clt_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1), i.InputSimd128Register(0)); break; } case kMips64I8x16GeS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ cle_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1), i.InputSimd128Register(0)); break; } case kMips64I8x16ShrU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); if (instr->InputAt(1)->IsRegister()) { __ fill_b(kSimd128ScratchReg, i.InputRegister(1)); __ srl_b(i.OutputSimd128Register(), i.InputSimd128Register(0), @@ -3198,61 +3198,61 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I8x16AddSatU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ adds_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I8x16SubSatU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ subs_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I8x16MaxU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ max_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I8x16MinU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ min_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64I8x16GtU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ clt_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1), i.InputSimd128Register(0)); break; } case kMips64I8x16GeU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ cle_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1), i.InputSimd128Register(0)); break; } case kMips64I8x16RoundingAverageU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ aver_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1), i.InputSimd128Register(0)); break; } case kMips64I8x16Abs: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ asub_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0), kSimd128RegZero); break; } case kMips64I8x16Popcnt: { - CpuFeatureScope 
msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ pcnt_b(i.OutputSimd128Register(), i.InputSimd128Register(0)); break; } case kMips64I8x16BitMask: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Register dst = i.OutputRegister(); Simd128Register src = i.InputSimd128Register(0); Simd128Register scratch0 = kSimd128RegZero; @@ -3270,31 +3270,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S128And: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ and_v(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64S128Or: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ or_v(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64S128Xor: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(1)); break; } case kMips64S128Not: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ nor_v(i.OutputSimd128Register(), i.InputSimd128Register(0), i.InputSimd128Register(0)); break; } case kMips64V128AnyTrue: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Register dst = i.OutputRegister(); Label all_false; __ BranchMSA(&all_false, MSA_BRANCH_V, all_zero, @@ -3305,7 +3305,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I64x2AllTrue: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Register dst = i.OutputRegister(); Label all_true; __ BranchMSA(&all_true, MSA_BRANCH_D, all_not_zero, @@ -3316,7 +3316,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I32x4AllTrue: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Register dst = i.OutputRegister(); Label all_true; __ BranchMSA(&all_true, MSA_BRANCH_W, all_not_zero, @@ -3327,7 +3327,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I16x8AllTrue: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Register dst = i.OutputRegister(); Label all_true; __ BranchMSA(&all_true, MSA_BRANCH_H, all_not_zero, @@ -3338,7 +3338,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I8x16AllTrue: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Register dst = i.OutputRegister(); Label all_true; __ BranchMSA(&all_true, MSA_BRANCH_B, all_not_zero, @@ -3349,17 +3349,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64MsaLd: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ ld_b(i.OutputSimd128Register(), i.MemoryOperand()); break; } case kMips64MsaSt: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ st_b(i.InputSimd128Register(2), i.MemoryOperand()); break; } case kMips64S32x4InterleaveRight: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = 
i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3369,7 +3369,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S32x4InterleaveLeft: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3379,7 +3379,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S32x4PackEven: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3389,7 +3389,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S32x4PackOdd: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3399,7 +3399,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S32x4InterleaveEven: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3409,7 +3409,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S32x4InterleaveOdd: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3419,7 +3419,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S32x4Shuffle: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3473,7 +3473,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S16x8InterleaveRight: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3483,7 +3483,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S16x8InterleaveLeft: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3493,7 +3493,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S16x8PackEven: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3503,7 +3503,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S16x8PackOdd: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3513,7 
+3513,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S16x8InterleaveEven: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3523,7 +3523,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S16x8InterleaveOdd: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3533,21 +3533,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S16x4Reverse: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); // src = [7, 6, 5, 4, 3, 2, 1, 0], dst = [4, 5, 6, 7, 0, 1, 2, 3] // shf.df imm field: 0 1 2 3 = 00011011 = 0x1B __ shf_h(i.OutputSimd128Register(), i.InputSimd128Register(0), 0x1B); break; } case kMips64S16x2Reverse: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); // src = [7, 6, 5, 4, 3, 2, 1, 0], dst = [6, 7, 4, 5, 3, 2, 0, 1] // shf.df imm field: 2 3 0 1 = 10110001 = 0xB1 __ shf_h(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1); break; } case kMips64S8x16InterleaveRight: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3557,7 +3557,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S8x16InterleaveLeft: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3567,7 +3567,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S8x16PackEven: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3577,7 +3577,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S8x16PackOdd: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3587,7 +3587,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S8x16InterleaveEven: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3597,7 +3597,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S8x16InterleaveOdd: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3607,14 +3607,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S8x16Concat: { - CpuFeatureScope 
msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); DCHECK(dst == i.InputSimd128Register(0)); __ sldi_b(dst, i.InputSimd128Register(1), i.InputInt4(2)); break; } case kMips64I8x16Shuffle: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(), src0 = i.InputSimd128Register(0), src1 = i.InputSimd128Register(1); @@ -3650,7 +3650,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S8x8Reverse: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); // src = [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] // dst = [8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7] // [A B C D] => [B A D C]: shf.w imm: 2 3 0 1 = 10110001 = 0xB1 @@ -3660,21 +3660,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64S8x4Reverse: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); // src = [15, 14, ... 3, 2, 1, 0], dst = [12, 13, 14, 15, ... 0, 1, 2, 3] // shf.df imm field: 0 1 2 3 = 00011011 = 0x1B __ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0x1B); break; } case kMips64S8x2Reverse: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); // src = [15, 14, ... 3, 2, 1, 0], dst = [14, 15, 12, 13, ... 2, 3, 0, 1] // shf.df imm field: 2 3 0 1 = 10110001 = 0xB1 __ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1); break; } case kMips64I32x4SConvertI16x8Low: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register src = i.InputSimd128Register(0); __ ilvr_h(kSimd128ScratchReg, src, src); @@ -3683,7 +3683,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I32x4SConvertI16x8High: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register src = i.InputSimd128Register(0); __ ilvl_h(kSimd128ScratchReg, src, src); @@ -3692,21 +3692,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I32x4UConvertI16x8Low: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ ilvr_h(i.OutputSimd128Register(), kSimd128RegZero, i.InputSimd128Register(0)); break; } case kMips64I32x4UConvertI16x8High: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ ilvl_h(i.OutputSimd128Register(), kSimd128RegZero, i.InputSimd128Register(0)); break; } case kMips64I16x8SConvertI8x16Low: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register src = i.InputSimd128Register(0); __ ilvr_b(kSimd128ScratchReg, src, src); @@ -3715,7 +3715,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I16x8SConvertI8x16High: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register src = 
i.InputSimd128Register(0); __ ilvl_b(kSimd128ScratchReg, src, src); @@ -3724,7 +3724,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I16x8SConvertI32x4: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register src0 = i.InputSimd128Register(0); Simd128Register src1 = i.InputSimd128Register(1); @@ -3734,7 +3734,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I16x8UConvertI32x4: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register src0 = i.InputSimd128Register(0); Simd128Register src1 = i.InputSimd128Register(1); @@ -3747,21 +3747,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I16x8UConvertI8x16Low: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ ilvr_b(i.OutputSimd128Register(), kSimd128RegZero, i.InputSimd128Register(0)); break; } case kMips64I16x8UConvertI8x16High: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); __ ilvl_b(i.OutputSimd128Register(), kSimd128RegZero, i.InputSimd128Register(0)); break; } case kMips64I8x16SConvertI16x8: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register src0 = i.InputSimd128Register(0); Simd128Register src1 = i.InputSimd128Register(1); @@ -3771,7 +3771,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kMips64I8x16UConvertI16x8: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); Simd128Register dst = i.OutputSimd128Register(); Simd128Register src0 = i.InputSimd128Register(0); Simd128Register src1 = i.InputSimd128Register(1); @@ -3792,11 +3792,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( << "\""; \ UNIMPLEMENTED(); -void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm, +void AssembleBranchToLabels(CodeGenerator* gen, MacroAssembler* masm, Instruction* instr, FlagsCondition condition, Label* tlabel, Label* flabel, bool fallthru) { #undef __ -#define __ tasm-> +#define __ masm-> MipsOperandConverter i(gen, instr); // MIPS does not have condition code flags, so compare and branch are @@ -3867,7 +3867,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm, } if (!fallthru) __ Branch(flabel); // no fallthru to flabel. #undef __ -#define __ tasm()-> +#define __ masm()-> } // Assembles branches after an instruction. 
@@ -3875,7 +3875,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) { Label* tlabel = branch->true_label; Label* flabel = branch->false_label; - AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel, + AssembleBranchToLabels(this, masm(), instr, branch->condition, tlabel, flabel, branch->fallthru); } @@ -3940,7 +3940,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr, }; auto ool = zone()->New(this, instr); Label* tlabel = ool->entry(); - AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true); + AssembleBranchToLabels(this, masm(), instr, condition, tlabel, nullptr, true); } #endif // V8_ENABLE_WEBASSEMBLY @@ -4384,7 +4384,7 @@ AllocatedOperand CodeGenerator::Push(InstructionOperand* source) { __ Push(g.ToRegister(source)); frame_access_state()->IncreaseSPDelta(new_slots); } else if (source->IsStackSlot()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ Ld(scratch, g.ToMemOperand(source)); __ Push(scratch); @@ -4407,7 +4407,7 @@ void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) { if (dest->IsRegister()) { __ Pop(g.ToRegister(dest)); } else if (dest->IsStackSlot()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ Pop(scratch); __ Sd(scratch, g.ToMemOperand(dest)); @@ -4435,7 +4435,7 @@ void CodeGenerator::MoveToTempLocation(InstructionOperand* source, MachineRepresentation rep) { // Must be kept in sync with {MoveTempLocationTo}. DCHECK(!source->IsImmediate()); - move_cycle_.temps.emplace(tasm()); + move_cycle_.temps.emplace(masm()); auto& temps = *move_cycle_.temps; // Temporarily exclude the reserved scratch registers while we pick one to // resolve the move cycle. 
Re-include them immediately afterwards as they @@ -4481,7 +4481,7 @@ void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest, void CodeGenerator::SetPendingMove(MoveOperands* move) { InstructionOperand* src = &move->source(); InstructionOperand* dst = &move->destination(); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); if (src->IsConstant() && dst->IsFPLocationOperand()) { Register temp = temps.Acquire(); move_cycle_.scratch_regs.set(temp); @@ -4600,7 +4600,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, } else if (source->IsFPRegister()) { MachineRepresentation rep = LocationOperand::cast(source)->representation(); if (rep == MachineRepresentation::kSimd128) { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); MSARegister src = g.ToSimd128Register(source); if (destination->IsSimd128Register()) { MSARegister dst = g.ToSimd128Register(destination); @@ -4624,7 +4624,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source, MemOperand src = g.ToMemOperand(source); MachineRepresentation rep = LocationOperand::cast(source)->representation(); if (rep == MachineRepresentation::kSimd128) { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); if (destination->IsSimd128Register()) { __ ld_b(g.ToSimd128Register(destination), src); } else { @@ -4682,7 +4682,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source, } else if (source->IsFPRegister()) { MachineRepresentation rep = LocationOperand::cast(source)->representation(); if (rep == MachineRepresentation::kSimd128) { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); MSARegister temp = kSimd128ScratchReg; MSARegister src = g.ToSimd128Register(source); if (destination->IsSimd128Register()) { @@ -4722,7 +4722,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source, MemOperand dst1(dst0.rm(), dst0.offset() + kInt64Size); MachineRepresentation rep = LocationOperand::cast(source)->representation(); if (rep == MachineRepresentation::kSimd128) { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); + CpuFeatureScope msa_scope(masm(), MIPS_SIMD); MSARegister temp_1 = kSimd128ScratchReg; __ ld_b(temp_1, dst0); // Save destination in temp_1. __ Ld(temp_0, src0); // Then use temp_0 to copy source to destination. 
diff --git a/src/compiler/backend/mips64/instruction-scheduler-mips64.cc b/src/compiler/backend/mips64/instruction-scheduler-mips64.cc index 1d17d4bd58..af0746622f 100644 --- a/src/compiler/backend/mips64/instruction-scheduler-mips64.cc +++ b/src/compiler/backend/mips64/instruction-scheduler-mips64.cc @@ -775,7 +775,7 @@ int PrepareForTailCallLatency() { int AssertLatency() { return 1; } int PrepareCallCFunctionLatency() { - int frame_alignment = TurboAssembler::ActivationFrameAlignment(); + int frame_alignment = MacroAssembler::ActivationFrameAlignment(); if (frame_alignment > kSystemPointerSize) { return 1 + DsubuLatency(false) + AndLatency(false) + 1; } else { diff --git a/src/compiler/backend/mips64/instruction-selector-mips64.cc b/src/compiler/backend/mips64/instruction-selector-mips64.cc index 8b4398eecb..825bfc1ba0 100644 --- a/src/compiler/backend/mips64/instruction-selector-mips64.cc +++ b/src/compiler/backend/mips64/instruction-selector-mips64.cc @@ -1481,21 +1481,33 @@ void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) { } void InstructionSelector::VisitChangeInt32ToInt64(Node* node) { - // On MIPS64, int32 values should all be sign-extended to 64-bit, so - // no need to sign-extend them here. - // But when call to a host function in simulator, if the function return an - // int32 value, the simulator do not sign-extend to int64, because in - // simulator we do not know the function whether return an int32 or int64. -#ifdef USE_SIMULATOR Node* value = node->InputAt(0); - if (value->opcode() == IrOpcode::kCall) { + if ((value->opcode() == IrOpcode::kLoad || + value->opcode() == IrOpcode::kLoadImmutable) && + CanCover(node, value)) { + // Generate sign-extending load. + LoadRepresentation load_rep = LoadRepresentationOf(value->op()); + InstructionCode opcode = kArchNop; + switch (load_rep.representation()) { + case MachineRepresentation::kBit: // Fall through. + case MachineRepresentation::kWord8: + opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb; + break; + case MachineRepresentation::kWord16: + opcode = load_rep.IsUnsigned() ? 
kMips64Lhu : kMips64Lh; + break; + case MachineRepresentation::kWord32: + opcode = kMips64Lw; + break; + default: + UNREACHABLE(); + } + EmitLoad(this, value, opcode, node); + } else { Mips64OperandGenerator g(this); Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(value), g.TempImmediate(0)); - return; } -#endif - EmitIdentity(node); } bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) { diff --git a/src/compiler/backend/ppc/code-generator-ppc.cc b/src/compiler/backend/ppc/code-generator-ppc.cc index d0f90150f2..515934a726 100644 --- a/src/compiler/backend/ppc/code-generator-ppc.cc +++ b/src/compiler/backend/ppc/code-generator-ppc.cc @@ -23,7 +23,7 @@ namespace v8 { namespace internal { namespace compiler { -#define __ tasm()-> +#define __ masm()-> #define kScratchReg r11 @@ -170,9 +170,9 @@ class OutOfLineRecordWrite final : public OutOfLineCode { } void Generate() final { - ConstantPoolUnavailableScope constant_pool_unavailable(tasm()); + ConstantPoolUnavailableScope constant_pool_unavailable(masm()); if (COMPRESS_POINTERS_BOOL) { - __ DecompressTaggedPointer(value_, value_); + __ DecompressTagged(value_, value_); } __ CheckPageFlag( value_, scratch0_, @@ -409,7 +409,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) { #define ASSEMBLE_FLOAT_MODULO() \ do { \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ __ PrepareCallCFunction(0, 2, kScratchReg); \ __ MovToFloatParameters(i.InputDoubleRegister(0), \ i.InputDoubleRegister(1)); \ @@ -422,7 +422,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) { do { \ /* TODO(bmeurer): We should really get rid of this special instruction, */ \ /* and generate a CallAddress instruction instead. */ \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ __ PrepareCallCFunction(0, 1, kScratchReg); \ __ MovToFloatParameter(i.InputDoubleRegister(0)); \ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \ @@ -435,7 +435,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) { do { \ /* TODO(bmeurer): We should really get rid of this special instruction, */ \ /* and generate a CallAddress instruction instead. 
*/ \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ __ PrepareCallCFunction(0, 2, kScratchReg); \ __ MovToFloatParameters(i.InputDoubleRegister(0), \ i.InputDoubleRegister(1)); \ @@ -680,20 +680,20 @@ void CodeGenerator::AssemblePrepareTailCall() { namespace { -void FlushPendingPushRegisters(TurboAssembler* tasm, +void FlushPendingPushRegisters(MacroAssembler* masm, FrameAccessState* frame_access_state, ZoneVector* pending_pushes) { switch (pending_pushes->size()) { case 0: break; case 1: - tasm->Push((*pending_pushes)[0]); + masm->Push((*pending_pushes)[0]); break; case 2: - tasm->Push((*pending_pushes)[0], (*pending_pushes)[1]); + masm->Push((*pending_pushes)[0], (*pending_pushes)[1]); break; case 3: - tasm->Push((*pending_pushes)[0], (*pending_pushes)[1], + masm->Push((*pending_pushes)[0], (*pending_pushes)[1], (*pending_pushes)[2]); break; default: @@ -704,7 +704,7 @@ void FlushPendingPushRegisters(TurboAssembler* tasm, } void AdjustStackPointerForTailCall( - TurboAssembler* tasm, FrameAccessState* state, int new_slot_above_sp, + MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp, ZoneVector* pending_pushes = nullptr, bool allow_shrinkage = true) { int current_sp_offset = state->GetSPToFPSlotCount() + @@ -712,15 +712,15 @@ void AdjustStackPointerForTailCall( int stack_slot_delta = new_slot_above_sp - current_sp_offset; if (stack_slot_delta > 0) { if (pending_pushes != nullptr) { - FlushPendingPushRegisters(tasm, state, pending_pushes); + FlushPendingPushRegisters(masm, state, pending_pushes); } - tasm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize), r0); + masm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize), r0); state->IncreaseSPDelta(stack_slot_delta); } else if (allow_shrinkage && stack_slot_delta < 0) { if (pending_pushes != nullptr) { - FlushPendingPushRegisters(tasm, state, pending_pushes); + FlushPendingPushRegisters(masm, state, pending_pushes); } - tasm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize), r0); + masm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize), r0); state->IncreaseSPDelta(stack_slot_delta); } } @@ -742,7 +742,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, LocationOperand::cast(move->destination())); InstructionOperand source(move->source()); AdjustStackPointerForTailCall( - tasm(), frame_access_state(), + masm(), frame_access_state(), destination_location.index() - pending_pushes.size(), &pending_pushes); // Pushes of non-register data types are not supported. @@ -752,20 +752,20 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, // TODO(arm): We can push more than 3 registers at once. Add support in // the macro-assembler for pushing a list of registers. 
if (pending_pushes.size() == 3) { - FlushPendingPushRegisters(tasm(), frame_access_state(), + FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes); } move->Eliminate(); } - FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes); + FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes); } - AdjustStackPointerForTailCall(tasm(), frame_access_state(), + AdjustStackPointerForTailCall(masm(), frame_access_state(), first_unused_slot_offset, nullptr, false); } void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr, int first_unused_slot_offset) { - AdjustStackPointerForTailCall(tasm(), frame_access_state(), + AdjustStackPointerForTailCall(masm(), frame_access_state(), first_unused_slot_offset); } @@ -793,8 +793,8 @@ void CodeGenerator::BailoutIfDeoptimized() { } int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize; - __ LoadTaggedPointerField( - r11, MemOperand(kJavaScriptCallCodeStartRegister, offset), r0); + __ LoadTaggedField(r11, MemOperand(kJavaScriptCallCodeStartRegister, offset), + r0); __ LoadS32(r11, FieldMemOperand(r11, Code::kKindSpecificFlagsOffset), r0); __ TestBit(r11, InstructionStream::kMarkedForDeoptimizationBit); __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode), @@ -810,7 +810,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( switch (opcode) { case kArchCallCodeObject: { v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool( - tasm()); + masm()); if (HasRegisterInput(instr, 0)) { Register reg = i.InputRegister(0); DCHECK_IMPLIES( @@ -883,7 +883,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } else { // We cannot use the constant pool to load the target since // we've already restored the caller's frame. - ConstantPoolUnavailableScope constant_pool_unavailable(tasm()); + ConstantPoolUnavailableScope constant_pool_unavailable(masm()); __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET); } DCHECK_EQ(LeaveRC, i.OutputRCBit()); @@ -904,18 +904,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kArchCallJSFunction: { v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool( - tasm()); + masm()); Register func = i.InputRegister(0); if (v8_flags.debug_code) { // Check the function's context matches the context argument. - __ LoadTaggedPointerField( + __ LoadTaggedField( kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset), r0); __ CmpS64(cp, kScratchReg); __ Assert(eq, AbortReason::kWrongFunctionContext); } static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch"); - __ LoadTaggedPointerField( - r5, FieldMemOperand(func, JSFunction::kCodeOffset), r0); + __ LoadTaggedField(r5, FieldMemOperand(func, JSFunction::kCodeOffset), + r0); __ CallCodeObject(r5); RecordCallPosition(instr); DCHECK_EQ(LeaveRC, i.OutputRCBit()); @@ -1058,7 +1058,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( { // We don't actually want to generate a pile of code for this, so just // claim there is a stack frame, without generating one. 
- FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE); + FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE); __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck), RelocInfo::CODE_TARGET); } @@ -2880,13 +2880,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ASSEMBLE_LOAD_INTEGER(lwz, plwz, lwzx, false); break; } - case kPPC_LoadDecompressTaggedPointer: { - CHECK(instr->HasOutput()); - ASSEMBLE_LOAD_INTEGER(lwz, plwz, lwzx, false); - __ add(i.OutputRegister(), i.OutputRegister(), kPtrComprCageBaseRegister); - break; - } - case kPPC_LoadDecompressAnyTagged: { + case kPPC_LoadDecompressTagged: { CHECK(instr->HasOutput()); ASSEMBLE_LOAD_INTEGER(lwz, plwz, lwzx, false); __ add(i.OutputRegister(), i.OutputRegister(), kPtrComprCageBaseRegister); @@ -3320,7 +3314,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) { AssembleDeconstructFrame(); } // Constant pool is unavailable since the frame has been destructed - ConstantPoolUnavailableScope constant_pool_unavailable(tasm()); + ConstantPoolUnavailableScope constant_pool_unavailable(masm()); if (drop_jsargs) { // We must pop all arguments from the stack (including the receiver). // The number of arguments without the receiver is @@ -3334,8 +3328,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) { __ mov(argc_reg, Operand(parameter_slots)); __ bind(&skip); } - __ DropArguments(argc_reg, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(argc_reg, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); } else if (additional_pop_count->IsImmediate()) { int additional_count = g.ToConstant(additional_pop_count).ToInt32(); __ Drop(parameter_slots + additional_count); @@ -3391,7 +3385,7 @@ void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) { frame_access_state()->IncreaseSPDelta(-new_slots); PPCOperandConverter g(this, nullptr); if (dest->IsFloatStackSlot() || dest->IsDoubleStackSlot()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ Pop(scratch); __ StoreU64(scratch, g.ToMemOperand(dest), r0); diff --git a/src/compiler/backend/ppc/instruction-codes-ppc.h b/src/compiler/backend/ppc/instruction-codes-ppc.h index 5710aa313d..bce59db4de 100644 --- a/src/compiler/backend/ppc/instruction-codes-ppc.h +++ b/src/compiler/backend/ppc/instruction-codes-ppc.h @@ -411,8 +411,7 @@ namespace compiler { V(PPC_S128Store64Lane) \ V(PPC_StoreCompressTagged) \ V(PPC_LoadDecompressTaggedSigned) \ - V(PPC_LoadDecompressTaggedPointer) \ - V(PPC_LoadDecompressAnyTagged) + V(PPC_LoadDecompressTagged) // Addressing modes represent the "shape" of inputs to an instruction. // Many instructions support multiple addressing modes. 
Addressing modes diff --git a/src/compiler/backend/ppc/instruction-scheduler-ppc.cc b/src/compiler/backend/ppc/instruction-scheduler-ppc.cc index e1d195f253..83ce987666 100644 --- a/src/compiler/backend/ppc/instruction-scheduler-ppc.cc +++ b/src/compiler/backend/ppc/instruction-scheduler-ppc.cc @@ -331,8 +331,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kPPC_LoadSimd128: case kPPC_Peek: case kPPC_LoadDecompressTaggedSigned: - case kPPC_LoadDecompressTaggedPointer: - case kPPC_LoadDecompressAnyTagged: + case kPPC_LoadDecompressTagged: case kPPC_S128Load8Splat: case kPPC_S128Load16Splat: case kPPC_S128Load32Splat: diff --git a/src/compiler/backend/ppc/instruction-selector-ppc.cc b/src/compiler/backend/ppc/instruction-selector-ppc.cc index a5069d22fd..5d292cc79d 100644 --- a/src/compiler/backend/ppc/instruction-selector-ppc.cc +++ b/src/compiler/backend/ppc/instruction-selector-ppc.cc @@ -214,10 +214,10 @@ static void VisitLoadCommon(InstructionSelector* selector, Node* node, opcode = kPPC_LoadDecompressTaggedSigned; break; case MachineRepresentation::kTaggedPointer: - opcode = kPPC_LoadDecompressTaggedPointer; + opcode = kPPC_LoadDecompressTagged; break; case MachineRepresentation::kTagged: - opcode = kPPC_LoadDecompressAnyTagged; + opcode = kPPC_LoadDecompressTagged; break; #else case MachineRepresentation::kTaggedSigned: // Fall through. diff --git a/src/compiler/backend/riscv/code-generator-riscv.cc b/src/compiler/backend/riscv/code-generator-riscv.cc index 624ef0ac81..78493e70f1 100644 --- a/src/compiler/backend/riscv/code-generator-riscv.cc +++ b/src/compiler/backend/riscv/code-generator-riscv.cc @@ -19,7 +19,7 @@ namespace v8 { namespace internal { namespace compiler { -#define __ tasm()-> +#define __ masm()-> // TODO(plind): consider renaming these macros. 
#define TRACE_MSG(msg) \ @@ -173,7 +173,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode { void Generate() final { #if V8_TARGET_ARCH_RISCV64 if (COMPRESS_POINTERS_BOOL) { - __ DecompressTaggedPointer(value_, value_); + __ DecompressTagged(value_, value_); } #endif __ CheckPageFlag( @@ -334,7 +334,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, #define ASSEMBLE_ATOMIC64_LOGIC_BINOP(bin_instr, external) \ do { \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ __ AddWord(a0, i.InputRegister(0), i.InputRegister(1)); \ __ PushCallerSaved(SaveFPRegsMode::kIgnore, a0, a1); \ __ PrepareCallCFunction(3, 0, kScratchReg); \ @@ -344,7 +344,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, #define ASSEMBLE_ATOMIC64_ARITH_BINOP(bin_instr, external) \ do { \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ __ AddWord(a0, i.InputRegister(0), i.InputRegister(1)); \ __ PushCallerSaved(SaveFPRegsMode::kIgnore, a0, a1); \ __ PrepareCallCFunction(3, 0, kScratchReg); \ @@ -473,7 +473,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, #define ASSEMBLE_IEEE754_BINOP(name) \ do { \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ __ PrepareCallCFunction(0, 2, kScratchReg); \ __ MovToFloatParameters(i.InputDoubleRegister(0), \ i.InputDoubleRegister(1)); \ @@ -484,7 +484,7 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, #define ASSEMBLE_IEEE754_UNOP(name) \ do { \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ __ PrepareCallCFunction(0, 1, kScratchReg); \ __ MovToFloatParameter(i.InputDoubleRegister(0)); \ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \ @@ -582,7 +582,7 @@ void CodeGenerator::AssembleArchSelect(Instruction* instr, namespace { -void AdjustStackPointerForTailCall(TurboAssembler* tasm, +void AdjustStackPointerForTailCall(MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp, bool allow_shrinkage = true) { @@ -590,10 +590,10 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm, StandardFrameConstants::kFixedSlotCountAboveFp; int stack_slot_delta = new_slot_above_sp - current_sp_offset; if (stack_slot_delta > 0) { - tasm->SubWord(sp, sp, stack_slot_delta * kSystemPointerSize); + masm->SubWord(sp, sp, stack_slot_delta * kSystemPointerSize); state->IncreaseSPDelta(stack_slot_delta); } else if (allow_shrinkage && stack_slot_delta < 0) { - tasm->AddWord(sp, sp, -stack_slot_delta * kSystemPointerSize); + masm->AddWord(sp, sp, -stack_slot_delta * kSystemPointerSize); state->IncreaseSPDelta(stack_slot_delta); } } @@ -602,13 +602,13 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm, void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, int first_unused_slot_offset) { - AdjustStackPointerForTailCall(tasm(), frame_access_state(), + AdjustStackPointerForTailCall(masm(), frame_access_state(), first_unused_slot_offset, false); } void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr, int first_unused_slot_offset) { - AdjustStackPointerForTailCall(tasm(), frame_access_state(), + AdjustStackPointerForTailCall(masm(), frame_access_state(), first_unused_slot_offset); } @@ -628,8 +628,8 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() { // 3. if it is not zero then it jumps to the builtin. 
void CodeGenerator::BailoutIfDeoptimized() { int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize; - __ LoadTaggedPointerField( - kScratchReg, MemOperand(kJavaScriptCallCodeStartRegister, offset)); + __ LoadTaggedField(kScratchReg, + MemOperand(kJavaScriptCallCodeStartRegister, offset)); __ Lw(kScratchReg, FieldMemOperand(kScratchReg, Code::kKindSpecificFlagsOffset)); __ And(kScratchReg, kScratchReg, @@ -722,14 +722,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Register func = i.InputOrZeroRegister(0); if (v8_flags.debug_code) { // Check the function's context matches the context argument. - __ LoadTaggedPointerField( - kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset)); + __ LoadTaggedField(kScratchReg, + FieldMemOperand(func, JSFunction::kContextOffset)); __ Assert(eq, AbortReason::kWrongFunctionContext, cp, Operand(kScratchReg)); } static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); - __ LoadTaggedPointerField(a2, - FieldMemOperand(func, JSFunction::kCodeOffset)); + __ LoadTaggedField(a2, FieldMemOperand(func, JSFunction::kCodeOffset)); __ CallCodeObject(a2); RecordCallPosition(instr); frame_access_state()->ClearSPDelta(); @@ -829,7 +828,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( { // We don't actually want to generate a pile of code for this, so just // claim there is a stack frame, without generating one. - FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE); + FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE); __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck), RelocInfo::CODE_TARGET); } @@ -1295,7 +1294,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kRiscvModS: { // TODO(bmeurer): We should really get rid of this special instruction, // and generate a CallAddress instruction instead. - FrameScope scope(tasm(), StackFrame::MANUAL); + FrameScope scope(masm(), StackFrame::MANUAL); __ PrepareCallCFunction(0, 2, kScratchReg); __ MovToFloatParameters(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); @@ -1425,7 +1424,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kRiscvModD: { // TODO(bmeurer): We should really get rid of this special instruction, // and generate a CallAddress instruction instead. 
- FrameScope scope(tasm(), StackFrame::MANUAL); + FrameScope scope(masm(), StackFrame::MANUAL); __ PrepareCallCFunction(0, 2, kScratchReg); __ MovToFloatParameters(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); @@ -1940,7 +1939,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; #if V8_TARGET_ARCH_RISCV32 case kRiscvWord32AtomicPairLoad: { - FrameScope scope(tasm(), StackFrame::MANUAL); + FrameScope scope(masm(), StackFrame::MANUAL); __ AddWord(a0, i.InputRegister(0), i.InputRegister(1)); __ PushCallerSaved(SaveFPRegsMode::kIgnore, a0, a1); __ PrepareCallCFunction(1, 0, kScratchReg); @@ -1949,7 +1948,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kRiscvWord32AtomicPairStore: { - FrameScope scope(tasm(), StackFrame::MANUAL); + FrameScope scope(masm(), StackFrame::MANUAL); __ AddWord(a0, i.InputRegister(0), i.InputRegister(1)); __ PushCallerSaved(SaveFPRegsMode::kIgnore); __ PrepareCallCFunction(3, 0, kScratchReg); @@ -1972,7 +1971,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ATOMIC64_BINOP_LOGIC_CASE(Or, OrPair, atomic_pair_or_function) ATOMIC64_BINOP_LOGIC_CASE(Xor, XorPair, atomic_pair_xor_function) case kRiscvWord32AtomicPairExchange: { - FrameScope scope(tasm(), StackFrame::MANUAL); + FrameScope scope(masm(), StackFrame::MANUAL); __ PushCallerSaved(SaveFPRegsMode::kIgnore, a0, a1); __ PrepareCallCFunction(3, 0, kScratchReg); __ AddWord(a0, i.InputRegister(0), i.InputRegister(1)); @@ -1982,7 +1981,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kRiscvWord32AtomicPairCompareExchange: { - FrameScope scope(tasm(), StackFrame::MANUAL); + FrameScope scope(masm(), StackFrame::MANUAL); __ PushCallerSaved(SaveFPRegsMode::kIgnore, a0, a1); __ PrepareCallCFunction(5, 0, kScratchReg); __ add(a0, i.InputRegister(0), i.InputRegister(1)); @@ -2194,18 +2193,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ DecompressTaggedSigned(result, operand); break; } - case kRiscvLoadDecompressTaggedPointer: { + case kRiscvLoadDecompressTagged: { CHECK(instr->HasOutput()); Register result = i.OutputRegister(); MemOperand operand = i.MemoryOperand(); - __ DecompressTaggedPointer(result, operand); - break; - } - case kRiscvLoadDecompressAnyTagged: { - CHECK(instr->HasOutput()); - Register result = i.OutputRegister(); - MemOperand operand = i.MemoryOperand(); - __ DecompressAnyTagged(result, operand); + __ DecompressTagged(result, operand); break; } #endif @@ -3711,11 +3703,11 @@ bool IsInludeEqual(Condition cc) { } } -void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm, +void AssembleBranchToLabels(CodeGenerator* gen, MacroAssembler* masm, Instruction* instr, FlagsCondition condition, Label* tlabel, Label* flabel, bool fallthru) { #undef __ -#define __ tasm-> +#define __ masm-> RiscvOperandConverter i(gen, instr); // RISC-V does not have condition code flags, so compare and branch are @@ -3806,7 +3798,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm, } if (!fallthru) __ Branch(flabel); // no fallthru to flabel. #undef __ -#define __ tasm()-> +#define __ masm()-> } // Assembles branches after an instruction. 
@@ -3814,7 +3806,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) { Label* tlabel = branch->true_label; Label* flabel = branch->false_label; - AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel, + AssembleBranchToLabels(this, masm(), instr, branch->condition, tlabel, flabel, branch->fallthru); } @@ -3878,7 +3870,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr, }; auto ool = zone()->New(this, instr); Label* tlabel = ool->entry(); - AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true); + AssembleBranchToLabels(this, masm(), instr, condition, tlabel, nullptr, true); } // Assembles boolean materializations after an instruction. @@ -4373,7 +4365,7 @@ void CodeGenerator::MoveToTempLocation(InstructionOperand* source, MachineRepresentation rep) { // Must be kept in sync with {MoveTempLocationTo}. DCHECK(!source->IsImmediate()); - move_cycle_.temps.emplace(tasm()); + move_cycle_.temps.emplace(masm()); auto& temps = *move_cycle_.temps; // Temporarily exclude the reserved scratch registers while we pick one to // resolve the move cycle. Re-include them immediately afterwards as they @@ -4419,7 +4411,7 @@ void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest, void CodeGenerator::SetPendingMove(MoveOperands* move) { InstructionOperand* src = &move->source(); InstructionOperand* dst = &move->destination(); - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); if (src->IsConstant() && dst->IsFPLocationOperand()) { Register temp = temps.Acquire(); move_cycle_.scratch_regs.set(temp); @@ -4748,7 +4740,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source, } } #endif - UseScratchRegisterScope scope(tasm()); + UseScratchRegisterScope scope(masm()); Register temp_0 = kScratchReg; Register temp_1 = kScratchReg2; __ LoadWord(temp_0, src); @@ -4775,7 +4767,7 @@ AllocatedOperand CodeGenerator::Push(InstructionOperand* source) { __ Push(g.ToRegister(source)); frame_access_state()->IncreaseSPDelta(new_slots); } else if (source->IsStackSlot()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ LoadWord(scratch, g.ToMemOperand(source)); __ Push(scratch); @@ -4798,7 +4790,7 @@ void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) { if (dest->IsRegister()) { __ Pop(g.ToRegister(dest)); } else if (dest->IsStackSlot()) { - UseScratchRegisterScope temps(tasm()); + UseScratchRegisterScope temps(masm()); Register scratch = temps.Acquire(); __ Pop(scratch); __ StoreWord(scratch, g.ToMemOperand(dest)); diff --git a/src/compiler/backend/riscv/instruction-codes-riscv.h b/src/compiler/backend/riscv/instruction-codes-riscv.h index efe7a23267..dd854aa22d 100644 --- a/src/compiler/backend/riscv/instruction-codes-riscv.h +++ b/src/compiler/backend/riscv/instruction-codes-riscv.h @@ -64,8 +64,7 @@ namespace compiler { V(RiscvWord64AtomicExchangeUint64) \ V(RiscvStoreCompressTagged) \ V(RiscvLoadDecompressTaggedSigned) \ - V(RiscvLoadDecompressTaggedPointer) \ - V(RiscvLoadDecompressAnyTagged) \ + V(RiscvLoadDecompressTagged) \ V(RiscvWord64AtomicCompareExchangeUint64) #elif V8_TARGET_ARCH_RISCV32 #define TARGET_ARCH_OPCODE_LIST_SPECAIL(V) \ diff --git a/src/compiler/backend/riscv/instruction-scheduler-riscv.cc b/src/compiler/backend/riscv/instruction-scheduler-riscv.cc index ea9e603920..51663e2b6e 100644 --- a/src/compiler/backend/riscv/instruction-scheduler-riscv.cc +++ 
b/src/compiler/backend/riscv/instruction-scheduler-riscv.cc @@ -377,8 +377,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kRiscvUlwu: case kRiscvWord64AtomicLoadUint64: case kRiscvLoadDecompressTaggedSigned: - case kRiscvLoadDecompressTaggedPointer: - case kRiscvLoadDecompressAnyTagged: + case kRiscvLoadDecompressTagged: #elif V8_TARGET_ARCH_RISCV32 case kRiscvWord32AtomicPairLoad: #endif @@ -744,7 +743,7 @@ int AssemblePopArgumentsAdoptFrameLatency() { int AssertLatency() { return 1; } int PrepareCallCFunctionLatency() { - int frame_alignment = TurboAssembler::ActivationFrameAlignment(); + int frame_alignment = MacroAssembler::ActivationFrameAlignment(); if (frame_alignment > kSystemPointerSize) { return 1 + Sub64Latency(false) + AndLatency(false) + 1; } else { diff --git a/src/compiler/backend/riscv/instruction-selector-riscv32.cc b/src/compiler/backend/riscv/instruction-selector-riscv32.cc index a8db8248b3..6f14d959e2 100644 --- a/src/compiler/backend/riscv/instruction-selector-riscv32.cc +++ b/src/compiler/backend/riscv/instruction-selector-riscv32.cc @@ -65,7 +65,7 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode, selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) { ptrdiff_t const delta = g.GetIntegerConstantValue(index) + - TurboAssemblerBase::RootRegisterOffsetForExternalReference( + MacroAssemblerBase::RootRegisterOffsetForExternalReference( selector->isolate(), m.ResolvedValue()); // Check that the delta is a 32-bit integer due to the limitations of // immediate operands. diff --git a/src/compiler/backend/riscv/instruction-selector-riscv64.cc b/src/compiler/backend/riscv/instruction-selector-riscv64.cc index 83f5b5ecb4..873d22a5dd 100644 --- a/src/compiler/backend/riscv/instruction-selector-riscv64.cc +++ b/src/compiler/backend/riscv/instruction-selector-riscv64.cc @@ -168,7 +168,7 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode, selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) { ptrdiff_t const delta = g.GetIntegerConstantValue(index) + - TurboAssemblerBase::RootRegisterOffsetForExternalReference( + MacroAssemblerBase::RootRegisterOffsetForExternalReference( selector->isolate(), m.ResolvedValue()); // Check that the delta is a 32-bit integer due to the limitations of // immediate operands. @@ -280,10 +280,10 @@ void InstructionSelector::VisitLoad(Node* node) { opcode = kRiscvLoadDecompressTaggedSigned; break; case MachineRepresentation::kTaggedPointer: - opcode = kRiscvLoadDecompressTaggedPointer; + opcode = kRiscvLoadDecompressTagged; break; case MachineRepresentation::kTagged: - opcode = kRiscvLoadDecompressAnyTagged; + opcode = kRiscvLoadDecompressTagged; break; #else case MachineRepresentation::kTaggedSigned: // Fall through. @@ -1938,10 +1938,10 @@ void InstructionSelector::VisitWord64AtomicLoad(Node* node) { opcode = kRiscv64LdDecompressTaggedSigned; break; case MachineRepresentation::kTaggedPointer: - opcode = kRiscv64LdDecompressTaggedPointer; + opcode = kRiscv64LdDecompressTagged; break; case MachineRepresentation::kTagged: - opcode = kRiscv64LdDecompressAnyTagged; + opcode = kRiscv64LdDecompressTagged; break; #else case MachineRepresentation::kTaggedSigned: // Fall through. 
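The selector changes above, and the matching s390 and x64 ones further down, collapse the kTaggedPointer and kTagged load cases into a single LoadDecompressTagged opcode. The stand-alone sketch below (plain C++, not V8 code) shows the rebasing such a load performs under pointer compression; both former flavors had come down to this same operation, which is presumably what allows the merge, while Smi loads keep their separate zero-extending opcode.

#include <cstdint>

// A compressed tagged value is the low 32 bits of the full pointer; loads of
// kTaggedPointer and kTagged both rebase it against the 4GB-aligned cage base.
uint64_t DecompressTagged(uint64_t cage_base, uint32_t compressed) {
  return cage_base + static_cast<uint64_t>(compressed);
}

// kTaggedSigned (Smi) loads stay on their own opcode: plain zero-extension,
// with no base added.
uint64_t DecompressTaggedSigned(uint32_t compressed) {
  return static_cast<uint64_t>(compressed);
}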
diff --git a/src/compiler/backend/s390/code-generator-s390.cc b/src/compiler/backend/s390/code-generator-s390.cc index 93b240a863..e3ee9963cf 100644 --- a/src/compiler/backend/s390/code-generator-s390.cc +++ b/src/compiler/backend/s390/code-generator-s390.cc @@ -22,7 +22,7 @@ namespace v8 { namespace internal { namespace compiler { -#define __ tasm()-> +#define __ masm()-> #define kScratchReg ip @@ -209,7 +209,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode { void Generate() final { if (COMPRESS_POINTERS_BOOL) { - __ DecompressTaggedPointer(value_, value_); + __ DecompressTagged(value_, value_); } __ CheckPageFlag( value_, scratch0_, @@ -619,7 +619,7 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) { #define ASSEMBLE_FLOAT_MODULO() \ do { \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ __ PrepareCallCFunction(0, 2, kScratchReg); \ __ MovToFloatParameters(i.InputDoubleRegister(0), \ i.InputDoubleRegister(1)); \ @@ -631,7 +631,7 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) { do { \ /* TODO(bmeurer): We should really get rid of this special instruction, */ \ /* and generate a CallAddress instruction instead. */ \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ __ PrepareCallCFunction(0, 1, kScratchReg); \ __ MovToFloatParameter(i.InputDoubleRegister(0)); \ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \ @@ -643,7 +643,7 @@ static inline int AssembleUnaryOp(Instruction* instr, _R _r, _M _m, _I _i) { do { \ /* TODO(bmeurer): We should really get rid of this special instruction, */ \ /* and generate a CallAddress instruction instead. */ \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ + FrameScope scope(masm(), StackFrame::MANUAL); \ __ PrepareCallCFunction(0, 2, kScratchReg); \ __ MovToFloatParameters(i.InputDoubleRegister(0), \ i.InputDoubleRegister(1)); \ @@ -1021,20 +1021,20 @@ void CodeGenerator::AssemblePrepareTailCall() { namespace { -void FlushPendingPushRegisters(TurboAssembler* tasm, +void FlushPendingPushRegisters(MacroAssembler* masm, FrameAccessState* frame_access_state, ZoneVector* pending_pushes) { switch (pending_pushes->size()) { case 0: break; case 1: - tasm->Push((*pending_pushes)[0]); + masm->Push((*pending_pushes)[0]); break; case 2: - tasm->Push((*pending_pushes)[0], (*pending_pushes)[1]); + masm->Push((*pending_pushes)[0], (*pending_pushes)[1]); break; case 3: - tasm->Push((*pending_pushes)[0], (*pending_pushes)[1], + masm->Push((*pending_pushes)[0], (*pending_pushes)[1], (*pending_pushes)[2]); break; default: @@ -1045,7 +1045,7 @@ void FlushPendingPushRegisters(TurboAssembler* tasm, } void AdjustStackPointerForTailCall( - TurboAssembler* tasm, FrameAccessState* state, int new_slot_above_sp, + MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp, ZoneVector* pending_pushes = nullptr, bool allow_shrinkage = true) { int current_sp_offset = state->GetSPToFPSlotCount() + @@ -1053,15 +1053,15 @@ void AdjustStackPointerForTailCall( int stack_slot_delta = new_slot_above_sp - current_sp_offset; if (stack_slot_delta > 0) { if (pending_pushes != nullptr) { - FlushPendingPushRegisters(tasm, state, pending_pushes); + FlushPendingPushRegisters(masm, state, pending_pushes); } - tasm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize)); + masm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize)); state->IncreaseSPDelta(stack_slot_delta); } 
else if (allow_shrinkage && stack_slot_delta < 0) { if (pending_pushes != nullptr) { - FlushPendingPushRegisters(tasm, state, pending_pushes); + FlushPendingPushRegisters(masm, state, pending_pushes); } - tasm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize)); + masm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize)); state->IncreaseSPDelta(stack_slot_delta); } } @@ -1083,7 +1083,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, LocationOperand::cast(move->destination())); InstructionOperand source(move->source()); AdjustStackPointerForTailCall( - tasm(), frame_access_state(), + masm(), frame_access_state(), destination_location.index() - pending_pushes.size(), &pending_pushes); // Pushes of non-register data types are not supported. @@ -1093,20 +1093,20 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, // TODO(arm): We can push more than 3 registers at once. Add support in // the macro-assembler for pushing a list of registers. if (pending_pushes.size() == 3) { - FlushPendingPushRegisters(tasm(), frame_access_state(), + FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes); } move->Eliminate(); } - FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes); + FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes); } - AdjustStackPointerForTailCall(tasm(), frame_access_state(), + AdjustStackPointerForTailCall(masm(), frame_access_state(), first_unused_slot_offset, nullptr, false); } void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr, int first_unused_slot_offset) { - AdjustStackPointerForTailCall(tasm(), frame_access_state(), + AdjustStackPointerForTailCall(masm(), frame_access_state(), first_unused_slot_offset); } @@ -1134,8 +1134,8 @@ void CodeGenerator::BailoutIfDeoptimized() { } int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize; - __ LoadTaggedPointerField( - ip, MemOperand(kJavaScriptCallCodeStartRegister, offset), r0); + __ LoadTaggedField(ip, MemOperand(kJavaScriptCallCodeStartRegister, offset), + r0); __ LoadS32(ip, FieldMemOperand(ip, Code::kKindSpecificFlagsOffset)); __ TestBit(ip, InstructionStream::kMarkedForDeoptimizationBit); __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode), @@ -1218,7 +1218,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } else { // We cannot use the constant pool to load the target since // we've already restored the caller's frame. - ConstantPoolUnavailableScope constant_pool_unavailable(tasm()); + ConstantPoolUnavailableScope constant_pool_unavailable(masm()); __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET); } frame_access_state()->ClearSPDelta(); @@ -1240,14 +1240,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Register func = i.InputRegister(0); if (v8_flags.debug_code) { // Check the function's context matches the context argument. 
- __ LoadTaggedPointerField( - kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset)); + __ LoadTaggedField(kScratchReg, + FieldMemOperand(func, JSFunction::kContextOffset)); __ CmpS64(cp, kScratchReg); __ Assert(eq, AbortReason::kWrongFunctionContext); } static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch"); - __ LoadTaggedPointerField(r4, - FieldMemOperand(func, JSFunction::kCodeOffset)); + __ LoadTaggedField(r4, FieldMemOperand(func, JSFunction::kCodeOffset)); __ CallCodeObject(r4); RecordCallPosition(instr); frame_access_state()->ClearSPDelta(); @@ -1351,7 +1350,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( { // We don't actually want to generate a pile of code for this, so just // claim there is a stack frame, without generating one. - FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE); + FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE); __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck), RelocInfo::CODE_TARGET); } @@ -3175,14 +3174,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand()); break; } - case kS390_LoadDecompressTaggedPointer: { + case kS390_LoadDecompressTagged: { CHECK(instr->HasOutput()); - __ DecompressTaggedPointer(i.OutputRegister(), i.MemoryOperand()); - break; - } - case kS390_LoadDecompressAnyTagged: { - CHECK(instr->HasOutput()); - __ DecompressAnyTagged(i.OutputRegister(), i.MemoryOperand()); + __ DecompressTagged(i.OutputRegister(), i.MemoryOperand()); break; } default: @@ -3580,9 +3574,9 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) { __ mov(argc_reg, Operand(parameter_slots)); __ bind(&skip); } - __ DropArguments(argc_reg, TurboAssembler::kCountIsInteger, + __ DropArguments(argc_reg, MacroAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + MacroAssembler::kCountIncludesReceiver); } else if (additional_pop_count->IsImmediate()) { int additional_count = g.ToConstant(additional_pop_count).ToInt32(); __ Drop(parameter_slots + additional_count); diff --git a/src/compiler/backend/s390/instruction-codes-s390.h b/src/compiler/backend/s390/instruction-codes-s390.h index f362cddcf7..70e3d8b4b6 100644 --- a/src/compiler/backend/s390/instruction-codes-s390.h +++ b/src/compiler/backend/s390/instruction-codes-s390.h @@ -398,8 +398,7 @@ namespace compiler { V(S390_LoadSimd128) \ V(S390_StoreCompressTagged) \ V(S390_LoadDecompressTaggedSigned) \ - V(S390_LoadDecompressTaggedPointer) \ - V(S390_LoadDecompressAnyTagged) + V(S390_LoadDecompressTagged) // Addressing modes represent the "shape" of inputs to an instruction. // Many instructions support multiple addressing modes. 
Addressing modes diff --git a/src/compiler/backend/s390/instruction-scheduler-s390.cc b/src/compiler/backend/s390/instruction-scheduler-s390.cc index fa0a60a019..91fbd8d139 100644 --- a/src/compiler/backend/s390/instruction-scheduler-s390.cc +++ b/src/compiler/backend/s390/instruction-scheduler-s390.cc @@ -358,8 +358,7 @@ int InstructionScheduler::GetTargetInstructionFlags( case kS390_LoadReverseSimd128: case kS390_Peek: case kS390_LoadDecompressTaggedSigned: - case kS390_LoadDecompressTaggedPointer: - case kS390_LoadDecompressAnyTagged: + case kS390_LoadDecompressTagged: case kS390_S128Load8Splat: case kS390_S128Load16Splat: case kS390_S128Load32Splat: diff --git a/src/compiler/backend/s390/instruction-selector-s390.cc b/src/compiler/backend/s390/instruction-selector-s390.cc index a0192b0022..b59c005ec6 100644 --- a/src/compiler/backend/s390/instruction-selector-s390.cc +++ b/src/compiler/backend/s390/instruction-selector-s390.cc @@ -303,10 +303,10 @@ ArchOpcode SelectLoadOpcode(LoadRepresentation load_rep) { opcode = kS390_LoadDecompressTaggedSigned; break; case MachineRepresentation::kTaggedPointer: - opcode = kS390_LoadDecompressTaggedPointer; + opcode = kS390_LoadDecompressTagged; break; case MachineRepresentation::kTagged: - opcode = kS390_LoadDecompressAnyTagged; + opcode = kS390_LoadDecompressTagged; break; #else case MachineRepresentation::kTaggedSigned: // Fall through. diff --git a/src/compiler/backend/x64/code-generator-x64.cc b/src/compiler/backend/x64/code-generator-x64.cc index 7e8a211e5f..7924a6fe60 100644 --- a/src/compiler/backend/x64/code-generator-x64.cc +++ b/src/compiler/backend/x64/code-generator-x64.cc @@ -4,6 +4,7 @@ #include +#include "src/base/optional.h" #include "src/base/overflowing-math.h" #include "src/codegen/assembler.h" #include "src/codegen/cpu-features.h" @@ -33,7 +34,7 @@ namespace v8 { namespace internal { namespace compiler { -#define __ tasm()-> +#define __ masm()-> // Adds X64 specific methods for decoding operands. 
class X64OperandConverter : public InstructionOperandConverter { @@ -293,7 +294,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode { void Generate() final { if (COMPRESS_POINTERS_BOOL) { - __ DecompressTaggedPointer(value_, value_); + __ DecompressTagged(value_, value_); } __ CheckPageFlag( value_, scratch0_, @@ -334,29 +335,29 @@ class OutOfLineRecordWrite final : public OutOfLineCode { }; template -int EmitStore(TurboAssembler* tasm, Operand operand, Register value, - MachineRepresentation rep) { +int EmitStore(MacroAssembler* masm, Operand operand, Register value, + MachineRepresentation rep) { int store_instr_offset; if (order == std::memory_order_relaxed) { - store_instr_offset = tasm->pc_offset(); + store_instr_offset = masm->pc_offset(); switch (rep) { case MachineRepresentation::kWord8: - tasm->movb(operand, value); + masm->movb(operand, value); break; case MachineRepresentation::kWord16: - tasm->movw(operand, value); + masm->movw(operand, value); break; case MachineRepresentation::kWord32: - tasm->movl(operand, value); + masm->movl(operand, value); break; case MachineRepresentation::kWord64: - tasm->movq(operand, value); + masm->movq(operand, value); break; case MachineRepresentation::kTagged: - tasm->StoreTaggedField(operand, value); + masm->StoreTaggedField(operand, value); break; case MachineRepresentation::kSandboxedPointer: - tasm->StoreSandboxedPointerField(operand, value); + masm->StoreSandboxedPointerField(operand, value); break; default: UNREACHABLE(); @@ -367,28 +368,28 @@ int EmitStore(TurboAssembler* tasm, Operand operand, Register value, DCHECK_EQ(order, std::memory_order_seq_cst); switch (rep) { case MachineRepresentation::kWord8: - tasm->movq(kScratchRegister, value); - store_instr_offset = tasm->pc_offset(); - tasm->xchgb(kScratchRegister, operand); + masm->movq(kScratchRegister, value); + store_instr_offset = masm->pc_offset(); + masm->xchgb(kScratchRegister, operand); break; case MachineRepresentation::kWord16: - tasm->movq(kScratchRegister, value); - store_instr_offset = tasm->pc_offset(); - tasm->xchgw(kScratchRegister, operand); + masm->movq(kScratchRegister, value); + store_instr_offset = masm->pc_offset(); + masm->xchgw(kScratchRegister, operand); break; case MachineRepresentation::kWord32: - tasm->movq(kScratchRegister, value); - store_instr_offset = tasm->pc_offset(); - tasm->xchgl(kScratchRegister, operand); + masm->movq(kScratchRegister, value); + store_instr_offset = masm->pc_offset(); + masm->xchgl(kScratchRegister, operand); break; case MachineRepresentation::kWord64: - tasm->movq(kScratchRegister, value); - store_instr_offset = tasm->pc_offset(); - tasm->xchgq(kScratchRegister, operand); + masm->movq(kScratchRegister, value); + store_instr_offset = masm->pc_offset(); + masm->xchgq(kScratchRegister, operand); break; case MachineRepresentation::kTagged: - store_instr_offset = tasm->pc_offset(); - tasm->AtomicStoreTaggedField(operand, value); + store_instr_offset = masm->pc_offset(); + masm->AtomicStoreTaggedField(operand, value); break; default: UNREACHABLE(); @@ -397,29 +398,29 @@ int EmitStore(TurboAssembler* tasm, Operand operand, Register value, } template -int EmitStore(TurboAssembler* tasm, Operand operand, Immediate value, - MachineRepresentation rep); +int EmitStore(MacroAssembler* masm, Operand operand, Immediate value, + MachineRepresentation rep); template <> -int EmitStore(TurboAssembler* tasm, Operand operand, - Immediate value, - MachineRepresentation rep) { - int store_instr_offset = tasm->pc_offset(); +int 
EmitStore(MacroAssembler* masm, Operand operand, + Immediate value, + MachineRepresentation rep) { + int store_instr_offset = masm->pc_offset(); switch (rep) { case MachineRepresentation::kWord8: - tasm->movb(operand, value); + masm->movb(operand, value); break; case MachineRepresentation::kWord16: - tasm->movw(operand, value); + masm->movw(operand, value); break; case MachineRepresentation::kWord32: - tasm->movl(operand, value); + masm->movl(operand, value); break; case MachineRepresentation::kWord64: - tasm->movq(operand, value); + masm->movq(operand, value); break; case MachineRepresentation::kTagged: - tasm->StoreTaggedField(operand, value); + masm->StoreTaggedField(operand, value); break; default: UNREACHABLE(); @@ -509,7 +510,7 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen, #endif // V8_ENABLE_WEBASSEMBLY #ifdef V8_IS_TSAN -void EmitMemoryProbeForTrapHandlerIfNeeded(TurboAssembler* tasm, +void EmitMemoryProbeForTrapHandlerIfNeeded(MacroAssembler* masm, Register scratch, Operand operand, StubCallMode mode, int size) { #if V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED @@ -522,16 +523,16 @@ void EmitMemoryProbeForTrapHandlerIfNeeded(TurboAssembler* tasm, mode == StubCallMode::kCallWasmRuntimeStub) { switch (size) { case kInt8Size: - tasm->movb(scratch, operand); + masm->movb(scratch, operand); break; case kInt16Size: - tasm->movw(scratch, operand); + masm->movw(scratch, operand); break; case kInt32Size: - tasm->movl(scratch, operand); + masm->movl(scratch, operand); break; case kInt64Size: - tasm->movq(scratch, operand); + masm->movq(scratch, operand); break; default: UNREACHABLE(); @@ -569,14 +570,14 @@ class OutOfLineTSANStore : public OutOfLineCode { // A direct call to a wasm runtime stub defined in this module. // Just encode the stub index. This will be patched when the code // is added to the native module and copied into wasm code space. - tasm()->CallTSANStoreStub(scratch0_, value_, save_fp_mode, size_, + masm()->CallTSANStoreStub(scratch0_, value_, save_fp_mode, size_, StubCallMode::kCallWasmRuntimeStub, memory_order_); return; } #endif // V8_ENABLE_WEBASSEMBLY - tasm()->CallTSANStoreStub(scratch0_, value_, save_fp_mode, size_, + masm()->CallTSANStoreStub(scratch0_, value_, save_fp_mode, size_, StubCallMode::kCallBuiltinPointer, memory_order_); } @@ -592,7 +593,7 @@ class OutOfLineTSANStore : public OutOfLineCode { Zone* zone_; }; -void EmitTSANStoreOOL(Zone* zone, CodeGenerator* codegen, TurboAssembler* tasm, +void EmitTSANStoreOOL(Zone* zone, CodeGenerator* codegen, MacroAssembler* masm, Operand operand, Register value_reg, X64OperandConverter& i, StubCallMode mode, int size, std::memory_order order) { @@ -606,45 +607,45 @@ void EmitTSANStoreOOL(Zone* zone, CodeGenerator* codegen, TurboAssembler* tasm, Register scratch0 = i.TempRegister(0); auto tsan_ool = zone->New(codegen, operand, value_reg, scratch0, mode, size, order); - tasm->jmp(tsan_ool->entry()); - tasm->bind(tsan_ool->exit()); + masm->jmp(tsan_ool->entry()); + masm->bind(tsan_ool->exit()); } template -Register GetTSANValueRegister(TurboAssembler* tasm, Register value, +Register GetTSANValueRegister(MacroAssembler* masm, Register value, X64OperandConverter& i, MachineRepresentation rep) { if (rep == MachineRepresentation::kSandboxedPointer) { // SandboxedPointers need to be encoded. 
Register value_reg = i.TempRegister(1); - tasm->movq(value_reg, value); - tasm->EncodeSandboxedPointer(value_reg); + masm->movq(value_reg, value); + masm->EncodeSandboxedPointer(value_reg); return value_reg; } return value; } template -Register GetTSANValueRegister(TurboAssembler* tasm, Immediate value, +Register GetTSANValueRegister(MacroAssembler* masm, Immediate value, X64OperandConverter& i, MachineRepresentation rep); template <> Register GetTSANValueRegister( - TurboAssembler* tasm, Immediate value, X64OperandConverter& i, + MacroAssembler* masm, Immediate value, X64OperandConverter& i, MachineRepresentation rep) { Register value_reg = i.TempRegister(1); - tasm->movq(value_reg, value); + masm->movq(value_reg, value); if (rep == MachineRepresentation::kSandboxedPointer) { // SandboxedPointers need to be encoded. - tasm->EncodeSandboxedPointer(value_reg); + masm->EncodeSandboxedPointer(value_reg); } return value_reg; } template void EmitTSANAwareStore(Zone* zone, CodeGenerator* codegen, - TurboAssembler* tasm, Operand operand, ValueT value, + MacroAssembler* masm, Operand operand, ValueT value, X64OperandConverter& i, StubCallMode stub_call_mode, MachineRepresentation rep, Instruction* instr) { // The FOR_TESTING code doesn't initialize the root register. We can't call @@ -654,17 +655,17 @@ void EmitTSANAwareStore(Zone* zone, CodeGenerator* codegen, // path. It is not crucial, but it would be nice to remove this restriction. if (codegen->code_kind() != CodeKind::FOR_TESTING) { if (instr->HasMemoryAccessMode()) { - EmitOOLTrapIfNeeded(zone, codegen, instr->opcode(), - instr, tasm->pc_offset()); + EmitOOLTrapIfNeeded(zone, codegen, instr->opcode(), instr, + masm->pc_offset()); } int size = ElementSizeInBytes(rep); - EmitMemoryProbeForTrapHandlerIfNeeded(tasm, i.TempRegister(0), operand, + EmitMemoryProbeForTrapHandlerIfNeeded(masm, i.TempRegister(0), operand, stub_call_mode, size); - Register value_reg = GetTSANValueRegister(tasm, value, i, rep); - EmitTSANStoreOOL(zone, codegen, tasm, operand, value_reg, i, stub_call_mode, + Register value_reg = GetTSANValueRegister(masm, value, i, rep); + EmitTSANStoreOOL(zone, codegen, masm, operand, value_reg, i, stub_call_mode, size, order); } else { - int store_instr_offset = EmitStore(tasm, operand, value, rep); + int store_instr_offset = EmitStore(masm, operand, value, rep); if (instr->HasMemoryAccessMode()) { EmitOOLTrapIfNeeded(zone, codegen, instr->opcode(), instr, store_instr_offset); @@ -718,7 +719,7 @@ class OutOfLineTSANRelaxedLoad final : public OutOfLineCode { }; void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen, - TurboAssembler* tasm, Operand operand, + MacroAssembler* masm, Operand operand, X64OperandConverter& i, StubCallMode mode, int size) { // The FOR_TESTING code doesn't initialize the root register. 
We can't call @@ -731,26 +732,26 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen, Register scratch0 = i.TempRegister(0); auto tsan_ool = zone->New(codegen, operand, scratch0, mode, size); - tasm->jmp(tsan_ool->entry()); - tasm->bind(tsan_ool->exit()); + masm->jmp(tsan_ool->entry()); + masm->bind(tsan_ool->exit()); } #else template void EmitTSANAwareStore(Zone* zone, CodeGenerator* codegen, - TurboAssembler* tasm, Operand operand, ValueT value, + MacroAssembler* masm, Operand operand, ValueT value, X64OperandConverter& i, StubCallMode stub_call_mode, MachineRepresentation rep, Instruction* instr) { DCHECK(order == std::memory_order_relaxed || order == std::memory_order_seq_cst); - int store_instr_off = EmitStore(tasm, operand, value, rep); + int store_instr_off = EmitStore(masm, operand, value, rep); if (instr->HasMemoryAccessMode()) { EmitOOLTrapIfNeeded(zone, codegen, instr->opcode(), instr, store_instr_off); } } void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen, - TurboAssembler* tasm, Operand operand, + MacroAssembler* masm, Operand operand, X64OperandConverter& i, StubCallMode mode, int size) {} #endif // V8_IS_TSAN @@ -923,7 +924,7 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen, #define ASSEMBLE_AVX_BINOP(asm_instr) \ do { \ - CpuFeatureScope avx_scope(tasm(), AVX); \ + CpuFeatureScope avx_scope(masm(), AVX); \ if (HasAddressingMode(instr)) { \ size_t index = 1; \ Operand right = i.MemoryOperand(&index); \ @@ -983,7 +984,7 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen, #define ASSEMBLE_SIMD_BINOP(opcode) \ do { \ if (CpuFeatures::IsSupported(AVX)) { \ - CpuFeatureScope avx_scope(tasm(), AVX); \ + CpuFeatureScope avx_scope(masm(), AVX); \ __ v##opcode(i.OutputSimd128Register(), i.InputSimd128Register(0), \ i.InputSimd128Register(1)); \ } else { \ @@ -1015,7 +1016,7 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen, XMMRegister dst = i.OutputSimd128Register(); \ byte input_index = instr->InputCount() == 2 ? 
1 : 0; \ if (CpuFeatures::IsSupported(AVX)) { \ - CpuFeatureScope avx_scope(tasm(), AVX); \ + CpuFeatureScope avx_scope(masm(), AVX); \ DCHECK(instr->InputAt(input_index)->IsSimd128Register()); \ __ v##opcode(dst, i.InputSimd128Register(0), \ i.InputSimd128Register(input_index)); \ @@ -1030,7 +1031,7 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen, XMMRegister dst = i.OutputSimd128Register(); \ XMMRegister src = i.InputSimd128Register(0); \ if (CpuFeatures::IsSupported(AVX)) { \ - CpuFeatureScope avx_scope(tasm(), AVX); \ + CpuFeatureScope avx_scope(masm(), AVX); \ DCHECK(instr->InputAt(1)->IsSimd128Register()); \ __ v##opcode(dst, src, i.InputSimd128Register(1), imm); \ } else { \ @@ -1061,7 +1062,7 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen, XMMRegister dst = i.OutputSimd128Register(); \ if (HasImmediateInput(instr, 1)) { \ if (CpuFeatures::IsSupported(AVX)) { \ - CpuFeatureScope avx_scope(tasm(), AVX); \ + CpuFeatureScope avx_scope(masm(), AVX); \ __ v##opcode(dst, i.InputSimd128Register(0), \ byte{i.InputInt##width(1)}); \ } else { \ @@ -1074,7 +1075,7 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen, __ andq(kScratchRegister, Immediate(mask)); \ __ Movq(kScratchDoubleReg, kScratchRegister); \ if (CpuFeatures::IsSupported(AVX)) { \ - CpuFeatureScope avx_scope(tasm(), AVX); \ + CpuFeatureScope avx_scope(masm(), AVX); \ __ v##opcode(dst, i.InputSimd128Register(0), kScratchDoubleReg); \ } else { \ DCHECK_EQ(dst, i.InputSimd128Register(0)); \ @@ -1102,13 +1103,13 @@ void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen, EmitOOLTrapIfNeeded(zone(), this, opcode, instr, load_offset); \ } while (false) -#define ASSEMBLE_SEQ_CST_STORE(rep) \ - do { \ - Register value = i.InputRegister(0); \ - Operand operand = i.MemoryOperand(1); \ - EmitTSANAwareStore( \ - zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), \ - rep, instr); \ +#define ASSEMBLE_SEQ_CST_STORE(rep) \ + do { \ + Register value = i.InputRegister(0); \ + Operand operand = i.MemoryOperand(1); \ + EmitTSANAwareStore( \ + zone(), this, masm(), operand, value, i, DetermineStubCallMode(), rep, \ + instr); \ } while (false) void CodeGenerator::AssembleDeconstructFrame() { @@ -1127,7 +1128,7 @@ void CodeGenerator::AssemblePrepareTailCall() { namespace { void AdjustStackPointerForTailCall(Instruction* instr, - TurboAssembler* assembler, Linkage* linkage, + MacroAssembler* assembler, Linkage* linkage, OptimizedCompilationInfo* info, FrameAccessState* state, int new_slot_above_sp, @@ -1163,7 +1164,7 @@ void AdjustStackPointerForTailCall(Instruction* instr, } } -void SetupSimdImmediateInRegister(TurboAssembler* assembler, uint32_t* imms, +void SetupSimdImmediateInRegister(MacroAssembler* assembler, uint32_t* imms, XMMRegister reg) { assembler->Move(reg, make_uint64(imms[3], imms[2]), make_uint64(imms[1], imms[0])); @@ -1186,7 +1187,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, LocationOperand destination_location( LocationOperand::cast(move->destination())); InstructionOperand source(move->source()); - AdjustStackPointerForTailCall(instr, tasm(), linkage(), info(), + AdjustStackPointerForTailCall(instr, masm(), linkage(), info(), frame_access_state(), destination_location.index()); if (source.IsStackSlot()) { @@ -1205,14 +1206,14 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, move->Eliminate(); } } - AdjustStackPointerForTailCall(instr, tasm(), linkage(), info(), + 
AdjustStackPointerForTailCall(instr, masm(), linkage(), info(), frame_access_state(), first_unused_slot_offset, false); } void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr, int first_unused_slot_offset) { - AdjustStackPointerForTailCall(instr, tasm(), linkage(), info(), + AdjustStackPointerForTailCall(instr, masm(), linkage(), info(), frame_access_state(), first_unused_slot_offset); } @@ -1358,8 +1359,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ Assert(equal, AbortReason::kWrongFunctionContext); } static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch"); - __ LoadTaggedPointerField(rcx, - FieldOperand(func, JSFunction::kCodeOffset)); + __ LoadTaggedField(rcx, FieldOperand(func, JSFunction::kCodeOffset)); __ CallCodeObject(rcx); frame_access_state()->ClearSPDelta(); RecordCallPosition(instr); @@ -1464,7 +1464,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( { // We don't actually want to generate a pile of code for this, so just // claim there is a stack frame, without generating one. - FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE); + FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE); __ Call(BUILTIN_CODE(isolate(), AbortCSADcheck), RelocInfo::CODE_TARGET); } @@ -1561,12 +1561,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( DetermineStubCallMode()); if (arch_opcode == kArchStoreWithWriteBarrier) { EmitTSANAwareStore( - zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), + zone(), this, masm(), operand, value, i, DetermineStubCallMode(), MachineRepresentation::kTagged, instr); } else { DCHECK_EQ(arch_opcode, kArchAtomicStoreWithWriteBarrier); EmitTSANAwareStore( - zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), + zone(), this, masm(), operand, value, i, DetermineStubCallMode(), MachineRepresentation::kTagged, instr); } if (mode > RecordWriteMode::kValueIsPointer) { @@ -1873,7 +1873,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ASSEMBLE_SSE_UNOP(Cvtss2sd); break; case kSSEFloat32Round: { - CpuFeatureScope sse_scope(tasm(), SSE4_1); + CpuFeatureScope sse_scope(masm(), SSE4_1); RoundingMode const mode = static_cast(MiscField::decode(instr->opcode())); __ Roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode); @@ -1930,7 +1930,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( // The following 2 instruction implicitly use rax. 
__ fnstsw_ax(); if (CpuFeatures::IsSupported(SAHF)) { - CpuFeatureScope sahf_scope(tasm(), SAHF); + CpuFeatureScope sahf_scope(masm(), SAHF); __ sahf(); } else { __ shrl(rax, Immediate(8)); @@ -2066,7 +2066,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( ASSEMBLE_SSE_UNOP(Sqrtsd); break; case kSSEFloat64Round: { - CpuFeatureScope sse_scope(tasm(), SSE4_1); + CpuFeatureScope sse_scope(masm(), SSE4_1); RoundingMode const mode = static_cast(MiscField::decode(instr->opcode())); __ Roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode); @@ -2389,7 +2389,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } break; case kAVXFloat32Cmp: { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); if (instr->InputAt(1)->IsFPRegister()) { __ vucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); } else { @@ -2413,7 +2413,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ Movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister()); break; case kAVXFloat64Cmp: { - CpuFeatureScope avx_scope(tasm(), AVX); + CpuFeatureScope avx_scope(masm(), AVX); if (instr->InputAt(1)->IsFPRegister()) { __ vucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); } else { @@ -2487,12 +2487,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( if (HasImmediateInput(instr, index)) { Immediate value(Immediate(i.InputInt8(index))); EmitTSANAwareStore( - zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), + zone(), this, masm(), operand, value, i, DetermineStubCallMode(), MachineRepresentation::kWord8, instr); } else { Register value(i.InputRegister(index)); EmitTSANAwareStore( - zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), + zone(), this, masm(), operand, value, i, DetermineStubCallMode(), MachineRepresentation::kWord8, instr); } break; @@ -2522,12 +2522,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( if (HasImmediateInput(instr, index)) { Immediate value(Immediate(i.InputInt16(index))); EmitTSANAwareStore( - zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), + zone(), this, masm(), operand, value, i, DetermineStubCallMode(), MachineRepresentation::kWord16, instr); } else { Register value(i.InputRegister(index)); EmitTSANAwareStore( - zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), + zone(), this, masm(), operand, value, i, DetermineStubCallMode(), MachineRepresentation::kWord16, instr); } break; @@ -2538,7 +2538,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( if (HasAddressingMode(instr)) { Operand address(i.MemoryOperand()); __ movl(i.OutputRegister(), address); - EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i, + EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, masm(), address, i, DetermineStubCallMode(), kInt32Size); } else { if (HasRegisterInput(instr, 0)) { @@ -2554,12 +2554,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( if (HasImmediateInput(instr, index)) { Immediate value(i.InputImmediate(index)); EmitTSANAwareStore( - zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), + zone(), this, masm(), operand, value, i, DetermineStubCallMode(), MachineRepresentation::kWord32, instr); } else { Register value(i.InputRegister(index)); EmitTSANAwareStore( - zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), + zone(), this, masm(), operand, value, i, DetermineStubCallMode(), 
MachineRepresentation::kWord32, instr); } } @@ -2572,23 +2572,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( CHECK(instr->HasOutput()); Operand address(i.MemoryOperand()); __ DecompressTaggedSigned(i.OutputRegister(), address); - EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i, + EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, masm(), address, i, DetermineStubCallMode(), kTaggedSize); break; } - case kX64MovqDecompressTaggedPointer: { + case kX64MovqDecompressTagged: { CHECK(instr->HasOutput()); Operand address(i.MemoryOperand()); - __ DecompressTaggedPointer(i.OutputRegister(), address); - EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i, - DetermineStubCallMode(), kTaggedSize); - break; - } - case kX64MovqDecompressAnyTagged: { - CHECK(instr->HasOutput()); - Operand address(i.MemoryOperand()); - __ DecompressAnyTagged(i.OutputRegister(), address); - EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i, + __ DecompressTagged(i.OutputRegister(), address); + EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, masm(), address, i, DetermineStubCallMode(), kTaggedSize); break; } @@ -2599,12 +2591,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( if (HasImmediateInput(instr, index)) { Immediate value(i.InputImmediate(index)); EmitTSANAwareStore( - zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), + zone(), this, masm(), operand, value, i, DetermineStubCallMode(), MachineRepresentation::kTagged, instr); } else { Register value(i.InputRegister(index)); EmitTSANAwareStore( - zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), + zone(), this, masm(), operand, value, i, DetermineStubCallMode(), MachineRepresentation::kTagged, instr); } break; @@ -2615,7 +2607,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Register dst = i.OutputRegister(); __ movq(dst, address); __ DecodeSandboxedPointer(dst); - EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i, + EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, masm(), address, i, DetermineStubCallMode(), kSystemPointerSize); break; @@ -2627,7 +2619,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( CHECK(!HasImmediateInput(instr, index)); Register value(i.InputRegister(index)); EmitTSANAwareStore( - zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), + zone(), this, masm(), operand, value, i, DetermineStubCallMode(), MachineRepresentation::kSandboxedPointer, instr); break; } @@ -2636,7 +2628,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); Operand address(i.MemoryOperand()); __ movq(i.OutputRegister(), address); - EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i, + EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, masm(), address, i, DetermineStubCallMode(), kInt64Size); } else { size_t index = 0; @@ -2644,12 +2636,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( if (HasImmediateInput(instr, index)) { Immediate value(i.InputImmediate(index)); EmitTSANAwareStore( - zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), + zone(), this, masm(), operand, value, i, DetermineStubCallMode(), MachineRepresentation::kWord64, instr); } else { Register value(i.InputRegister(index)); EmitTSANAwareStore( - zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), + zone(), this, masm(), operand, value, i, DetermineStubCallMode(), 
              MachineRepresentation::kWord64, instr);
        }
      }
@@ -3206,7 +3198,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kX64I64x2Eq: {
-      CpuFeatureScope sse_scope(tasm(), SSE4_1);
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
       ASSEMBLE_SIMD_BINOP(pcmpeqq);
       break;
     }
@@ -3486,7 +3478,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       for (int j = 0; j < 4; j++) {
         imm[j] = i.InputUint32(j);
       }
-      SetupSimdImmediateInRegister(tasm(), imm, dst);
+      SetupSimdImmediateInRegister(masm(), imm, dst);
       break;
     }
     case kX64S128Zero: {
@@ -3994,7 +3986,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
           mask[j - 1] = i.InputUint32(j);
         }
-        SetupSimdImmediateInRegister(tasm(), mask, tmp_simd);
+        SetupSimdImmediateInRegister(masm(), mask, tmp_simd);
         __ Pshufb(dst, tmp_simd);
       } else {  // two input operands
         DCHECK_NE(tmp_simd, i.InputSimd128Register(1));
@@ -4008,7 +4000,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
             mask1[j - 2] |= (lane < kSimd128Size ? lane : 0x80) << k;
           }
         }
-        SetupSimdImmediateInRegister(tasm(), mask1, tmp_simd);
+        SetupSimdImmediateInRegister(masm(), mask1, tmp_simd);
         __ Pshufb(kScratchDoubleReg, tmp_simd);
         uint32_t mask2[4] = {};
         if (instr->InputAt(1)->IsSimd128Register()) {
@@ -4024,7 +4016,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
             mask2[j - 2] |= (lane >= kSimd128Size ? (lane & 0x0F) : 0x80) << k;
           }
         }
-        SetupSimdImmediateInRegister(tasm(), mask2, tmp_simd);
+        SetupSimdImmediateInRegister(masm(), mask2, tmp_simd);
        __ Pshufb(dst, tmp_simd);
        __ Por(dst, kScratchDoubleReg);
      }
@@ -4755,6 +4747,34 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
   __ bind(&done);
 }
 
+void CodeGenerator::AssembleArchBinarySearchSwitchRange(
+    Register input, RpoNumber def_block, std::pair<int32_t, Label*>* begin,
+    std::pair<int32_t, Label*>* end, base::Optional<int32_t>& last_cmp_value) {
+  if (end - begin < kBinarySearchSwitchMinimalCases) {
+    if (last_cmp_value && *last_cmp_value == begin->first) {
+      // No need to do another repeat cmp.
+      masm()->j(equal, begin->second);
+      ++begin;
+    }
+
+    while (begin != end) {
+      masm()->JumpIfEqual(input, begin->first, begin->second);
+      ++begin;
+    }
+    AssembleArchJumpRegardlessOfAssemblyOrder(def_block);
+    return;
+  }
+  auto middle = begin + (end - begin) / 2;
+  Label less_label;
+  masm()->JumpIfLessThan(input, middle->first, &less_label);
+  last_cmp_value = middle->first;
+  AssembleArchBinarySearchSwitchRange(input, def_block, middle, end,
+                                      last_cmp_value);
+  masm()->bind(&less_label);
+  AssembleArchBinarySearchSwitchRange(input, def_block, begin, middle,
+                                      last_cmp_value);
+}
+
 void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
   X64OperandConverter i(this, instr);
   Register input = i.InputRegister(0);
@@ -4762,8 +4782,10 @@ void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
   for (size_t index = 2; index < instr->InputCount(); index += 2) {
     cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
   }
+  base::Optional<int32_t> last_cmp_value;
   AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(),
-                                      cases.data() + cases.size());
+                                      cases.data() + cases.size(),
+                                      last_cmp_value);
 }
 
 void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
@@ -5057,8 +5079,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
     __ j(greater, &mismatch_return, Label::kNear);
     __ Ret(parameter_slots * kSystemPointerSize, scratch_reg);
     __ bind(&mismatch_return);
-    __ DropArguments(argc_reg, scratch_reg, TurboAssembler::kCountIsInteger,
-                     TurboAssembler::kCountIncludesReceiver);
+    __ DropArguments(argc_reg, scratch_reg, MacroAssembler::kCountIsInteger,
+                     MacroAssembler::kCountIncludesReceiver);
     // We use a return instead of a jump for better return address prediction.
     __ Ret();
   } else if (additional_pop_count->IsImmediate()) {
@@ -5082,7 +5104,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
   }
 }
 
-void CodeGenerator::FinishCode() { tasm()->PatchConstPool(); }
+void CodeGenerator::FinishCode() { masm()->PatchConstPool(); }
 
 void CodeGenerator::PrepareForDeoptimizationExits(
     ZoneDeque<DeoptimizationExit*>* exits) {}
diff --git a/src/compiler/backend/x64/instruction-codes-x64.h b/src/compiler/backend/x64/instruction-codes-x64.h
index a0fab0aab9..1d74783d4b 100644
--- a/src/compiler/backend/x64/instruction-codes-x64.h
+++ b/src/compiler/backend/x64/instruction-codes-x64.h
@@ -173,8 +173,7 @@ namespace compiler {
   V(X64Float32Abs)                  \
   V(X64Float32Neg)                  \
   V(X64MovqDecompressTaggedSigned)  \
-  V(X64MovqDecompressTaggedPointer) \
-  V(X64MovqDecompressAnyTagged)     \
+  V(X64MovqDecompressTagged)        \
   V(X64MovqCompressTagged)          \
   V(X64MovqEncodeSandboxedPointer)  \
   V(X64MovqDecodeSandboxedPointer)  \
diff --git a/src/compiler/backend/x64/instruction-scheduler-x64.cc b/src/compiler/backend/x64/instruction-scheduler-x64.cc
index 11e265605a..02acdb5f04 100644
--- a/src/compiler/backend/x64/instruction-scheduler-x64.cc
+++ b/src/compiler/backend/x64/instruction-scheduler-x64.cc
@@ -403,8 +403,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
     }
 
     case kX64MovqDecompressTaggedSigned:
-    case kX64MovqDecompressTaggedPointer:
-    case kX64MovqDecompressAnyTagged:
+    case kX64MovqDecompressTagged:
     case kX64MovqCompressTagged:
     case kX64MovqDecodeSandboxedPointer:
     case kX64MovqEncodeSandboxedPointer:
diff --git a/src/compiler/backend/x64/instruction-selector-x64.cc b/src/compiler/backend/x64/instruction-selector-x64.cc
index a5fdb3529b..1c276abb8f 100644
--- a/src/compiler/backend/x64/instruction-selector-x64.cc
+++ 
b/src/compiler/backend/x64/instruction-selector-x64.cc @@ -224,7 +224,7 @@ class X64OperandGenerator final : public OperandGenerator { m.object().ResolvedValue())) { ptrdiff_t const delta = m.index().ResolvedValue() + - TurboAssemblerBase::RootRegisterOffsetForExternalReference( + MacroAssemblerBase::RootRegisterOffsetForExternalReference( selector()->isolate(), m.object().ResolvedValue()); if (is_int32(delta)) { inputs[(*input_count)++] = TempImmediate(static_cast(delta)); @@ -318,10 +318,8 @@ ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) { opcode = kX64MovqDecompressTaggedSigned; break; case MachineRepresentation::kTaggedPointer: - opcode = kX64MovqDecompressTaggedPointer; - break; case MachineRepresentation::kTagged: - opcode = kX64MovqDecompressAnyTagged; + opcode = kX64MovqDecompressTagged; break; #else case MachineRepresentation::kTaggedSigned: // Fall through. @@ -2538,7 +2536,7 @@ void VisitWord64EqualImpl(InstructionSelector* selector, Node* node, return VisitCompare( selector, opcode, g.TempImmediate( - TurboAssemblerBase::RootRegisterOffsetForRootIndex(root_index)), + MacroAssemblerBase::RootRegisterOffsetForRootIndex(root_index)), g.UseRegister(m.left().node()), cont); } } @@ -2576,7 +2574,7 @@ void VisitWord32EqualImpl(InstructionSelector* selector, Node* node, return VisitCompare( selector, opcode, g.TempImmediate( - TurboAssemblerBase::RootRegisterOffsetForRootIndex(root_index)), + MacroAssemblerBase::RootRegisterOffsetForRootIndex(root_index)), g.UseRegister(left), cont); } } diff --git a/src/compiler/basic-block-instrumentor.cc b/src/compiler/basic-block-instrumentor.cc index aa97ce705c..448bed84c9 100644 --- a/src/compiler/basic-block-instrumentor.cc +++ b/src/compiler/basic-block-instrumentor.cc @@ -84,7 +84,7 @@ BasicBlockProfilerData* BasicBlockInstrumentor::Instrument( // PatchBasicBlockCountersReference). An important and subtle point: we // cannot use the root handle basic_block_counters_marker_handle() and must // create a new separate handle. Otherwise - // TurboAssemblerBase::IndirectLoadConstant would helpfully emit a + // MacroAssemblerBase::IndirectLoadConstant would helpfully emit a // root-relative load rather than putting this value in the constants table // where we expect it to be for patching. 
counters_array = graph->NewNode(common.HeapConstant(Handle::New( diff --git a/src/compiler/effect-control-linearizer.cc b/src/compiler/effect-control-linearizer.cc index 042cc0179b..681a63e104 100644 --- a/src/compiler/effect-control-linearizer.cc +++ b/src/compiler/effect-control-linearizer.cc @@ -1851,7 +1851,7 @@ Node* EffectControlLinearizer::LowerChangeTaggedToTaggedSigned(Node* node) { Node* value = node->InputAt(0); auto if_not_smi = __ MakeDeferredLabel(); - auto done = __ MakeLabel(MachineRepresentation::kWord32); + auto done = __ MakeLabel(MachineRepresentation::kTaggedSigned); Node* check = ObjectIsSmi(value); __ GotoIfNot(check, &if_not_smi); @@ -4152,11 +4152,10 @@ Node* EffectControlLinearizer::StringCharCodeAt(Node* receiver, __ Bind(&if_seqstring); { - Node* receiver_is_onebyte = __ Word32Equal( + Node* receiver_is_onebyte = __ Word32Equal(__ Word32And(receiver_instance_type, __ Int32Constant(kStringEncodingMask)), - __ Int32Constant(kTwoByteStringTag)), - __ Int32Constant(0)); + __ Int32Constant(kOneByteStringTag)); Node* result = LoadFromSeqString(receiver, position, receiver_is_onebyte); __ Goto(&loop_done, result); } diff --git a/src/compiler/loop-analysis.cc b/src/compiler/loop-analysis.cc index 71c82cd87d..a50582b6fc 100644 --- a/src/compiler/loop-analysis.cc +++ b/src/compiler/loop-analysis.cc @@ -4,6 +4,7 @@ #include "src/compiler/loop-analysis.h" +#include "src/base/v8-fallthrough.h" #include "src/codegen/tick-counter.h" #include "src/compiler/all-nodes.h" #include "src/compiler/common-operator.h" @@ -553,7 +554,7 @@ LoopTree* LoopFinder::BuildLoopTree(Graph* graph, TickCounter* tick_counter, // static ZoneUnorderedSet* LoopFinder::FindSmallInnermostLoopFromHeader( Node* loop_header, AllNodes& all_nodes, Zone* zone, size_t max_size, - bool calls_are_large) { + Purpose purpose) { auto* visited = zone->New>(zone); std::vector queue; @@ -565,6 +566,7 @@ ZoneUnorderedSet* LoopFinder::FindSmallInnermostLoopFromHeader( for (Node * use_name : node->uses()) { \ if (condition && visited->count(use_name) == 0) queue.push_back(use_name); \ } + bool has_instruction_worth_peeling = false; while (!queue.empty()) { Node* node = queue.back(); @@ -596,16 +598,16 @@ ZoneUnorderedSet* LoopFinder::FindSmallInnermostLoopFromHeader( } // All uses are outside the loop, do nothing. break; - // If {calls_are_large}, call nodes are considered to have unbounded size, + // If unrolling, call nodes are considered to have unbounded size, // i.e. >max_size, with the exception of certain wasm builtins. case IrOpcode::kTailCall: case IrOpcode::kJSWasmCall: case IrOpcode::kJSCall: - if (calls_are_large) return nullptr; + if (purpose == Purpose::kLoopUnrolling) return nullptr; ENQUEUE_USES(use, true) break; case IrOpcode::kCall: { - if (!calls_are_large) { + if (purpose == Purpose::kLoopPeeling) { ENQUEUE_USES(use, true); break; } @@ -632,14 +634,16 @@ ZoneUnorderedSet* LoopFinder::FindSmallInnermostLoopFromHeader( WasmCode::kWasmRethrow, WasmCode::kWasmRethrowExplicitContext, // Fast wasm-gc operations. 
WasmCode::kWasmRefFunc}; - if (std::count(unrollable_builtins, - unrollable_builtins + arraysize(unrollable_builtins), - info) == 0) { + if (std::count(std::begin(unrollable_builtins), + std::end(unrollable_builtins), info) == 0) { return nullptr; } ENQUEUE_USES(use, true) break; } + case IrOpcode::kStringPrepareForGetCodeunit: + has_instruction_worth_peeling = true; + V8_FALLTHROUGH; default: ENQUEUE_USES(use, true) break; @@ -672,6 +676,12 @@ ZoneUnorderedSet* LoopFinder::FindSmallInnermostLoopFromHeader( } } + // Only peel functions containing instructions for which loop peeling is known + // to be useful. TODO(7748): Add more instructions to get more benefits out of + // loop peeling. + if (purpose == Purpose::kLoopPeeling && !has_instruction_worth_peeling) { + return nullptr; + } return visited; } #endif // V8_ENABLE_WEBASSEMBLY diff --git a/src/compiler/loop-analysis.h b/src/compiler/loop-analysis.h index d3c53b850b..07d30b8cda 100644 --- a/src/compiler/loop-analysis.h +++ b/src/compiler/loop-analysis.h @@ -178,9 +178,11 @@ class V8_EXPORT_PRIVATE LoopFinder { static LoopTree* BuildLoopTree(Graph* graph, TickCounter* tick_counter, Zone* temp_zone); - static bool HasMarkedExits(LoopTree* loop_tree_, const LoopTree::Loop* loop); + static bool HasMarkedExits(LoopTree* loop_tree, const LoopTree::Loop* loop); #if V8_ENABLE_WEBASSEMBLY + enum class Purpose { kLoopPeeling, kLoopUnrolling }; + // Find all nodes in the loop headed by {loop_header} if it contains no nested // loops. // Assumption: *if* this loop has no nested loops, all exits from the loop are @@ -192,7 +194,7 @@ class V8_EXPORT_PRIVATE LoopFinder { // 3) a nested loop is found in the loop. static ZoneUnorderedSet* FindSmallInnermostLoopFromHeader( Node* loop_header, AllNodes& all_nodes, Zone* zone, size_t max_size, - bool calls_are_large); + Purpose purpose); #endif }; diff --git a/src/compiler/machine-operator-reducer.cc b/src/compiler/machine-operator-reducer.cc index 56e37fcea3..228aa7cd25 100644 --- a/src/compiler/machine-operator-reducer.cc +++ b/src/compiler/machine-operator-reducer.cc @@ -381,6 +381,10 @@ Node* MachineOperatorReducer::TruncateInt64ToInt32(Node* value) { return reduction.Changed() ? reduction.replacement() : node; } +Node* MachineOperatorReducer::ChangeInt32ToInt64(Node* value) { + return graph()->NewNode(machine()->ChangeInt32ToInt64(), value); +} + // Perform constant folding and strength reduction on machine operators. Reduction MachineOperatorReducer::Reduce(Node* node) { switch (node->opcode()) { @@ -1282,7 +1286,10 @@ Reduction MachineOperatorReducer::ReduceInt64Div(Node* node) { } if (m.LeftEqualsRight()) { // x / x => x != 0 Node* const zero = Int64Constant(0); - return Replace(Word64Equal(Word64Equal(m.left().node(), zero), zero)); + // {Word64Equal} can get reduced to a bool/int32, but we need this + // operation to produce an int64. + return Replace(ChangeInt32ToInt64( + Word64Equal(Word64Equal(m.left().node(), zero), zero))); } if (m.right().Is(-1)) { // x / -1 => 0 - x node->ReplaceInput(0, Int64Constant(0)); @@ -1358,7 +1365,10 @@ Reduction MachineOperatorReducer::ReduceUint64Div(Node* node) { } if (m.LeftEqualsRight()) { // x / x => x != 0 Node* const zero = Int64Constant(0); - return Replace(Word64Equal(Word64Equal(m.left().node(), zero), zero)); + // {Word64Equal} can get reduced to a bool/int32, but we need this + // operation to produce an int64. 
+ return Replace(ChangeInt32ToInt64( + Word64Equal(Word64Equal(m.left().node(), zero), zero))); } if (m.right().HasResolvedValue()) { Node* const dividend = m.left().node(); diff --git a/src/compiler/machine-operator-reducer.h b/src/compiler/machine-operator-reducer.h index d454b67360..77ba737830 100644 --- a/src/compiler/machine-operator-reducer.h +++ b/src/compiler/machine-operator-reducer.h @@ -80,6 +80,7 @@ class V8_EXPORT_PRIVATE MachineOperatorReducer final Node* Uint32Div(Node* dividend, uint32_t divisor); Node* Uint64Div(Node* dividend, uint64_t divisor); Node* TruncateInt64ToInt32(Node* value); + Node* ChangeInt32ToInt64(Node* value); Reduction ReplaceBool(bool value) { return ReplaceInt32(value ? 1 : 0); } Reduction ReplaceFloat32(float value) { diff --git a/src/compiler/opcodes.h b/src/compiler/opcodes.h index 8b9ea40505..d9fab3fc72 100644 --- a/src/compiler/opcodes.h +++ b/src/compiler/opcodes.h @@ -565,7 +565,9 @@ V(WasmArrayGet) \ V(WasmArraySet) \ V(WasmArrayLength) \ - V(WasmArrayInitializeLength) + V(WasmArrayInitializeLength) \ + V(StringAsWtf16) \ + V(StringPrepareForGetCodeunit) #define SIMPLIFIED_OP_LIST(V) \ SIMPLIFIED_CHANGE_OP_LIST(V) \ diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc index d1c5e27779..1bb4dccd73 100644 --- a/src/compiler/pipeline.cc +++ b/src/compiler/pipeline.cc @@ -1733,7 +1733,8 @@ struct WasmLoopUnrollingPhase { loop_info.header, all_nodes, temp_zone, // Only discover the loop until its size is the maximum unrolled // size for its depth. - maximum_unrollable_size(loop_info.nesting_depth), true); + maximum_unrollable_size(loop_info.nesting_depth), + LoopFinder::Purpose::kLoopUnrolling); if (loop == nullptr) continue; UnrollLoop(loop_info.header, loop, loop_info.nesting_depth, data->graph(), data->common(), temp_zone, data->source_positions(), @@ -1755,7 +1756,8 @@ struct WasmLoopPeelingPhase { ZoneUnorderedSet* loop = LoopFinder::FindSmallInnermostLoopFromHeader( loop_info.header, all_nodes, temp_zone, - v8_flags.wasm_loop_peeling_max_size, false); + v8_flags.wasm_loop_peeling_max_size, + LoopFinder::Purpose::kLoopPeeling); if (loop == nullptr) continue; PeelWasmLoop(loop_info.header, loop, data->graph(), data->common(), temp_zone, data->source_positions(), data->node_origins()); @@ -2224,6 +2226,8 @@ struct WasmGCOptimizationPhase { temp_zone); WasmGCOperatorReducer wasm_gc(&graph_reducer, temp_zone, data->mcgraph(), module); + // Note: if we want to add DeadCodeElimination here, we'll have to update + // the existing reducers to handle kDead and kDeadValue nodes everywhere. 
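// Illustrative sketch, not part of the applied change: a condensed model of
// how the LoopFinder::Purpose argument threaded through the two loop phases
// above changes loop discovery (the allow-list of unrollable wasm builtins is
// omitted for brevity).
enum class Purpose { kLoopPeeling, kLoopUnrolling };

bool KeepDiscoveredLoop(Purpose purpose, bool loop_contains_call,
                        bool has_instruction_worth_peeling) {
  // Unrolling treats calls as unboundedly large and gives up on the loop.
  if (purpose == Purpose::kLoopUnrolling && loop_contains_call) return false;
  // Peeling tolerates calls, but is only applied when the loop contains an
  // instruction known to benefit (currently StringPrepareForGetCodeunit).
  if (purpose == Purpose::kLoopPeeling && !has_instruction_worth_peeling) {
    return false;
  }
  return true;
}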
AddReducer(data, &graph_reducer, &load_elimination); AddReducer(data, &graph_reducer, &wasm_gc); graph_reducer.ReduceGraph(); @@ -3473,10 +3477,10 @@ wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub( CodeGenerator* code_generator = pipeline.code_generator(); wasm::WasmCompilationResult result; - code_generator->tasm()->GetCode( + code_generator->masm()->GetCode( nullptr, &result.code_desc, code_generator->safepoint_table_builder(), static_cast(code_generator->handler_table_offset())); - result.instr_buffer = code_generator->tasm()->ReleaseBuffer(); + result.instr_buffer = code_generator->masm()->ReleaseBuffer(); result.source_positions = code_generator->GetSourcePositionTable(); result.protected_instructions_data = code_generator->GetProtectedInstructionsData(); @@ -3702,11 +3706,11 @@ void Pipeline::GenerateCodeForWasmFunction( auto result = std::make_unique(); CodeGenerator* code_generator = pipeline.code_generator(); - code_generator->tasm()->GetCode( + code_generator->masm()->GetCode( nullptr, &result->code_desc, code_generator->safepoint_table_builder(), static_cast(code_generator->handler_table_offset())); - result->instr_buffer = code_generator->tasm()->ReleaseBuffer(); + result->instr_buffer = code_generator->masm()->ReleaseBuffer(); result->frame_slot_count = code_generator->frame()->GetTotalFrameSlotCount(); result->tagged_parameter_slots = call_descriptor->GetTaggedParameterSlots(); result->source_positions = code_generator->GetSourcePositionTable(); diff --git a/src/compiler/simplified-operator.cc b/src/compiler/simplified-operator.cc index 14077ab267..be1dba82cd 100644 --- a/src/compiler/simplified-operator.cc +++ b/src/compiler/simplified-operator.cc @@ -1286,6 +1286,22 @@ struct SimplifiedOperatorGlobalCache final { "WasmArrayInitializeLength", 2, 1, 1, 0, 1, 0) {} }; WasmArrayInitializeLengthOperator kWasmArrayInitializeLength; + + struct StringAsWtf16Operator final : public Operator { + StringAsWtf16Operator() + : Operator(IrOpcode::kStringAsWtf16, Operator::kEliminatable, + "StringAsWtf16", 1, 1, 1, 1, 1, 1) {} + }; + StringAsWtf16Operator kStringAsWtf16; + + struct StringPrepareForGetCodeunitOperator final : public Operator { + StringPrepareForGetCodeunitOperator() + : Operator(IrOpcode::kStringPrepareForGetCodeunit, + Operator::kEliminatable, "StringPrepareForGetCodeunit", 1, 1, + 1, 3, 1, 1) {} + }; + StringPrepareForGetCodeunitOperator kStringPrepareForGetCodeunit; + #endif #define SPECULATIVE_NUMBER_BINOP(Name) \ @@ -1519,6 +1535,14 @@ const Operator* SimplifiedOperatorBuilder::IsNotNull() { return &cache_.kIsNotNull; } +const Operator* SimplifiedOperatorBuilder::StringAsWtf16() { + return &cache_.kStringAsWtf16; +} + +const Operator* SimplifiedOperatorBuilder::StringPrepareForGetCodeunit() { + return &cache_.kStringPrepareForGetCodeunit; +} + const Operator* SimplifiedOperatorBuilder::WasmExternInternalize() { return zone()->New(IrOpcode::kWasmExternInternalize, Operator::kEliminatable, "WasmExternInternalize", diff --git a/src/compiler/simplified-operator.h b/src/compiler/simplified-operator.h index f602a62e55..52dbcbab49 100644 --- a/src/compiler/simplified-operator.h +++ b/src/compiler/simplified-operator.h @@ -1162,6 +1162,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final const Operator* WasmArraySet(const wasm::ArrayType* type); const Operator* WasmArrayLength(); const Operator* WasmArrayInitializeLength(); + const Operator* StringAsWtf16(); + const Operator* StringPrepareForGetCodeunit(); #endif const Operator* DateNow(); diff 
--git a/src/compiler/turboshaft/type-inference-reducer.h b/src/compiler/turboshaft/type-inference-reducer.h index 4e08b49169..17fa032a00 100644 --- a/src/compiler/turboshaft/type-inference-reducer.h +++ b/src/compiler/turboshaft/type-inference-reducer.h @@ -679,7 +679,18 @@ struct FloatOperationTyper { } static Type Power(const type_t& l, const type_t& r, Zone* zone) { - if (l.is_only_nan() || r.is_only_nan()) return type_t::NaN(); + // x ** NaN => Nan. + if (r.is_only_nan()) return type_t::NaN(); + // x ** +-0 => 1. + if (r.is_constant(0) || r.is_only_minus_zero()) return type_t::Constant(1); + if (l.is_only_nan()) { + // NaN ** 0 => 1. + if (r.Contains(0) || r.has_minus_zero()) { + return type_t::Set({1}, type_t::kNaN, zone); + } + // NaN ** x => NaN (x != +-0). + return type_t::NaN(); + } bool maybe_nan = l.has_nan() || r.has_nan(); // a ** b produces NaN if a < 0 && b is fraction. diff --git a/src/compiler/turboshaft/types.h b/src/compiler/turboshaft/types.h index 44576eab43..494a49194e 100644 --- a/src/compiler/turboshaft/types.h +++ b/src/compiler/turboshaft/types.h @@ -395,6 +395,10 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) WordType : public Type { DCHECK_EQ(set_size(), 1); return set_element(0); } + bool is_constant(word_t value) const { + if (auto c = try_get_constant()) return *c == value; + return false; + } word_t unsigned_min() const { switch (sub_kind()) { case SubKind::kRange: @@ -637,6 +641,12 @@ class EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) FloatType : public Type { DCHECK_EQ(set_size(), 1); return set_element(0); } + bool is_constant(float_t value) const { + if (V8_UNLIKELY(std::isnan(value))) return is_only_nan(); + if (V8_UNLIKELY(IsMinusZero(value))) return is_only_minus_zero(); + if (auto c = try_get_constant()) return *c == value; + return false; + } // Misc bool Contains(float_t value) const; diff --git a/src/compiler/verifier.cc b/src/compiler/verifier.cc index 4fef18fa81..1f23c2671c 100644 --- a/src/compiler/verifier.cc +++ b/src/compiler/verifier.cc @@ -1731,6 +1731,8 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { case IrOpcode::kWasmArraySet: case IrOpcode::kWasmArrayLength: case IrOpcode::kWasmArrayInitializeLength: + case IrOpcode::kStringAsWtf16: + case IrOpcode::kStringPrepareForGetCodeunit: // TODO(manoskouk): What are the constraints here? 
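// Illustrative check, not part of the applied change: the refined Power
// typing above encodes the standard exponentiation corner cases, which
// std::pow also follows for these particular inputs.
#include <cassert>
#include <cmath>
int main() {
  const double nan = std::nan("");
  assert(std::pow(2.0, 0.0) == 1.0);       // x ** +-0 => 1
  assert(std::pow(2.0, -0.0) == 1.0);      // x ** -0 => 1
  assert(std::pow(nan, 0.0) == 1.0);       // NaN ** 0 => 1
  assert(std::isnan(std::pow(nan, 2.0)));  // NaN ** x => NaN (x != +-0)
  assert(std::isnan(std::pow(2.0, nan)));  // x ** NaN => NaN
  return 0;
}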
break; #endif // V8_ENABLE_WEBASSEMBLY diff --git a/src/compiler/wasm-compiler-definitions.h b/src/compiler/wasm-compiler-definitions.h index e5f9e6d6e6..49d6045b89 100644 --- a/src/compiler/wasm-compiler-definitions.h +++ b/src/compiler/wasm-compiler-definitions.h @@ -42,6 +42,8 @@ V8_INLINE bool operator==(const WasmTypeCheckConfig& p1, return p1.from == p2.from && p1.to == p2.to; } +static constexpr int kCharWidthBailoutSentinel = 3; + } // namespace compiler } // namespace internal } // namespace v8 diff --git a/src/compiler/wasm-compiler.cc b/src/compiler/wasm-compiler.cc index 89f404e840..21a3f6da00 100644 --- a/src/compiler/wasm-compiler.cc +++ b/src/compiler/wasm-compiler.cc @@ -39,6 +39,8 @@ #include "src/logging/counters.h" #include "src/objects/heap-number.h" #include "src/objects/instance-type.h" +#include "src/objects/name.h" +#include "src/objects/string.h" #include "src/roots/roots.h" #include "src/tracing/trace-event.h" #include "src/trap-handler/trap-handler.h" @@ -2965,38 +2967,14 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index, } } -Node* WasmGraphBuilder::BuildLoadExternalPointerFromObject( - Node* object, int offset, ExternalPointerTag tag) { -#ifdef V8_ENABLE_SANDBOX - DCHECK_NE(tag, kExternalPointerNullTag); - DCHECK(!IsSharedExternalPointerType(tag)); - Node* external_pointer = gasm_->LoadFromObject( - MachineType::Uint32(), object, wasm::ObjectAccess::ToTagged(offset)); - static_assert(kExternalPointerIndexShift > kSystemPointerSizeLog2); - Node* shift_amount = - gasm_->Int32Constant(kExternalPointerIndexShift - kSystemPointerSizeLog2); - Node* scaled_index = gasm_->Word32Shr(external_pointer, shift_amount); - Node* isolate_root = BuildLoadIsolateRoot(); - Node* table = - gasm_->LoadFromObject(MachineType::Pointer(), isolate_root, - IsolateData::external_pointer_table_offset() + - Internals::kExternalPointerTableBufferOffset); - Node* decoded_ptr = gasm_->Load(MachineType::Pointer(), table, scaled_index); - return gasm_->WordAnd(decoded_ptr, gasm_->IntPtrConstant(~tag)); -#else - return gasm_->LoadFromObject(MachineType::Pointer(), object, - wasm::ObjectAccess::ToTagged(offset)); -#endif // V8_ENABLE_SANDBOX -} - Node* WasmGraphBuilder::BuildLoadCallTargetFromExportedFunctionData( Node* function) { Node* internal = gasm_->LoadFromObject( MachineType::TaggedPointer(), function, wasm::ObjectAccess::ToTagged(WasmExportedFunctionData::kInternalOffset)); - return BuildLoadExternalPointerFromObject( + return gasm_->BuildLoadExternalPointerFromObject( internal, WasmInternalFunction::kCallTargetOffset, - kWasmInternalFunctionCallTargetTag); + kWasmInternalFunctionCallTargetTag, BuildLoadIsolateRoot()); } // TODO(9495): Support CAPI function refs. 
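// Illustrative sketch, not part of the applied change: a rough model of what
// BuildLoadExternalPointerFromObject expands to when V8_ENABLE_SANDBOX is on
// (constants and table selection simplified; the real helper additionally
// picks the shared external pointer table for shared tags).
#include <cstdint>
uintptr_t DecodeExternalPointer(uint32_t handle, const uintptr_t* table,
                                uint64_t tag, unsigned index_shift) {
  uint32_t index = handle >> index_shift;              // kExternalPointerIndexShift
  return static_cast<uintptr_t>(table[index] & ~tag);  // strip the type tag bits
}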
@@ -3019,9 +2997,9 @@ Node* WasmGraphBuilder::BuildCallRef(const wasm::FunctionSig* sig, MachineType::TaggedPointer(), function, wasm::ObjectAccess::ToTagged(WasmInternalFunction::kRefOffset)); - Node* target = BuildLoadExternalPointerFromObject( + Node* target = gasm_->BuildLoadExternalPointerFromObject( function, WasmInternalFunction::kCallTargetOffset, - kWasmInternalFunctionCallTargetTag); + kWasmInternalFunctionCallTargetTag, BuildLoadIsolateRoot()); Node* is_null_target = gasm_->WordEqual(target, gasm_->IntPtrConstant(0)); gasm_->GotoIfNot(is_null_target, &end_label, target); { @@ -6065,6 +6043,14 @@ Node* WasmGraphBuilder::StringEncodeWtf16(uint32_t memory, Node* string, offset, gasm_->SmiConstant(memory)); } +Node* WasmGraphBuilder::StringAsWtf16(Node* string, CheckForNull null_check, + wasm::WasmCodePosition position) { + if (null_check == kWithNullCheck) { + string = AssertNotNull(string, position); + } + return gasm_->StringAsWtf16(string); +} + Node* WasmGraphBuilder::StringEncodeWtf16Array( Node* string, CheckForNull string_null_check, Node* array, CheckForNull array_null_check, Node* start, @@ -6167,9 +6153,56 @@ Node* WasmGraphBuilder::StringViewWtf16GetCodeUnit( if (null_check == kWithNullCheck) { string = AssertNotNull(string, position); } - return gasm_->CallBuiltin(Builtin::kWasmStringViewWtf16GetCodeUnit, - Operator::kNoDeopt | Operator::kNoThrow, string, - offset); + Node* prepare = gasm_->StringPrepareForGetCodeunit(string); + Node* base = gasm_->Projection(0, prepare); + Node* base_offset = gasm_->Projection(1, prepare); + Node* charwidth_shift = gasm_->Projection(2, prepare); + + // Bounds check. + Node* length = gasm_->LoadImmutableFromObject( + MachineType::Int32(), string, + wasm::ObjectAccess::ToTagged(String::kLengthOffset)); + TrapIfFalse(wasm::kTrapStringOffsetOutOfBounds, + gasm_->Uint32LessThan(offset, length), position); + + auto onebyte = gasm_->MakeLabel(); + auto bailout = gasm_->MakeDeferredLabel(); + auto done = gasm_->MakeLabel(MachineRepresentation::kWord32); + gasm_->GotoIf( + gasm_->Word32Equal(charwidth_shift, + gasm_->Int32Constant(kCharWidthBailoutSentinel)), + &bailout); + gasm_->GotoIf(gasm_->Word32Equal(charwidth_shift, gasm_->Int32Constant(0)), + &onebyte); + + // Two-byte. + Node* object_offset = + gasm_->IntAdd(gasm_->IntMul(gasm_->BuildChangeInt32ToIntPtr(offset), + gasm_->IntPtrConstant(2)), + base_offset); + Node* result = gasm_->LoadImmutableFromObject(MachineType::Uint16(), base, + object_offset); + gasm_->Goto(&done, result); + + // One-byte. + gasm_->Bind(&onebyte); + object_offset = + gasm_->IntAdd(gasm_->BuildChangeInt32ToIntPtr(offset), base_offset); + result = + gasm_->LoadImmutableFromObject(MachineType::Uint8(), base, object_offset); + gasm_->Goto(&done, result); + + gasm_->Bind(&bailout); + gasm_->Goto(&done, + gasm_->CallBuiltin(Builtin::kWasmStringViewWtf16GetCodeUnit, + Operator::kPure, string, offset)); + + gasm_->Bind(&done); + // Make sure the original string is kept alive as long as we're operating + // on pointers extracted from it (otherwise e.g. external strings' resources + // might get freed prematurely). 
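// Illustrative sketch, not part of the applied change: the fast path above in
// scalar form. The code unit at index i lives at
// base + base_offset + (i << charwidth_shift); the kCharWidthBailoutSentinel
// case is handled by the builtin call instead.
#include <cstdint>
uint16_t LoadCodeUnit(const uint8_t* base, intptr_t base_offset,
                      int charwidth_shift, uint32_t index) {
  const uint8_t* p =
      base + base_offset + (static_cast<intptr_t>(index) << charwidth_shift);
  // One-byte strings use a byte load; two-byte strings a (2-aligned) u16 load.
  return charwidth_shift == 0 ? *p : *reinterpret_cast<const uint16_t*>(p);
}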
+ gasm_->Retain(string); + return done.PhiAt(0); } Node* WasmGraphBuilder::StringViewWtf16Encode(uint32_t memory, Node* string, @@ -6256,6 +6289,40 @@ Node* WasmGraphBuilder::StringFromCodePoint(Node* code_point) { Operator::kEliminatable, code_point); } +Node* WasmGraphBuilder::StringHash(Node* string, CheckForNull null_check, + wasm::WasmCodePosition position) { + if (null_check == kWithNullCheck) { + string = AssertNotNull(string, position); + } + + auto runtime_label = gasm_->MakeLabel(); + auto end_label = gasm_->MakeLabel(MachineRepresentation::kWord32); + + Node* raw_hash = gasm_->LoadFromObject( + MachineType::Int32(), string, + wasm::ObjectAccess::ToTagged(Name::kRawHashFieldOffset)); + Node* hash_not_computed_mask = + gasm_->Int32Constant(static_cast(Name::kHashNotComputedMask)); + static_assert(Name::HashFieldTypeBits::kShift == 0); + Node* hash_not_computed = gasm_->Word32And(raw_hash, hash_not_computed_mask); + gasm_->GotoIf(hash_not_computed, &runtime_label); + + // Fast path if hash is already computed: Decode raw hash value. + static_assert(Name::HashBits::kLastUsedBit == kBitsPerInt - 1); + Node* hash = gasm_->Word32Shr( + raw_hash, + gasm_->Int32Constant(static_cast(Name::HashBits::kShift))); + gasm_->Goto(&end_label, hash); + + gasm_->Bind(&runtime_label); + Node* hash_runtime = gasm_->CallBuiltin(Builtin::kWasmStringHash, + Operator::kEliminatable, string); + gasm_->Goto(&end_label, hash_runtime); + + gasm_->Bind(&end_label); + return end_label.PhiAt(0); +} + Node* WasmGraphBuilder::I31New(Node* input) { if constexpr (SmiValuesAre31Bits()) { return gasm_->Word32Shl(input, gasm_->BuildSmiShiftBitsConstant32()); @@ -6870,9 +6937,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { Node* internal = gasm_->LoadFromObject( MachineType::TaggedPointer(), function_data, wasm::ObjectAccess::ToTagged(WasmFunctionData::kInternalOffset)); - args[0] = BuildLoadExternalPointerFromObject( + args[0] = gasm_->BuildLoadExternalPointerFromObject( internal, WasmInternalFunction::kCallTargetOffset, - kWasmInternalFunctionCallTargetTag); + kWasmInternalFunctionCallTargetTag, BuildLoadIsolateRoot()); Node* instance_node = gasm_->LoadFromObject( MachineType::TaggedPointer(), internal, wasm::ObjectAccess::ToTagged(WasmInternalFunction::kRefOffset)); diff --git a/src/compiler/wasm-compiler.h b/src/compiler/wasm-compiler.h index c7ed5d6ddf..826fd8507b 100644 --- a/src/compiler/wasm-compiler.h +++ b/src/compiler/wasm-compiler.h @@ -534,6 +534,8 @@ class WasmGraphBuilder { Node* start, Node* end); Node* StringNewWtf16(uint32_t memory, Node* offset, Node* size); Node* StringNewWtf16Array(Node* array, Node* start, Node* end); + Node* StringAsWtf16(Node* string, CheckForNull null_check, + wasm::WasmCodePosition position); Node* StringConst(uint32_t index); Node* StringMeasureUtf8(Node* string, CheckForNull null_check, wasm::WasmCodePosition position); @@ -596,6 +598,8 @@ class WasmGraphBuilder { CheckForNull null_check_rhs, wasm::WasmCodePosition position); Node* StringFromCodePoint(Node* code_point); + Node* StringHash(Node* string, CheckForNull null_check, + wasm::WasmCodePosition position); Node* IsNull(Node* object); Node* TypeGuard(Node* value, wasm::ValueType type); @@ -821,10 +825,6 @@ class WasmGraphBuilder { Node* BuildMultiReturnFixedArrayFromIterable(const wasm::FunctionSig* sig, Node* iterable, Node* context); - Node* BuildLoadExternalPointerFromObject( - Node* object, int offset, - ExternalPointerTag tag = kForeignForeignAddressTag); - Node* 
BuildLoadCallTargetFromExportedFunctionData(Node* function_data); //----------------------------------------------------------------------- diff --git a/src/compiler/wasm-gc-lowering.cc b/src/compiler/wasm-gc-lowering.cc index abb623f79c..80dd35c5f1 100644 --- a/src/compiler/wasm-gc-lowering.cc +++ b/src/compiler/wasm-gc-lowering.cc @@ -74,16 +74,24 @@ Reduction WasmGCLowering::Reduce(Node* node) { return ReduceWasmArrayLength(node); case IrOpcode::kWasmArrayInitializeLength: return ReduceWasmArrayInitializeLength(node); + case IrOpcode::kStringAsWtf16: + return ReduceStringAsWtf16(node); + case IrOpcode::kStringPrepareForGetCodeunit: + return ReduceStringPrepareForGetCodeunit(node); default: return NoChange(); } } -Node* WasmGCLowering::RootNode(RootIndex index) { +Node* WasmGCLowering::IsolateRoot() { // TODO(13449): Use root register instead of isolate. - Node* isolate_root = gasm_.LoadImmutable( + return gasm_.LoadImmutable( MachineType::Pointer(), instance_node_, WasmInstanceObject::kIsolateRootOffset - kHeapObjectTag); +} + +Node* WasmGCLowering::RootNode(RootIndex index) { + Node* isolate_root = IsolateRoot(); return gasm_.LoadImmutable(MachineType::Pointer(), isolate_root, IsolateData::root_slot_offset(index)); } @@ -445,6 +453,204 @@ Reduction WasmGCLowering::ReduceWasmArrayInitializeLength(Node* node) { return Replace(set_length); } +Reduction WasmGCLowering::ReduceStringAsWtf16(Node* node) { + DCHECK_EQ(node->opcode(), IrOpcode::kStringAsWtf16); + Node* effect = NodeProperties::GetEffectInput(node); + Node* control = NodeProperties::GetControlInput(node); + Node* str = NodeProperties::GetValueInput(node, 0); + + gasm_.InitializeEffectControl(effect, control); + + auto done = gasm_.MakeLabel(MachineRepresentation::kTaggedPointer); + Node* instance_type = gasm_.LoadInstanceType(gasm_.LoadMap(str)); + Node* string_representation = gasm_.Word32And( + instance_type, gasm_.Int32Constant(kStringRepresentationMask)); + gasm_.GotoIf(gasm_.Word32Equal(string_representation, + gasm_.Int32Constant(kSeqStringTag)), + &done, str); + gasm_.Goto(&done, gasm_.CallBuiltin(Builtin::kWasmStringAsWtf16, + Operator::kPure, str)); + gasm_.Bind(&done); + ReplaceWithValue(node, done.PhiAt(0), gasm_.effect(), gasm_.control()); + node->Kill(); + return Replace(done.PhiAt(0)); +} + +Reduction WasmGCLowering::ReduceStringPrepareForGetCodeunit(Node* node) { + DCHECK_EQ(node->opcode(), IrOpcode::kStringPrepareForGetCodeunit); + Node* effect = NodeProperties::GetEffectInput(node); + Node* control = NodeProperties::GetControlInput(node); + Node* original_string = NodeProperties::GetValueInput(node, 0); + + gasm_.InitializeEffectControl(effect, control); + + auto dispatch = + gasm_.MakeLoopLabel(MachineRepresentation::kTaggedPointer, // String. + MachineRepresentation::kWord32, // Instance type. + MachineRepresentation::kWord32); // Offset. + auto next = gasm_.MakeLabel(MachineRepresentation::kTaggedPointer, // String. + MachineRepresentation::kWord32, // Instance type. + MachineRepresentation::kWord32); // Offset. + auto direct_string = + gasm_.MakeLabel(MachineRepresentation::kTaggedPointer, // String. + MachineRepresentation::kWord32, // Instance type. + MachineRepresentation::kWord32); // Offset. + + // These values will be used to replace the original node's projections. + // The first, "string", is either a SeqString or Smi(0) (in case of external + // string). Notably this makes it GC-safe: if that string moves, this pointer + // will be updated accordingly. 
+ // The second, "offset", has full register width so that it can be used to + // store external pointers: for external strings, we add up the character + // backing store's base address and any slice offset. + // The third, "character width", is a shift width, i.e. it is 0 for one-byte + // strings, 1 for two-byte strings, kCharWidthBailoutSentinel for uncached + // external strings (for which "string"/"offset" are invalid and unusable). + auto done = + gasm_.MakeLabel(MachineRepresentation::kTagged, // String. + MachineType::PointerRepresentation(), // Offset. + MachineRepresentation::kWord32); // Character width. + + Node* original_type = gasm_.LoadInstanceType(gasm_.LoadMap(original_string)); + gasm_.Goto(&dispatch, original_string, original_type, gasm_.Int32Constant(0)); + + gasm_.Bind(&dispatch); + { + auto thin_string = gasm_.MakeLabel(MachineRepresentation::kTaggedPointer); + auto cons_string = gasm_.MakeLabel(MachineRepresentation::kTaggedPointer); + + Node* string = dispatch.PhiAt(0); + Node* instance_type = dispatch.PhiAt(1); + Node* offset = dispatch.PhiAt(2); + static_assert(kIsIndirectStringTag == 1); + static constexpr int kIsDirectStringTag = 0; + gasm_.GotoIf(gasm_.Word32Equal( + gasm_.Word32And(instance_type, gasm_.Int32Constant( + kIsIndirectStringMask)), + gasm_.Int32Constant(kIsDirectStringTag)), + &direct_string, string, instance_type, offset); + + // Handle indirect strings. + Node* string_representation = gasm_.Word32And( + instance_type, gasm_.Int32Constant(kStringRepresentationMask)); + gasm_.GotoIf(gasm_.Word32Equal(string_representation, + gasm_.Int32Constant(kThinStringTag)), + &thin_string, string); + gasm_.GotoIf(gasm_.Word32Equal(string_representation, + gasm_.Int32Constant(kConsStringTag)), + &cons_string, string); + + // Sliced string. + Node* new_offset = gasm_.Int32Add( + offset, + gasm_.BuildChangeSmiToInt32(gasm_.LoadImmutableFromObject( + MachineType::TaggedSigned(), string, + wasm::ObjectAccess::ToTagged(SlicedString::kOffsetOffset)))); + Node* parent = gasm_.LoadImmutableFromObject( + MachineType::TaggedPointer(), string, + wasm::ObjectAccess::ToTagged(SlicedString::kParentOffset)); + Node* parent_type = gasm_.LoadInstanceType(gasm_.LoadMap(parent)); + gasm_.Goto(&next, parent, parent_type, new_offset); + + // Thin string. + gasm_.Bind(&thin_string); + Node* actual = gasm_.LoadImmutableFromObject( + MachineType::TaggedPointer(), string, + wasm::ObjectAccess::ToTagged(ThinString::kActualOffset)); + Node* actual_type = gasm_.LoadInstanceType(gasm_.LoadMap(actual)); + // ThinStrings always reference (internalized) direct strings. + gasm_.Goto(&direct_string, actual, actual_type, offset); + + // Flat cons string. (Non-flat cons strings are ruled out by + // string.as_wtf16.) + gasm_.Bind(&cons_string); + Node* first = gasm_.LoadImmutableFromObject( + MachineType::TaggedPointer(), string, + wasm::ObjectAccess::ToTagged(ConsString::kFirstOffset)); + Node* first_type = gasm_.LoadInstanceType(gasm_.LoadMap(first)); + gasm_.Goto(&next, first, first_type, offset); + + gasm_.Bind(&next); + gasm_.Goto(&dispatch, next.PhiAt(0), next.PhiAt(1), next.PhiAt(2)); + } + + gasm_.Bind(&direct_string); + { + Node* string = direct_string.PhiAt(0); + Node* instance_type = direct_string.PhiAt(1); + Node* offset = direct_string.PhiAt(2); + + Node* is_onebyte = gasm_.Word32And( + instance_type, gasm_.Int32Constant(kStringEncodingMask)); + // Char width shift is 1 - (is_onebyte). 
+ static_assert(kStringEncodingMask == 1 << 3); + Node* charwidth_shift = + gasm_.Int32Sub(gasm_.Int32Constant(1), + gasm_.Word32Shr(is_onebyte, gasm_.Int32Constant(3))); + + auto external = gasm_.MakeLabel(); + Node* string_representation = gasm_.Word32And( + instance_type, gasm_.Int32Constant(kStringRepresentationMask)); + gasm_.GotoIf(gasm_.Word32Equal(string_representation, + gasm_.Int32Constant(kExternalStringTag)), + &external); + + // Sequential string. + static_assert(SeqOneByteString::kCharsOffset == + SeqTwoByteString::kCharsOffset); + Node* final_offset = gasm_.Int32Add( + gasm_.Int32Constant( + wasm::ObjectAccess::ToTagged(SeqOneByteString::kCharsOffset)), + gasm_.Word32Shl(offset, charwidth_shift)); + gasm_.Goto(&done, string, gasm_.BuildChangeInt32ToIntPtr(final_offset), + charwidth_shift); + + // External string. + gasm_.Bind(&external); + gasm_.GotoIf( + gasm_.Word32And(instance_type, + gasm_.Int32Constant(kUncachedExternalStringMask)), + &done, string, gasm_.IntPtrConstant(0), + gasm_.Int32Constant(kCharWidthBailoutSentinel)); + Node* resource = gasm_.BuildLoadExternalPointerFromObject( + string, ExternalString::kResourceDataOffset, + kExternalStringResourceDataTag, IsolateRoot()); + Node* shifted_offset = gasm_.Word32Shl(offset, charwidth_shift); + final_offset = gasm_.IntPtrAdd( + resource, gasm_.BuildChangeInt32ToIntPtr(shifted_offset)); + gasm_.Goto(&done, gasm_.SmiConstant(0), final_offset, charwidth_shift); + } + + gasm_.Bind(&done); + Node* base = done.PhiAt(0); + Node* final_offset = done.PhiAt(1); + Node* charwidth_shift = done.PhiAt(2); + + Node* base_proj = NodeProperties::FindProjection(node, 0); + Node* offset_proj = NodeProperties::FindProjection(node, 1); + Node* charwidth_proj = NodeProperties::FindProjection(node, 2); + if (base_proj) { + ReplaceWithValue(base_proj, base, gasm_.effect(), gasm_.control()); + base_proj->Kill(); + } + if (offset_proj) { + ReplaceWithValue(offset_proj, final_offset, gasm_.effect(), + gasm_.control()); + offset_proj->Kill(); + } + if (charwidth_proj) { + ReplaceWithValue(charwidth_proj, charwidth_shift, gasm_.effect(), + gasm_.control()); + charwidth_proj->Kill(); + } + + // Wire up the dangling end of the new effect chain. 
+ ReplaceWithValue(node, node, gasm_.effect(), gasm_.control()); + + node->Kill(); + return Replace(base); +} + } // namespace compiler } // namespace internal } // namespace v8 diff --git a/src/compiler/wasm-gc-lowering.h b/src/compiler/wasm-gc-lowering.h index b3dbc8d7fd..24f4468220 100644 --- a/src/compiler/wasm-gc-lowering.h +++ b/src/compiler/wasm-gc-lowering.h @@ -45,9 +45,14 @@ class WasmGCLowering final : public AdvancedReducer { Reduction ReduceWasmArraySet(Node* node); Reduction ReduceWasmArrayLength(Node* node); Reduction ReduceWasmArrayInitializeLength(Node* node); + Reduction ReduceStringAsWtf16(Node* node); + Reduction ReduceStringPrepareForGetCodeunit(Node* node); + Node* IsolateRoot(); Node* RootNode(RootIndex index); Node* Null(); Node* IsNull(Node* object); + Node* BuildLoadExternalPointerFromObject(Node* object, int offset, + ExternalPointerTag tag); WasmGraphAssembler gasm_; const wasm::WasmModule* module_; Node* dead_; diff --git a/src/compiler/wasm-gc-operator-reducer.cc b/src/compiler/wasm-gc-operator-reducer.cc index e30e9d8128..c01b2b0854 100644 --- a/src/compiler/wasm-gc-operator-reducer.cc +++ b/src/compiler/wasm-gc-operator-reducer.cc @@ -40,6 +40,8 @@ Reduction WasmGCOperatorReducer::Reduce(Node* node) { return ReduceIf(node, true); case IrOpcode::kIfFalse: return ReduceIf(node, false); + case IrOpcode::kDead: + return NoChange(); case IrOpcode::kLoop: return TakeStatesFromFirstControl(node); default: diff --git a/src/compiler/wasm-graph-assembler.cc b/src/compiler/wasm-graph-assembler.cc index a383f923de..a45f8f0594 100644 --- a/src/compiler/wasm-graph-assembler.cc +++ b/src/compiler/wasm-graph-assembler.cc @@ -173,6 +173,36 @@ Node* WasmGraphAssembler::InitializeImmutableInObject(ObjectAccess access, offset, value, effect(), control())); } +Node* WasmGraphAssembler::BuildLoadExternalPointerFromObject( + Node* object, int offset, ExternalPointerTag tag, Node* isolate_root) { +#ifdef V8_ENABLE_SANDBOX + DCHECK_NE(tag, kExternalPointerNullTag); + Node* external_pointer = LoadFromObject(MachineType::Uint32(), object, + wasm::ObjectAccess::ToTagged(offset)); + static_assert(kExternalPointerIndexShift > kSystemPointerSizeLog2); + Node* shift_amount = + Int32Constant(kExternalPointerIndexShift - kSystemPointerSizeLog2); + Node* scaled_index = Word32Shr(external_pointer, shift_amount); + Node* table; + if (IsSharedExternalPointerType(tag)) { + Node* table_address = + LoadFromObject(MachineType::Pointer(), isolate_root, + IsolateData::shared_external_pointer_table_offset()); + table = LoadFromObject(MachineType::Pointer(), table_address, + Internals::kExternalPointerTableBufferOffset); + } else { + table = LoadFromObject(MachineType::Pointer(), isolate_root, + IsolateData::external_pointer_table_offset() + + Internals::kExternalPointerTableBufferOffset); + } + Node* decoded_ptr = Load(MachineType::Pointer(), table, scaled_index); + return WordAnd(decoded_ptr, IntPtrConstant(~tag)); +#else + return LoadFromObject(MachineType::Pointer(), object, + wasm::ObjectAccess::ToTagged(offset)); +#endif // V8_ENABLE_SANDBOX +} + Node* WasmGraphAssembler::IsI31(Node* object) { if (COMPRESS_POINTERS_BOOL) { return Word32Equal(Word32And(object, Int32Constant(kSmiTagMask)), @@ -409,6 +439,16 @@ void WasmGraphAssembler::ArrayInitializeLength(Node* array, Node* length) { length, effect(), control())); } +Node* WasmGraphAssembler::StringAsWtf16(Node* string) { + return AddNode(graph()->NewNode(simplified_.StringAsWtf16(), string, effect(), + control())); +} + +Node* 
WasmGraphAssembler::StringPrepareForGetCodeunit(Node* string) { + return AddNode(graph()->NewNode(simplified_.StringPrepareForGetCodeunit(), + string, effect(), control())); +} + // Generic HeapObject helpers. Node* WasmGraphAssembler::HasInstanceType(Node* heap_object, diff --git a/src/compiler/wasm-graph-assembler.h b/src/compiler/wasm-graph-assembler.h index b22d39d402..bf8017d1e6 100644 --- a/src/compiler/wasm-graph-assembler.h +++ b/src/compiler/wasm-graph-assembler.h @@ -159,6 +159,10 @@ class WasmGraphAssembler : public GraphAssembler { value); } + Node* BuildLoadExternalPointerFromObject(Node* object, int offset, + ExternalPointerTag tag, + Node* isolate_root); + Node* IsI31(Node* object); // Maps and their contents. @@ -270,6 +274,10 @@ class WasmGraphAssembler : public GraphAssembler { void ArrayInitializeLength(Node* array, Node* length); + Node* StringAsWtf16(Node* string); + + Node* StringPrepareForGetCodeunit(Node* string); + // Generic helpers. Node* HasInstanceType(Node* heap_object, InstanceType type); diff --git a/src/compiler/wasm-load-elimination.cc b/src/compiler/wasm-load-elimination.cc index 00eff15ba0..74c9f24446 100644 --- a/src/compiler/wasm-load-elimination.cc +++ b/src/compiler/wasm-load-elimination.cc @@ -53,8 +53,11 @@ Node* ResolveAliases(Node* node) { return node; } -// We model array length as a field at index kArrayLengthFieldIndex. +// We model array length and string canonicalization as fields at negative +// indices. constexpr int kArrayLengthFieldIndex = -1; +constexpr int kStringPrepareForGetCodeunitIndex = -2; +constexpr int kStringAsWtf16Index = -3; } // namespace Reduction WasmLoadElimination::UpdateState(Node* node, @@ -121,6 +124,10 @@ Reduction WasmLoadElimination::Reduce(Node* node) { return ReduceWasmArrayLength(node); case IrOpcode::kWasmArrayInitializeLength: return ReduceWasmArrayInitializeLength(node); + case IrOpcode::kStringPrepareForGetCodeunit: + return ReduceStringPrepareForGetCodeunit(node); + case IrOpcode::kStringAsWtf16: + return ReduceStringAsWtf16(node); case IrOpcode::kEffectPhi: return ReduceEffectPhi(node); case IrOpcode::kDead: @@ -284,6 +291,70 @@ Reduction WasmLoadElimination::ReduceWasmArrayInitializeLength(Node* node) { return UpdateState(node, new_state); } +Reduction WasmLoadElimination::ReduceStringPrepareForGetCodeunit(Node* node) { + DCHECK_EQ(node->opcode(), IrOpcode::kStringPrepareForGetCodeunit); + Node* object = ResolveAliases(NodeProperties::GetValueInput(node, 0)); + Node* effect = NodeProperties::GetEffectInput(node); + Node* control = NodeProperties::GetControlInput(node); + + AbstractState const* state = node_states_.Get(effect); + if (state == nullptr) return NoChange(); + + HalfState const* mutable_state = &state->mutable_state; + + FieldOrElementValue lookup_result = + mutable_state->LookupField(kStringPrepareForGetCodeunitIndex, object); + + if (!lookup_result.IsEmpty() && !lookup_result.value->IsDead()) { + for (size_t i : {0, 1, 2}) { + Node* proj_to_replace = NodeProperties::FindProjection(node, i); + ReplaceWithValue(proj_to_replace, + NodeProperties::FindProjection(lookup_result.value, i)); + proj_to_replace->Kill(); + } + ReplaceWithValue(node, lookup_result.value, effect, control); + node->Kill(); + return Replace(lookup_result.value); + } + + mutable_state = + mutable_state->AddField(kStringPrepareForGetCodeunitIndex, object, node); + + AbstractState const* new_state = + zone()->New(*mutable_state, state->immutable_state); + + return UpdateState(node, new_state); +} + +Reduction 
WasmLoadElimination::ReduceStringAsWtf16(Node* node) { + DCHECK_EQ(node->opcode(), IrOpcode::kStringAsWtf16); + Node* object = ResolveAliases(NodeProperties::GetValueInput(node, 0)); + Node* effect = NodeProperties::GetEffectInput(node); + Node* control = NodeProperties::GetControlInput(node); + + AbstractState const* state = node_states_.Get(effect); + if (state == nullptr) return NoChange(); + + HalfState const* immutable_state = &state->immutable_state; + + FieldOrElementValue lookup_result = + immutable_state->LookupField(kStringAsWtf16Index, object); + + if (!lookup_result.IsEmpty() && !lookup_result.value->IsDead()) { + ReplaceWithValue(node, lookup_result.value, effect, control); + node->Kill(); + return Replace(lookup_result.value); + } + + immutable_state = + immutable_state->AddField(kStringAsWtf16Index, object, node); + + AbstractState const* new_state = + zone()->New(state->mutable_state, *immutable_state); + + return UpdateState(node, new_state); +} + Reduction WasmLoadElimination::ReduceOtherNode(Node* node) { if (node->op()->EffectOutputCount() == 0) return NoChange(); DCHECK_EQ(node->op()->EffectInputCount(), 1); @@ -296,6 +367,10 @@ Reduction WasmLoadElimination::ReduceOtherNode(Node* node) { // If this {node} has some uncontrolled side effects (i.e. it is a call // without {kNoWrite}), set its state to the immutable half-state of its // input state, otherwise to its input state. + // Any cached StringPrepareForGetCodeUnit nodes must be killed at any point + // that can cause internalization of strings (i.e. that can turn sequential + // strings into thin strings). Currently, that can only happen in JS, so + // from Wasm's point of view only in calls. return UpdateState(node, node->opcode() == IrOpcode::kCall && !node->op()->HasProperty(Operator::kNoWrite) ? zone()->New( @@ -308,6 +383,7 @@ Reduction WasmLoadElimination::ReduceStart(Node* node) { } Reduction WasmLoadElimination::ReduceEffectPhi(Node* node) { + DCHECK_EQ(node->opcode(), IrOpcode::kEffectPhi); Node* const effect0 = NodeProperties::GetEffectInput(node, 0); Node* const control = NodeProperties::GetControlInput(node); AbstractState const* state0 = node_states_.Get(effect0); diff --git a/src/compiler/wasm-load-elimination.h b/src/compiler/wasm-load-elimination.h index 1615b38eeb..76be09d27c 100644 --- a/src/compiler/wasm-load-elimination.h +++ b/src/compiler/wasm-load-elimination.h @@ -84,8 +84,6 @@ class V8_EXPORT_PRIVATE WasmLoadElimination final map.Set(outer_key, map_copy); } - static void KillField(int field_index, Node* object, - MachineRepresentation repr, Zone* zone); static void Print(const FieldInfos& infos); static void Print(const ElementInfos& infos); @@ -120,6 +118,8 @@ class V8_EXPORT_PRIVATE WasmLoadElimination final Reduction ReduceWasmStructSet(Node* node); Reduction ReduceWasmArrayLength(Node* node); Reduction ReduceWasmArrayInitializeLength(Node* node); + Reduction ReduceStringPrepareForGetCodeunit(Node* node); + Reduction ReduceStringAsWtf16(Node* node); Reduction ReduceEffectPhi(Node* node); Reduction ReduceStart(Node* node); Reduction ReduceOtherNode(Node* node); diff --git a/src/d8/d8.cc b/src/d8/d8.cc index 598b985298..8f0f71b5be 100644 --- a/src/d8/d8.cc +++ b/src/d8/d8.cc @@ -5686,6 +5686,14 @@ int Shell::Main(int argc, char* argv[]) { } #endif // V8_ENABLE_WEBASSEMBLY + if (i::v8_flags.experimental) { + // This message is printed to stderr so that it is also visible in + // Clusterfuzz reports. + fprintf(stderr, + "V8 is running with experimental features enabled. 
Stability and " + "security will suffer.\n"); + } + Isolate* isolate = Isolate::New(create_params); #ifdef V8_FUZZILLI diff --git a/src/deoptimizer/translated-state.cc b/src/deoptimizer/translated-state.cc index 93281ef7ab..00b64e12c0 100644 --- a/src/deoptimizer/translated-state.cc +++ b/src/deoptimizer/translated-state.cc @@ -1428,7 +1428,7 @@ int TranslatedState::CreateNextTranslatedValue( Address TranslatedState::DecompressIfNeeded(intptr_t value) { if (COMPRESS_POINTERS_BOOL) { - return V8HeapCompressionScheme::DecompressTaggedAny( + return V8HeapCompressionScheme::DecompressTagged( isolate(), static_cast(value)); } else { return value; diff --git a/src/diagnostics/objects-printer.cc b/src/diagnostics/objects-printer.cc index 735c3900f7..a702761dda 100644 --- a/src/diagnostics/objects-printer.cc +++ b/src/diagnostics/objects-printer.cc @@ -1973,8 +1973,7 @@ void WasmStruct::WasmStructPrint(std::ostream& os) { case wasm::kRtt: { Tagged_t raw = base::ReadUnalignedValue(field_address); #if V8_COMPRESS_POINTERS - Address obj = - V8HeapCompressionScheme::DecompressTaggedPointer(address(), raw); + Address obj = V8HeapCompressionScheme::DecompressTagged(address(), raw); #else Address obj = raw; #endif @@ -3000,7 +2999,7 @@ inline i::Object GetObjectFromRaw(void* object) { if (RoundDown(object_ptr) == i::kNullAddress) { // Try to decompress pointer. i::Isolate* isolate = i::Isolate::Current(); - object_ptr = i::V8HeapCompressionScheme::DecompressTaggedAny( + object_ptr = i::V8HeapCompressionScheme::DecompressTagged( isolate, static_cast(object_ptr)); } #endif diff --git a/src/diagnostics/perf-jit.cc b/src/diagnostics/perf-jit.cc index c6b88458bb..21d6878b56 100644 --- a/src/diagnostics/perf-jit.cc +++ b/src/diagnostics/perf-jit.cc @@ -117,7 +117,6 @@ const char LinuxPerfJitLogger::kFilenameFormatString[] = "./jit-%d.dump"; const int LinuxPerfJitLogger::kFilenameBufferPadding = 16; static const char kStringTerminator[] = {'\0'}; -static const char kRepeatedNameMarker[] = {'\xff', '\0'}; base::LazyRecursiveMutex LinuxPerfJitLogger::file_mutex_; // The following static variables are protected by @@ -214,11 +213,11 @@ uint64_t LinuxPerfJitLogger::GetTimestamp() { } void LinuxPerfJitLogger::LogRecordedBuffer( - Handle abstract_code, - MaybeHandle maybe_shared, const char* name, - int length) { + AbstractCode abstract_code, MaybeHandle maybe_shared, + const char* name, int length) { + DisallowGarbageCollection no_gc; if (v8_flags.perf_basic_prof_only_functions) { - CodeKind code_kind = abstract_code->kind(isolate_); + CodeKind code_kind = abstract_code.kind(isolate_); if (code_kind != CodeKind::INTERPRETED_FUNCTION && code_kind != CodeKind::TURBOFAN && code_kind != CodeKind::MAGLEV && code_kind != CodeKind::BASELINE) { @@ -231,26 +230,27 @@ void LinuxPerfJitLogger::LogRecordedBuffer( if (perf_output_handle_ == nullptr) return; // We only support non-interpreted functions. - if (!abstract_code->IsCode(isolate_)) return; - Handle code = Handle::cast(abstract_code); + if (!abstract_code.IsCode(isolate_)) return; + Code code = Code::cast(abstract_code); // Debug info has to be emitted first. Handle shared; if (v8_flags.perf_prof && maybe_shared.ToHandle(&shared)) { // TODO(herhut): This currently breaks for js2wasm/wasm2js functions. 
- if (code->kind() != CodeKind::JS_TO_WASM_FUNCTION && - code->kind() != CodeKind::WASM_TO_JS_FUNCTION) { + CodeKind kind = code.kind(); + if (kind != CodeKind::JS_TO_WASM_FUNCTION && + kind != CodeKind::WASM_TO_JS_FUNCTION) { LogWriteDebugInfo(code, shared); } } const char* code_name = name; - uint8_t* code_pointer = reinterpret_cast(code->InstructionStart()); + uint8_t* code_pointer = reinterpret_cast(code.InstructionStart()); // Unwinding info comes right after debug info. - if (v8_flags.perf_prof_unwinding_info) LogWriteUnwindingInfo(*code); + if (v8_flags.perf_prof_unwinding_info) LogWriteUnwindingInfo(code); - WriteJitCodeLoadEntry(code_pointer, code->InstructionSize(), code_name, + WriteJitCodeLoadEntry(code_pointer, code.InstructionSize(), code_name, length); } @@ -319,11 +319,11 @@ base::Vector GetScriptName(Object maybeScript, } // namespace -SourcePositionInfo GetSourcePositionInfo(Isolate* isolate, Handle code, +SourcePositionInfo GetSourcePositionInfo(Isolate* isolate, Code code, Handle function, SourcePosition pos) { DisallowGarbageCollection disallow; - if (code->is_turbofanned()) { + if (code.is_turbofanned()) { return pos.FirstInfo(isolate, code); } else { return SourcePositionInfo(pos, function); @@ -332,23 +332,25 @@ SourcePositionInfo GetSourcePositionInfo(Isolate* isolate, Handle code, } // namespace -void LinuxPerfJitLogger::LogWriteDebugInfo(Handle code, +void LinuxPerfJitLogger::LogWriteDebugInfo(Code code, Handle shared) { // Line ends of all scripts have been initialized prior to this. DisallowGarbageCollection no_gc; // The WasmToJS wrapper stubs have source position entries. - if (!shared->HasSourceCode()) return; + SharedFunctionInfo raw_shared = *shared; + if (!raw_shared.HasSourceCode()) return; PerfJitCodeDebugInfo debug_info; uint32_t size = sizeof(debug_info); ByteArray source_position_table = - code->SourcePositionTable(isolate_, *shared); + code.SourcePositionTable(isolate_, raw_shared); // Compute the entry count and get the names of all scripts. // Avoid additional work if the script name is repeated. Multiple script // names only occur for cross-script inlining. uint32_t entry_count = 0; Object last_script = Smi::zero(); + size_t last_script_name_size = 0; std::vector> script_names; for (SourcePositionTableIterator iterator(source_position_table); !iterator.done(); iterator.Advance()) { @@ -357,13 +359,15 @@ void LinuxPerfJitLogger::LogWriteDebugInfo(Handle code, Object current_script = *info.script; if (current_script != last_script) { std::unique_ptr name_storage; - auto name = GetScriptName(shared->script(), &name_storage, no_gc); + auto name = GetScriptName(raw_shared.script(), &name_storage, no_gc); script_names.push_back(name); // Add the size of the name after each entry. - size += name.size() + sizeof(kStringTerminator); + last_script_name_size = name.size() + sizeof(kStringTerminator); + size += last_script_name_size; last_script = current_script; } else { - size += sizeof(kRepeatedNameMarker); + DCHECK_LT(0, last_script_name_size); + size += last_script_name_size; } entry_count++; } @@ -371,7 +375,7 @@ void LinuxPerfJitLogger::LogWriteDebugInfo(Handle code, debug_info.event_ = PerfJitCodeLoad::kDebugInfo; debug_info.time_stamp_ = GetTimestamp(); - debug_info.address_ = code->InstructionStart(); + debug_info.address_ = code.InstructionStart(); debug_info.entry_count_ = entry_count; // Add the sizes of fixed parts of entries. 
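// Illustrative sketch, not part of the applied change: the size accounting
// above in isolation. Every entry is now charged a fixed part plus its full
// script name and terminator; repeated names are written out again instead of
// being abbreviated with the old 0xff marker.
#include <cstddef>
#include <string>
#include <vector>
size_t DebugInfoPayloadSize(const std::vector<std::string>& name_per_entry,
                            size_t fixed_entry_size) {
  size_t size = 0;
  for (const std::string& name : name_per_entry) {
    size += fixed_entry_size + name.size() + 1;  // +1 for the '\0' terminator
  }
  return size;
}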
@@ -381,7 +385,7 @@ void LinuxPerfJitLogger::LogWriteDebugInfo(Handle code, debug_info.size_ = size + padding; LogWriteBytes(reinterpret_cast(&debug_info), sizeof(debug_info)); - Address code_start = code->InstructionStart(); + Address code_start = code.InstructionStart(); last_script = Smi::zero(); int script_names_index = 0; @@ -398,16 +402,13 @@ void LinuxPerfJitLogger::LogWriteDebugInfo(Handle code, entry.column_ = info.column + 1; LogWriteBytes(reinterpret_cast(&entry), sizeof(entry)); Object current_script = *info.script; + auto name_string = script_names[script_names_index]; + LogWriteBytes(name_string.begin(), + static_cast(name_string.size())); + LogWriteBytes(kStringTerminator, sizeof(kStringTerminator)); if (current_script != last_script) { - auto name_string = script_names[script_names_index]; - LogWriteBytes(name_string.begin(), - static_cast(name_string.size())); - LogWriteBytes(kStringTerminator, sizeof(kStringTerminator)); - script_names_index++; + if (last_script != Smi::zero()) script_names_index++; last_script = current_script; - } else { - // Use the much shorter kRepeatedNameMarker for repeated names. - LogWriteBytes(kRepeatedNameMarker, sizeof(kRepeatedNameMarker)); } } char padding_bytes[8] = {0}; diff --git a/src/diagnostics/perf-jit.h b/src/diagnostics/perf-jit.h index 47f7322615..718903abba 100644 --- a/src/diagnostics/perf-jit.h +++ b/src/diagnostics/perf-jit.h @@ -58,7 +58,7 @@ class LinuxPerfJitLogger : public CodeEventLogger { void CloseMarkerFile(void* marker_address); uint64_t GetTimestamp(); - void LogRecordedBuffer(Handle code, + void LogRecordedBuffer(AbstractCode code, MaybeHandle maybe_shared, const char* name, int length) override; #if V8_ENABLE_WEBASSEMBLY @@ -79,7 +79,7 @@ class LinuxPerfJitLogger : public CodeEventLogger { void LogWriteBytes(const char* bytes, int size); void LogWriteHeader(); - void LogWriteDebugInfo(Handle code, Handle shared); + void LogWriteDebugInfo(Code code, Handle shared); #if V8_ENABLE_WEBASSEMBLY void LogWriteDebugInfo(const wasm::WasmCode* code); #endif // V8_ENABLE_WEBASSEMBLY diff --git a/src/diagnostics/unwinding-info-win64.cc b/src/diagnostics/unwinding-info-win64.cc index 7f3e3c6338..88be356e8e 100644 --- a/src/diagnostics/unwinding-info-win64.cc +++ b/src/diagnostics/unwinding-info-win64.cc @@ -450,7 +450,7 @@ void InitUnwindingRecord(Record* record, size_t code_size_in_bytes) { // Hardcoded thunk. 
AssemblerOptions options; options.record_reloc_info_for_serialization = false; - TurboAssembler masm(nullptr, options, CodeObjectRequired::kNo, + MacroAssembler masm(nullptr, options, CodeObjectRequired::kNo, NewAssemblerBuffer(64)); masm.Mov(x16, Operand(reinterpret_cast(&CRASH_HANDLER_FUNCTION_NAME))); diff --git a/src/execution/execution.cc b/src/execution/execution.cc index 02e911bef2..d017b82735 100644 --- a/src/execution/execution.cc +++ b/src/execution/execution.cc @@ -468,6 +468,7 @@ V8_WARN_UNUSED_RESULT MaybeHandle Invoke(Isolate* isolate, MaybeHandle InvokeWithTryCatch(Isolate* isolate, const InvokeParams& params) { + DCHECK(!isolate->is_execution_terminating()); bool is_termination = false; MaybeHandle maybe_result; if (params.exception_out != nullptr) { diff --git a/src/execution/frames.cc b/src/execution/frames.cc index 99d35408ef..c10b77379f 100644 --- a/src/execution/frames.cc +++ b/src/execution/frames.cc @@ -582,37 +582,39 @@ Code StackFrame::LookupCode() const { void StackFrame::IteratePc(RootVisitor* v, Address* pc_address, Address* constant_pool_address, GcSafeCode holder) const { - if (!holder.has_instruction_stream()) { - // The embedded builtins are immovable so there's no need to update PCs on - // the stack. Just visit the Code object. - Object code = holder.UnsafeCastToCode(); - v->VisitRunningCode(FullObjectSlot(&code)); + const Address old_pc = ReadPC(pc_address); + DCHECK_GE(old_pc, holder.InstructionStart(isolate(), old_pc)); + DCHECK_LT(old_pc, holder.InstructionEnd(isolate(), old_pc)); + + // Keep the old pc offset before visiting the code since we need it to + // calculate the new pc after a potential InstructionStream move. + // It's okay to use raw_instruction_start() here since `pc_offset_from_start` + // will only be used if `holder` is not a builtin. + const uintptr_t pc_offset_from_start = + old_pc - holder.raw_instruction_start(); + + // Visit. + GcSafeCode visited_holder = holder; + const Object old_istream = holder.raw_instruction_stream(); + Object visited_istream = old_istream; + v->VisitRunningCode(FullObjectSlot{&visited_holder}, + FullObjectSlot{&visited_istream}); + if (visited_istream == old_istream) { + // Note this covers two important cases: + // 1. the associated InstructionStream object did not move, and + // 2. `holder` is an embedded builtin and has no InstructionStream. return; } - InstructionStream unsafe_istream = InstructionStream::unchecked_cast( - holder.UnsafeCastToCode().raw_instruction_stream()); + DCHECK(visited_holder.has_instruction_stream()); - // Keep the old pc (offset) before visiting (and potentially moving) the - // InstructionStream. - const Address old_pc = ReadPC(pc_address); - DCHECK(isolate_->heap()->GcSafeInstructionStreamContains(unsafe_istream, - old_pc)); - const uintptr_t pc_offset = old_pc - unsafe_istream.instruction_start(); - - // Visit the InstructionStream. - Object visited_unsafe_istream = unsafe_istream; - v->VisitRunningCode(FullObjectSlot(&visited_unsafe_istream)); - if (visited_unsafe_istream == unsafe_istream) return; - - // Take care when accessing unsafe_istream from here on. It may have been - // moved. - unsafe_istream = InstructionStream::unchecked_cast(visited_unsafe_istream); - const Address pc = unsafe_istream.instruction_start() + pc_offset; + InstructionStream istream = + InstructionStream::unchecked_cast(visited_istream); + const Address new_pc = istream.instruction_start() + pc_offset_from_start; // TODO(v8:10026): avoid replacing a signed pointer. 
- PointerAuthentication::ReplacePC(pc_address, pc, kSystemPointerSize); + PointerAuthentication::ReplacePC(pc_address, new_pc, kSystemPointerSize); if (V8_EMBEDDED_CONSTANT_POOL_BOOL && constant_pool_address != nullptr) { - *constant_pool_address = unsafe_istream.constant_pool(); + *constant_pool_address = istream.constant_pool(); } } @@ -1107,7 +1109,7 @@ void VisitSpillSlot(Isolate* isolate, RootVisitor* v, if (!HAS_SMI_TAG(value) && value <= 0xffffffff) { // We don't need to update smi values or full pointers. was_compressed = true; - *spill_slot.location() = V8HeapCompressionScheme::DecompressTaggedPointer( + *spill_slot.location() = V8HeapCompressionScheme::DecompressTagged( cage_base, static_cast(value)); if (DEBUG_BOOL) { // Ensure that the spill slot contains correct heap object. @@ -1142,7 +1144,7 @@ void VisitSpillSlot(Isolate* isolate, RootVisitor* v, if (!HAS_SMI_TAG(compressed_value)) { was_compressed = slot_contents <= 0xFFFFFFFF; // We don't need to update smi values. - *spill_slot.location() = V8HeapCompressionScheme::DecompressTaggedPointer( + *spill_slot.location() = V8HeapCompressionScheme::DecompressTagged( cage_base, compressed_value); } } diff --git a/src/execution/isolate-data.h b/src/execution/isolate-data.h index 8323cf6a7c..14a5036215 100644 --- a/src/execution/isolate-data.h +++ b/src/execution/isolate-data.h @@ -215,12 +215,12 @@ class IsolateData final { // runtime checks. void* embedder_data_[Internals::kNumIsolateDataSlots] = {}; - // Stores the state of the caller for TurboAssembler::CallCFunction so that + // Stores the state of the caller for MacroAssembler::CallCFunction so that // the sampling CPU profiler can iterate the stack during such calls. These // are stored on IsolateData so that they can be stored to with only one move // instruction in compiled code. // - // The FP and PC that are saved right before TurboAssembler::CallCFunction. + // The FP and PC that are saved right before MacroAssembler::CallCFunction. Address fast_c_call_caller_fp_ = kNullAddress; Address fast_c_call_caller_pc_ = kNullAddress; // The address of the fast API callback right before it's executed from diff --git a/src/execution/isolate-inl.h b/src/execution/isolate-inl.h index 026e7cfd71..8edeb5dfb8 100644 --- a/src/execution/isolate-inl.h +++ b/src/execution/isolate-inl.h @@ -133,7 +133,7 @@ Object Isolate::VerifyBuiltinsResult(Object result) { // because that's the assumption in generated code (which might call this // builtin). if (!result.IsSmi()) { - DCHECK_EQ(result.ptr(), V8HeapCompressionScheme::DecompressTaggedPointer( + DCHECK_EQ(result.ptr(), V8HeapCompressionScheme::DecompressTagged( this, static_cast(result.ptr()))); } #endif @@ -149,11 +149,11 @@ ObjectPair Isolate::VerifyBuiltinsResult(ObjectPair pair) { // because that's the assumption in generated code (which might call this // builtin). 
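// Illustrative sketch, not part of the applied change: a mental model for the
// DecompressTagged calls used throughout this patch (the former TaggedPointer
// and TaggedAny variants collapse into one helper). Assumes a 64-bit build
// with pointer compression: the compressed value is the low 32 bits, and
// decompression adds it back onto the 4 GB-aligned cage base derived from any
// on-heap address.
#include <cstdint>
uintptr_t DecompressTaggedModel(uintptr_t any_on_heap_addr, uint32_t raw_value) {
  uintptr_t cage_base = any_on_heap_addr & ~uintptr_t{0xFFFFFFFF};
  return cage_base + raw_value;
}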
if (!HAS_SMI_TAG(pair.x)) { - DCHECK_EQ(pair.x, V8HeapCompressionScheme::DecompressTaggedPointer( + DCHECK_EQ(pair.x, V8HeapCompressionScheme::DecompressTagged( this, static_cast(pair.x))); } if (!HAS_SMI_TAG(pair.y)) { - DCHECK_EQ(pair.y, V8HeapCompressionScheme::DecompressTaggedPointer( + DCHECK_EQ(pair.y, V8HeapCompressionScheme::DecompressTagged( this, static_cast(pair.y))); } #endif // V8_COMPRESS_POINTERS diff --git a/src/execution/isolate.cc b/src/execution/isolate.cc index 09d0315c0b..1142e544db 100644 --- a/src/execution/isolate.cc +++ b/src/execution/isolate.cc @@ -4140,7 +4140,7 @@ void Isolate::VerifyStaticRoots() { for (Tagged_t cmp_ptr : StaticReadOnlyRootsPointerTable) { Address the_root = roots[idx]; Address ptr = - V8HeapCompressionScheme::DecompressTaggedPointer(cage_base(), cmp_ptr); + V8HeapCompressionScheme::DecompressTagged(cage_base(), cmp_ptr); CHECK_WITH_MSG(the_root == ptr, STATIC_ROOTS_FAILED_MSG); // All roots must fit on first page, since only this page is guaranteed to // have a stable offset from the cage base. If this ever changes we need @@ -4378,9 +4378,9 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data, Address base = code_cage->base(); Address last = base + code_cage->size() - 1; PtrComprCageBase code_cage_base{code_cage_base_}; - CHECK_EQ(base, ComprScheme::DecompressTaggedPointer( + CHECK_EQ(base, ComprScheme::DecompressTagged( code_cage_base, ComprScheme::CompressTagged(base))); - CHECK_EQ(last, ComprScheme::DecompressTaggedPointer( + CHECK_EQ(last, ComprScheme::DecompressTagged( code_cage_base, ComprScheme::CompressTagged(last))); } #endif // V8_EXTERNAL_CODE_SPACE diff --git a/src/extensions/gc-extension.cc b/src/extensions/gc-extension.cc index 98cfe7f14f..16fdeb457b 100644 --- a/src/extensions/gc-extension.cc +++ b/src/extensions/gc-extension.cc @@ -5,6 +5,7 @@ #include "src/extensions/gc-extension.h" #include "include/v8-isolate.h" +#include "include/v8-microtask-queue.h" #include "include/v8-object.h" #include "include/v8-persistent-handle.h" #include "include/v8-primitive.h" @@ -121,6 +122,8 @@ class AsyncGC final : public CancelableTask { InvokeGC(isolate_, ExecutionType::kAsync, type_); auto resolver = v8::Local::New(isolate_, resolver_); auto ctx = Local::New(isolate_, ctx_); + v8::MicrotasksScope microtasks_scope( + ctx, v8::MicrotasksScope::kDoNotRunMicrotasks); resolver->Resolve(ctx, v8::Undefined(isolate_)).ToChecked(); } diff --git a/src/flags/flag-definitions.h b/src/flags/flag-definitions.h index 8a492ee43d..7e987f464e 100644 --- a/src/flags/flag-definitions.h +++ b/src/flags/flag-definitions.h @@ -187,6 +187,22 @@ // #define FLAG FLAG_FULL +// Experimental features. +// Features that are still considered experimental and which are not ready for +// fuzz testing should be defined using this macro. The feature will then imply +// --experimental, which will indicate to the user that they are running an +// experimental configuration of V8. Experimental features are always disabled +// by default. When these features mature, the flag should first turn into a +// regular feature flag (still disabled by default) and then ideally be staged +// behind (for example) --future before being enabled by default. +DEFINE_BOOL(experimental, false, + "Indicates that V8 is running with experimental features enabled. 
" + "This flag is typically not set explicitly but instead enabled as " + "an implication of other flags which enable experimental features.") +#define DEFINE_EXPERIMENTAL_FEATURE(nam, cmt) \ + FLAG(BOOL, bool, nam, false, cmt " (experimental)") \ + DEFINE_IMPLICATION(nam, experimental) + // ATTENTION: This is set to true by default in d8. But for API compatibility, // it generally defaults to false. DEFINE_BOOL(abort_on_contradictory_flags, false, @@ -237,9 +253,8 @@ DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features") #endif // Features that are complete (but still behind the --harmony flag). -#define HARMONY_STAGED_BASE(V) \ - V(harmony_regexp_unicode_sets, "harmony RegExp Unicode Sets") \ - V(harmony_rab_gsab_transfer, "harmony ArrayBuffer.transfer") \ +#define HARMONY_STAGED_BASE(V) \ + V(harmony_rab_gsab_transfer, "harmony ArrayBuffer.transfer") \ V(harmony_array_grouping, "harmony array grouping") DEFINE_IMPLICATION(harmony_rab_gsab_transfer, harmony_rab_gsab) @@ -259,7 +274,8 @@ DEFINE_IMPLICATION(harmony_rab_gsab_transfer, harmony_rab_gsab) V(harmony_change_array_by_copy, "harmony change-Array-by-copy") \ V(harmony_string_is_well_formed, "harmony String#{is,to}WellFormed") \ V(harmony_rab_gsab, \ - "harmony ResizableArrayBuffer / GrowableSharedArrayBuffer") + "harmony ResizableArrayBuffer / GrowableSharedArrayBuffer") \ + V(harmony_regexp_unicode_sets, "harmony RegExp Unicode Sets") #ifdef V8_INTL_SUPPORT #define HARMONY_SHIPPING(V) \ @@ -462,8 +478,8 @@ DEFINE_BOOL(lower_tier_as_toptier, false, #define V8_ENABLE_MAGLEV_BOOL true DEFINE_BOOL(maglev, false, "enable the maglev optimizing compiler") DEFINE_WEAK_IMPLICATION(future, maglev) -DEFINE_BOOL(maglev_inlining, false, - "enable inlining in the maglev optimizing compiler") +DEFINE_EXPERIMENTAL_FEATURE(maglev_inlining, + "enable inlining in the maglev optimizing compiler") DEFINE_BOOL(maglev_reuse_stack_slots, true, "reuse stack slots in the maglev optimizing compiler") @@ -1198,7 +1214,7 @@ DEFINE_WEAK_IMPLICATION(experimental_wasm_gc, wasm_speculative_inlining) DEFINE_BOOL(wasm_loop_unrolling, true, "enable loop unrolling for wasm functions") -DEFINE_BOOL(wasm_loop_peeling, false, "enable loop peeling for wasm functions") +DEFINE_BOOL(wasm_loop_peeling, true, "enable loop peeling for wasm functions") DEFINE_SIZE_T(wasm_loop_peeling_max_size, 1000, "maximum size for peeling") DEFINE_BOOL(wasm_fuzzer_gen_test, false, "generate a test case when running a wasm fuzzer") diff --git a/src/handles/global-handles.cc b/src/handles/global-handles.cc index 4437e6ebaa..fc5908794c 100644 --- a/src/handles/global-handles.cc +++ b/src/handles/global-handles.cc @@ -700,7 +700,7 @@ V8_INLINE bool GlobalHandles::ResetWeakNodeIfDead( case WeaknessType::kCallback: V8_FALLTHROUGH; case WeaknessType::kCallbackWithTwoEmbedderFields: - node->CollectPhantomCallbackData(®ular_pending_phantom_callbacks_); + node->CollectPhantomCallbackData(&pending_phantom_callbacks_); break; } return true; @@ -730,9 +730,12 @@ void GlobalHandles::ProcessWeakYoungObjects( if (node->IsWeakRetainer() && !ResetWeakNodeIfDead(node, should_reset_handle)) { - // Node is weak and alive, so it should be passed onto the visitor. - v->VisitRootPointer(Root::kGlobalHandles, node->label(), - node->location()); + // Node is weak and alive, so it should be passed onto the visitor if + // present. 
+ if (v) { + v->VisitRootPointer(Root::kGlobalHandles, node->label(), + node->location()); + } } } } @@ -817,35 +820,35 @@ void GlobalHandles::ClearListOfYoungNodes() { ClearListOfYoungNodesImpl(isolate_, &young_nodes_); } -template -size_t GlobalHandles::InvokeFirstPassWeakCallbacks( - std::vector>* pending) { - size_t freed_nodes = 0; - std::vector> pending_phantom_callbacks; - pending_phantom_callbacks.swap(*pending); - { - // The initial pass callbacks must simply clear the nodes. - for (auto& pair : pending_phantom_callbacks) { - T* node = pair.first; - DCHECK_EQ(T::NEAR_DEATH, node->state()); - pair.second.Invoke(isolate(), PendingPhantomCallback::kFirstPass); - - // Transition to second pass. It is required that the first pass callback - // resets the handle using |v8::PersistentBase::Reset|. Also see comments - // on |v8::WeakCallbackInfo|. - CHECK_WITH_MSG(T::FREE == node->state(), - "Handle not reset in first callback. See comments on " - "|v8::WeakCallbackInfo|."); - - if (pair.second.callback()) second_pass_callbacks_.push_back(pair.second); - freed_nodes++; - } - } - return freed_nodes; -} - size_t GlobalHandles::InvokeFirstPassWeakCallbacks() { - return InvokeFirstPassWeakCallbacks(®ular_pending_phantom_callbacks_); + last_gc_custom_callbacks_ = 0; + if (pending_phantom_callbacks_.empty()) return 0; + + TRACE_GC(isolate()->heap()->tracer(), + GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES); + + size_t freed_nodes = 0; + std::vector> + pending_phantom_callbacks; + pending_phantom_callbacks.swap(pending_phantom_callbacks_); + // The initial pass callbacks must simply clear the nodes. + for (auto& pair : pending_phantom_callbacks) { + Node* node = pair.first; + DCHECK_EQ(Node::NEAR_DEATH, node->state()); + pair.second.Invoke(isolate(), PendingPhantomCallback::kFirstPass); + + // Transition to second pass. It is required that the first pass callback + // resets the handle using |v8::PersistentBase::Reset|. Also see comments + // on |v8::WeakCallbackInfo|. + CHECK_WITH_MSG(Node::FREE == node->state(), + "Handle not reset in first callback. See comments on " + "|v8::WeakCallbackInfo|."); + + if (pair.second.callback()) second_pass_callbacks_.push_back(pair.second); + freed_nodes++; + } + last_gc_custom_callbacks_ = freed_nodes; + return 0; } void GlobalHandles::PendingPhantomCallback::Invoke(Isolate* isolate, diff --git a/src/handles/global-handles.h b/src/handles/global-handles.h index 54b0d52a0a..e715a9046f 100644 --- a/src/handles/global-handles.h +++ b/src/handles/global-handles.h @@ -103,9 +103,9 @@ class V8_EXPORT_PRIVATE GlobalHandles final { // Iterates over strong and dependent handles. See the note above. void IterateYoungStrongAndDependentRoots(RootVisitor* v); - // Processes all young weak objects. Weak objects for which - // `should_reset_handle()` returns true are reset and others are passed to the - // visitor `v`. + // Processes all young weak objects: + // - Weak objects for which `should_reset_handle()` returns true are reset; + // - Others are passed to `v` iff `v` is not null. void ProcessWeakYoungObjects(RootVisitor* v, WeakSlotCallbackWithHeap should_reset_handle); @@ -121,6 +121,7 @@ class V8_EXPORT_PRIVATE GlobalHandles final { size_t UsedSize() const; // Number of global handles. 
size_t handles_count() const; + size_t last_gc_custom_callbacks() const { return last_gc_custom_callbacks_; } void IterateAllRootsForTesting(v8::PersistentHandleVisitor* v); @@ -138,10 +139,6 @@ class V8_EXPORT_PRIVATE GlobalHandles final { class NodeSpace; class PendingPhantomCallback; - template - size_t InvokeFirstPassWeakCallbacks( - std::vector>* pending); - void ApplyPersistentHandleVisitor(v8::PersistentHandleVisitor* visitor, Node* node); @@ -159,9 +156,10 @@ class V8_EXPORT_PRIVATE GlobalHandles final { // is accessed, some of the objects may have been promoted already. std::vector young_nodes_; std::vector> - regular_pending_phantom_callbacks_; + pending_phantom_callbacks_; std::vector second_pass_callbacks_; bool second_pass_callbacks_task_posted_ = false; + size_t last_gc_custom_callbacks_ = 0; }; class GlobalHandles::PendingPhantomCallback final { diff --git a/src/handles/traced-handles.cc b/src/handles/traced-handles.cc index 82e5a3eb6c..56c5985f9c 100644 --- a/src/handles/traced-handles.cc +++ b/src/handles/traced-handles.cc @@ -926,8 +926,10 @@ void TracedHandlesImpl::ProcessYoungObjects( } else { if (!node->is_root()) { node->set_root(true); - visitor->VisitRootPointer(Root::kGlobalHandles, nullptr, - node->location()); + if (visitor) { + visitor->VisitRootPointer(Root::kGlobalHandles, nullptr, + node->location()); + } } } } diff --git a/src/heap/OWNERS b/src/heap/OWNERS index de698441f9..bcdce0ea89 100644 --- a/src/heap/OWNERS +++ b/src/heap/OWNERS @@ -6,3 +6,4 @@ nikolaos@chromium.org omerkatz@chromium.org per-file *factory*=file:../objects/OWNERS +per-file static-roots.h=file:../../COMMON_OWNERS diff --git a/src/heap/cppgc/write-barrier.cc b/src/heap/cppgc/write-barrier.cc index d7aa1284eb..a2f1eb4ab4 100644 --- a/src/heap/cppgc/write-barrier.cc +++ b/src/heap/cppgc/write-barrier.cc @@ -226,14 +226,23 @@ bool YoungGenerationEnabler::IsEnabled() { #ifdef CPPGC_SLIM_WRITE_BARRIER // static +template void WriteBarrier::CombinedWriteBarrierSlow(const void* slot) { + DCHECK_NOT_NULL(slot); + + const void* value = nullptr; #if defined(CPPGC_POINTER_COMPRESSION) - using MemberStorage = CompressedPointer; -#else // !defined(CPPGC_POINTER_COMPRESSION) - using MemberStorage = RawPointer; -#endif // !defined(CPPGC_POINTER_COMPRESSION) - const auto* storage = reinterpret_cast(slot); - const void* value = storage->Load(); + if constexpr (SlotType == WriteBarrierSlotType::kCompressed) { + value = CompressedPointer::Decompress( + *static_cast(slot)); + } else { + value = *reinterpret_cast(slot); + } +#else + static_assert(SlotType == WriteBarrierSlotType::kUncompressed); + value = *reinterpret_cast(slot); +#endif + WriteBarrier::Params params; const WriteBarrier::Type type = WriteBarrier::GetWriteBarrierType(slot, value, params); @@ -253,6 +262,13 @@ void WriteBarrier::CombinedWriteBarrierSlow(const void* slot) { } } +template V8_EXPORT_PRIVATE void WriteBarrier::CombinedWriteBarrierSlow< + WriteBarrierSlotType::kUncompressed>(const void* slot); +#if defined(CPPGC_POINTER_COMPRESSION) +template V8_EXPORT_PRIVATE void WriteBarrier::CombinedWriteBarrierSlow< + WriteBarrierSlotType::kCompressed>(const void* slot); +#endif // defined(CPPGC_POINTER_COMPRESSION) + #endif // CPPGC_SLIM_WRITE_BARRIER } // namespace internal diff --git a/src/heap/factory.cc b/src/heap/factory.cc index 5df2a5b152..c8f885af1f 100644 --- a/src/heap/factory.cc +++ b/src/heap/factory.cc @@ -2028,6 +2028,7 @@ Handle Factory::NewMap(InstanceType type, int instance_size, ElementsKind elements_kind, int 
inobject_properties, AllocationType allocation_type) { static_assert(LAST_JS_OBJECT_TYPE == LAST_TYPE); + DCHECK(!InstanceTypeChecker::UniqueMapOfInstanceType(type).has_value()); DCHECK_IMPLIES(InstanceTypeChecker::IsJSObject(type) && !Map::CanHaveFastTransitionableElementsKind(type), IsDictionaryElementsKind(elements_kind) || diff --git a/src/heap/gc-tracer.cc b/src/heap/gc-tracer.cc index 37be8e42f7..d28e2c4556 100644 --- a/src/heap/gc-tracer.cc +++ b/src/heap/gc-tracer.cc @@ -861,9 +861,9 @@ void GCTracer::PrintNVP() const { "mark.seed=%.2f " "mark.closure_parallel=%.2f " "mark.closure=%.2f " - "mark.global_handles=%.2f " "clear=%.2f " "clear.string_table=%.2f " + "clear.global_handles=%.2f " "complete.sweep_array_buffers=%.2f " "complete.sweeping=%.2f " "evacuate=%.2f " @@ -906,9 +906,9 @@ void GCTracer::PrintNVP() const { current_scope(Scope::MINOR_MC_MARK_SEED), current_scope(Scope::MINOR_MC_MARK_CLOSURE_PARALLEL), current_scope(Scope::MINOR_MC_MARK_CLOSURE), - current_scope(Scope::MINOR_MC_MARK_GLOBAL_HANDLES), current_scope(Scope::MINOR_MC_CLEAR), current_scope(Scope::MINOR_MC_CLEAR_STRING_TABLE), + current_scope(Scope::MINOR_MC_CLEAR_WEAK_GLOBAL_HANDLES), current_scope(Scope::MINOR_MC_COMPLETE_SWEEP_ARRAY_BUFFERS), current_scope(Scope::MINOR_MC_COMPLETE_SWEEPING), current_scope(Scope::MINOR_MC_EVACUATE), diff --git a/src/heap/heap.cc b/src/heap/heap.cc index 1021b19f0b..8911bd96c3 100644 --- a/src/heap/heap.cc +++ b/src/heap/heap.cc @@ -1518,21 +1518,23 @@ void Heap::CollectAllAvailableGarbage(GarbageCollectionReason gc_reason) { // The optimizing compiler may be unnecessarily holding on to memory. isolate()->AbortConcurrentOptimization(BlockingBehavior::kDontBlock); isolate()->ClearSerializerData(); + isolate()->compilation_cache()->Clear(); + set_current_gc_flags( kReduceMemoryFootprintMask | (gc_reason == GarbageCollectionReason::kLowMemoryNotification ? kForcedGC : 0)); - isolate_->compilation_cache()->Clear(); - const int kMaxNumberOfAttempts = 7; - const int kMinNumberOfAttempts = 2; + constexpr int kMaxNumberOfAttempts = 7; + constexpr int kMinNumberOfAttempts = 2; for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) { - if (!CollectGarbage(OLD_SPACE, gc_reason, kNoGCCallbackFlags) && - attempt + 1 >= kMinNumberOfAttempts) { + CollectGarbage(OLD_SPACE, gc_reason, kNoGCCallbackFlags); + if ((isolate()->global_handles()->last_gc_custom_callbacks() == 0) && + (attempt + 1 >= kMinNumberOfAttempts)) { break; } } - set_current_gc_flags(kNoGCFlags); + EagerlyFreeExternalMemory(); if (v8_flags.trace_duplicate_threshold_kb) { @@ -1638,7 +1640,7 @@ void InvokeExternalCallbacks(Isolate* isolate, Callback callback) { } // namespace -bool Heap::CollectGarbage(AllocationSpace space, +void Heap::CollectGarbage(AllocationSpace space, GarbageCollectionReason gc_reason, const v8::GCCallbackFlags gc_callback_flags) { if (V8_UNLIKELY(!deserialization_complete_)) { @@ -1695,7 +1697,6 @@ bool Heap::CollectGarbage(AllocationSpace space, embedder_stack_state_ = StackState::kNoHeapPointers; } - size_t freed_global_handles = 0; size_t committed_memory_before = collector == GarbageCollector::MARK_COMPACTOR ? 
CommittedOldGenerationMemory() : 0; @@ -1737,8 +1738,7 @@ bool Heap::CollectGarbage(AllocationSpace space, if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) { tp_heap_->CollectGarbage(); } else { - freed_global_handles += - PerformGarbageCollection(collector, gc_reason, collector_reason); + PerformGarbageCollection(collector, gc_reason, collector_reason); } // Clear flags describing the current GC now that the current GC is // complete. Do this before GarbageCollectionEpilogue() since that could @@ -1816,8 +1816,6 @@ bool Heap::CollectGarbage(AllocationSpace space, FatalProcessOutOfMemory("Reached heap limit"); } } - - return freed_global_handles > 0; } int Heap::NotifyContextDisposed(bool dependant_context) { @@ -2164,9 +2162,9 @@ void ClearStubCaches(Isolate* isolate) { } // namespace -size_t Heap::PerformGarbageCollection(GarbageCollector collector, - GarbageCollectionReason gc_reason, - const char* collector_reason) { +void Heap::PerformGarbageCollection(GarbageCollector collector, + GarbageCollectionReason gc_reason, + const char* collector_reason) { DisallowJavascriptExecution no_js(isolate()); if (IsYoungGenerationCollector(collector)) { @@ -2285,15 +2283,9 @@ size_t Heap::PerformGarbageCollection(GarbageCollector collector, // Update relocatables. Relocatable::PostGarbageCollectionProcessing(isolate_); - size_t freed_global_handles; - - { - TRACE_GC(tracer(), GCTracer::Scope::HEAP_EXTERNAL_WEAK_GLOBAL_HANDLES); - // First round weak callbacks are not supposed to allocate and trigger - // nested GCs. - freed_global_handles = - isolate_->global_handles()->InvokeFirstPassWeakCallbacks(); - } + // First round weak callbacks are not supposed to allocate and trigger + // nested GCs. + isolate_->global_handles()->InvokeFirstPassWeakCallbacks(); if (cpp_heap() && (collector == GarbageCollector::MARK_COMPACTOR || collector == GarbageCollector::MINOR_MARK_COMPACTOR)) { @@ -2333,8 +2325,6 @@ size_t Heap::PerformGarbageCollection(GarbageCollector collector, } }); } - - return freed_global_handles; } bool Heap::CollectGarbageShared(LocalHeap* local_heap, @@ -4775,10 +4765,13 @@ class ClientRootVisitor : public RootVisitor { actual_visitor_->VisitRootPointers(root, description, start, end); } - void VisitRunningCode(FullObjectSlot slot) final { + void VisitRunningCode(FullObjectSlot code_slot, + FullObjectSlot maybe_istream_slot) final { #if DEBUG - HeapObject object = HeapObject::cast(*slot); - DCHECK(!object.InSharedWritableHeap()); + DCHECK(!HeapObject::cast(*code_slot).InSharedWritableHeap()); + Object maybe_istream = *maybe_istream_slot; + DCHECK(maybe_istream == Smi::zero() || + !HeapObject::cast(maybe_istream).InSharedWritableHeap()); #endif } diff --git a/src/heap/heap.h b/src/heap/heap.h index 2a371c0f72..bc8926a733 100644 --- a/src/heap/heap.h +++ b/src/heap/heap.h @@ -981,7 +981,7 @@ class Heap { // Performs garbage collection operation. // Returns whether there is a chance that another major GC could // collect more garbage. - V8_EXPORT_PRIVATE bool CollectGarbage( + V8_EXPORT_PRIVATE void CollectGarbage( AllocationSpace space, GarbageCollectionReason gc_reason, const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags); @@ -1780,10 +1780,9 @@ class Heap { void UnmarkSharedLinearAllocationAreas(); // Performs garbage collection in a safepoint. - // Returns the number of freed global handles. 
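The CHECK_WITH_MSG in GlobalHandles::InvokeFirstPassWeakCallbacks above enforces the public v8::WeakCallbackInfo contract that this CL now also feeds into last_gc_custom_callbacks(): the first-pass callback must reset the handle, and anything heavier belongs in a second-pass callback. A minimal embedder-side sketch of that contract (the Wrapper type and callback names are illustrative, not part of this change):

#include <v8.h>  // or the split headers, e.g. v8-persistent-handle.h

struct Wrapper {
  v8::Global<v8::Object> handle;
};

void SecondPassCallback(const v8::WeakCallbackInfo<Wrapper>& data) {
  delete data.GetParameter();  // heavier cleanup may run in the second pass
}

void FirstPassCallback(const v8::WeakCallbackInfo<Wrapper>& data) {
  // Required: the first pass must clear the handle, or the CHECK above fires.
  data.GetParameter()->handle.Reset();
  data.SetSecondPassCallback(SecondPassCallback);
}

// Registration: wrapper->handle.SetWeak(wrapper, FirstPassCallback,
//                                       v8::WeakCallbackType::kParameter);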
- size_t PerformGarbageCollection(GarbageCollector collector, - GarbageCollectionReason gc_reason, - const char* collector_reason); + void PerformGarbageCollection(GarbageCollector collector, + GarbageCollectionReason gc_reason, + const char* collector_reason); // Performs garbage collection in the shared heap. void PerformSharedGarbageCollection(Isolate* initiator, diff --git a/src/heap/incremental-marking.cc b/src/heap/incremental-marking.cc index e220e722bc..484917c537 100644 --- a/src/heap/incremental-marking.cc +++ b/src/heap/incremental-marking.cc @@ -275,8 +275,18 @@ void IncrementalMarking::MarkRoots() { std::vector marking_items; RememberedSet::IterateMemoryChunks( - heap_, [&marking_items](MemoryChunk* chunk) { - marking_items.emplace_back(chunk); + heap(), [&marking_items](MemoryChunk* chunk) { + if (chunk->slot_set()) { + marking_items.emplace_back( + chunk, PageMarkingItem::SlotsType::kRegularSlots); + } else { + chunk->ReleaseInvalidatedSlots(); + } + + if (chunk->typed_slot_set()) { + marking_items.emplace_back(chunk, + PageMarkingItem::SlotsType::kTypedSlots); + } }); V8::GetCurrentPlatform() diff --git a/src/heap/mark-compact-inl.h b/src/heap/mark-compact-inl.h index 7168b7350a..aebd8ec18a 100644 --- a/src/heap/mark-compact-inl.h +++ b/src/heap/mark-compact-inl.h @@ -290,64 +290,6 @@ typename LiveObjectRange::iterator LiveObjectRange::end() { Isolate* CollectorBase::isolate() { return heap()->isolate(); } -class YoungGenerationMarkingTask; - -class PageMarkingItem : public ParallelWorkItem { - public: - explicit PageMarkingItem(MemoryChunk* chunk) : chunk_(chunk) {} - ~PageMarkingItem() = default; - - void Process(YoungGenerationMarkingTask* task); - - private: - inline Heap* heap() { return chunk_->heap(); } - - void MarkUntypedPointers(YoungGenerationMarkingTask* task); - - void MarkTypedPointers(YoungGenerationMarkingTask* task); - - template - V8_INLINE SlotCallbackResult - CheckAndMarkObject(YoungGenerationMarkingTask* task, TSlot slot); - - MemoryChunk* chunk_; -}; - -enum class YoungMarkingJobType { kAtomic, kIncremental }; - -class YoungGenerationMarkingJob : public v8::JobTask { - public: - YoungGenerationMarkingJob(Isolate* isolate, Heap* heap, - MarkingWorklists* global_worklists, - std::vector marking_items, - YoungMarkingJobType young_marking_job_type) - : isolate_(isolate), - heap_(heap), - global_worklists_(global_worklists), - marking_items_(std::move(marking_items)), - remaining_marking_items_(marking_items_.size()), - generator_(marking_items_.size()), - young_marking_job_type_(young_marking_job_type) {} - - void Run(JobDelegate* delegate) override; - size_t GetMaxConcurrency(size_t worker_count) const override; - bool incremental() const { - return young_marking_job_type_ == YoungMarkingJobType::kIncremental; - } - - private: - void ProcessItems(JobDelegate* delegate); - void ProcessMarkingItems(YoungGenerationMarkingTask* task); - - Isolate* isolate_; - Heap* heap_; - MarkingWorklists* global_worklists_; - std::vector marking_items_; - std::atomic_size_t remaining_marking_items_{0}; - IndexGenerator generator_; - YoungMarkingJobType young_marking_job_type_; -}; - } // namespace internal } // namespace v8 diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc index b01cb0c238..25d6513523 100644 --- a/src/heap/mark-compact.cc +++ b/src/heap/mark-compact.cc @@ -1093,39 +1093,24 @@ class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor { } } - void VisitRunningCode(FullObjectSlot p) final { - // If Code is currently executing, 
then we must not remove its - // deoptimization literals, which it might need in order to successfully - // deoptimize. - // - // Must match behavior in RootsReferencesExtractor::VisitRunningCode, so - // that heap snapshots accurately describe the roots. - HeapObject value = HeapObject::cast(*p); - if (!IsCodeSpaceObject(value)) { - // The slot might contain a Code object representing an - // embedded builtin, which doesn't require additional processing. - DCHECK(!Code::cast(value).has_instruction_stream()); - } else { - InstructionStream code = InstructionStream::cast(value); - if (code.kind() != CodeKind::BASELINE) { - DeoptimizationData deopt_data = - DeoptimizationData::cast(code.deoptimization_data()); - if (deopt_data.length() > 0) { - DeoptimizationLiteralArray literals = deopt_data.LiteralArray(); - int literals_length = literals.length(); - for (int i = 0; i < literals_length; ++i) { - MaybeObject maybe_literal = literals.Get(i); - HeapObject heap_literal; - if (maybe_literal.GetHeapObject(&heap_literal)) { - MarkObjectByPointer(Root::kStackRoots, - FullObjectSlot(&heap_literal)); - } - } - } - } + // Keep this synced with RootsReferencesExtractor::VisitRunningCode. + void VisitRunningCode(FullObjectSlot code_slot, + FullObjectSlot istream_or_smi_zero_slot) final { + Object istream_or_smi_zero = *istream_or_smi_zero_slot; + DCHECK(istream_or_smi_zero == Smi::zero() || + istream_or_smi_zero.IsInstructionStream()); + DCHECK_EQ(Code::cast(*code_slot).raw_instruction_stream(), + istream_or_smi_zero); + + if (istream_or_smi_zero != Smi::zero()) { + InstructionStream istream = InstructionStream::cast(istream_or_smi_zero); + // We must not remove deoptimization literals which may be needed in + // order to successfully deoptimize. + istream.IterateDeoptimizationLiterals(this); + VisitRootPointer(Root::kStackRoots, nullptr, istream_or_smi_zero_slot); } - // And then mark the InstructionStream itself. - VisitRootPointer(Root::kStackRoots, nullptr, p); + + VisitRootPointer(Root::kStackRoots, nullptr, code_slot); } private: @@ -1369,6 +1354,9 @@ class InternalizedStringTableCleaner final : public RootVisitor { int pointers_removed_ = 0; }; +enum class ExternalStringTableCleaningMode { kAll, kYoungOnly }; + +template class ExternalStringTableCleaner : public RootVisitor { public: explicit ExternalStringTableCleaner(Heap* heap) : heap_(heap) {} @@ -1382,19 +1370,22 @@ class ExternalStringTableCleaner : public RootVisitor { Object the_hole = ReadOnlyRoots(heap_).the_hole_value(); for (FullObjectSlot p = start; p < end; ++p) { Object o = *p; - if (o.IsHeapObject()) { - HeapObject heap_object = HeapObject::cast(o); - if (marking_state->IsWhite(heap_object)) { - if (o.IsExternalString()) { - heap_->FinalizeExternalString(String::cast(o)); - } else { - // The original external string may have been internalized. - DCHECK(o.IsThinString()); - } - // Set the entry to the_hole_value (as deleted). - p.store(the_hole); - } + if (!o.IsHeapObject()) continue; + HeapObject heap_object = HeapObject::cast(o); + // MinorMC doesn't update the young strings set and so it may contain + // strings that are already in old space. + if (!marking_state->IsWhite(heap_object)) continue; + if ((mode == ExternalStringTableCleaningMode::kYoungOnly) && + !Heap::InYoungGeneration(heap_object)) + continue; + if (o.IsExternalString()) { + heap_->FinalizeExternalString(String::cast(o)); + } else { + // The original external string may have been internalized. 
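RootMarkingVisitor::VisitRunningCode above now receives both the Code slot and an InstructionStream-or-Smi::zero() slot, and delegates the literal walk to InstructionStream::IterateDeoptimizationLiterals. That helper is not shown in this CL; judging from the loop it replaces, it presumably does something along these lines (sketch only, modelled on the removed code, not the actual implementation):

void InstructionStream::IterateDeoptimizationLiterals(RootVisitor* v) {
  if (kind() == CodeKind::BASELINE) return;  // baseline code carries no deopt data
  DeoptimizationData deopt_data =
      DeoptimizationData::cast(deoptimization_data());
  if (deopt_data.length() == 0) return;
  DeoptimizationLiteralArray literals = deopt_data.LiteralArray();
  for (int i = 0; i < literals.length(); ++i) {
    HeapObject heap_literal;
    if (literals.Get(i).GetHeapObject(&heap_literal)) {
      // Keep the literal alive; deoptimization may need it later.
      v->VisitRootPointer(Root::kStackRoots, nullptr,
                          FullObjectSlot(&heap_literal));
    }
  }
}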
+ DCHECK(o.IsThinString()); } + // Set the entry to the_hole_value (as deleted). + p.store(the_hole); } } @@ -2706,6 +2697,7 @@ void MarkCompactCollector::RecordObjectStats() { if (V8_LIKELY(!TracingFlags::is_gc_stats_enabled())) return; // Cannot run during bootstrapping due to incomplete objects. if (isolate()->bootstrapper()->IsActive()) return; + TRACE_EVENT0(TRACE_GC_CATEGORIES, "V8.GC_OBJECT_DUMP_STATISTICS"); heap()->CreateObjectStats(); ObjectStatsCollector collector(heap(), heap()->live_object_stats_.get(), heap()->dead_object_stats_.get()); @@ -3105,7 +3097,8 @@ void MarkCompactCollector::ClearNonLiveReferences() { { TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_EXTERNAL_STRING_TABLE); - ExternalStringTableCleaner external_visitor(heap()); + ExternalStringTableCleaner + external_visitor(heap()); heap()->external_string_table_.IterateAll(&external_visitor); heap()->external_string_table_.CleanUpAll(); } @@ -5583,6 +5576,12 @@ void MarkCompactCollector::Sweep() { heap()->tracer(), GCTracer::Scope::MC_SWEEP_CODE_LO, ThreadKind::kMain); SweepLargeSpace(heap()->code_lo_space()); } + if (heap()->shared_space()) { + GCTracer::Scope sweep_scope(heap()->tracer(), + GCTracer::Scope::MC_SWEEP_SHARED_LO, + ThreadKind::kMain); + SweepLargeSpace(heap()->shared_lo_space()); + } { GCTracer::Scope sweep_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP_OLD, ThreadKind::kMain); @@ -5780,17 +5779,6 @@ void MinorMarkCompactCollector::PerformWrapperTracing() { cpp_heap->AdvanceTracing(std::numeric_limits::infinity()); } -// static -bool MinorMarkCompactCollector::IsUnmarkedYoungHeapObject(Heap* heap, - FullObjectSlot p) { - Object o = *p; - if (!o.IsHeapObject()) return false; - HeapObject heap_object = HeapObject::cast(o); - MinorMarkCompactCollector* collector = heap->minor_mark_compact_collector(); - return ObjectInYoungGeneration(o) && - collector->non_atomic_marking_state()->IsWhite(heap_object); -} - class YoungGenerationRecordMigratedSlotVisitor final : public RecordMigratedSlotVisitor { public: @@ -6045,16 +6033,25 @@ void MinorMarkCompactCollector::ClearNonLiveReferences() { TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_STRING_TABLE); // Internalized strings are always stored in old space, so there is no // need to clean them here. 
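With the ExternalStringTableCleaningMode template parameter above, the only behavioural difference between the two instantiations is one extra filter in the young-only case. Restated as a predicate (ShouldClearEntry and the MarkingState parameter are illustrative, not names from this CL):

template <ExternalStringTableCleaningMode mode, typename MarkingState>
bool ShouldClearEntry(MarkingState* marking_state, HeapObject object) {
  if (!marking_state->IsWhite(object)) return false;  // still reachable
  if (mode == ExternalStringTableCleaningMode::kYoungOnly &&
      !Heap::InYoungGeneration(object)) {
    // MinorMC does not keep the young string set exact, so skip strings that
    // were already promoted to old space.
    return false;
  }
  return true;  // finalize if external, then store the_hole in the slot
}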
- ExternalStringTableCleaner external_visitor(heap()); + ExternalStringTableCleaner + external_visitor(heap()); heap()->external_string_table_.IterateYoung(&external_visitor); heap()->external_string_table_.CleanUpYoung(); } - if (auto* cpp_heap = CppHeap::From(heap_->cpp_heap()); - cpp_heap && cpp_heap->generational_gc_supported()) { + + { TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_WEAK_GLOBAL_HANDLES); - isolate()->traced_handles()->ResetYoungDeadNodes( - &MinorMarkCompactCollector::IsUnmarkedYoungHeapObject); + isolate()->global_handles()->ProcessWeakYoungObjects( + nullptr, &IsUnmarkedObjectForYoungGeneration); + if (auto* cpp_heap = CppHeap::From(heap_->cpp_heap()); + cpp_heap && cpp_heap->generational_gc_supported()) { + isolate()->traced_handles()->ResetYoungDeadNodes( + &IsUnmarkedObjectForYoungGeneration); + } else { + isolate()->traced_handles()->ProcessYoungObjects( + nullptr, &IsUnmarkedObjectForYoungGeneration); + } } } @@ -6096,9 +6093,7 @@ class YoungGenerationMarkingTask { marking_state_(heap->marking_state()), visitor_(isolate, marking_state_, marking_worklists_local()) {} - void MarkObject(Object object) { - if (!Heap::InYoungGeneration(object)) return; - HeapObject heap_object = HeapObject::cast(object); + void MarkYoungObject(HeapObject heap_object) { if (marking_state_->WhiteToGrey(heap_object)) { visitor_.Visit(heap_object); // Objects transition to black when visited. @@ -6106,7 +6101,7 @@ class YoungGenerationMarkingTask { } } - void EmptyMarkingWorklist() { + void DrainMarkingWorklist() { HeapObject object; while (marking_worklists_local_->Pop(&object) || marking_worklists_local_->PopOnHold(&object)) { @@ -6129,13 +6124,17 @@ class YoungGenerationMarkingTask { }; void PageMarkingItem::Process(YoungGenerationMarkingTask* task) { - TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "PageMarkingItem::Process"); base::MutexGuard guard(chunk_->mutex()); - MarkUntypedPointers(task); - MarkTypedPointers(task); + if (slots_type_ == SlotsType::kRegularSlots) { + MarkUntypedPointers(task); + } else { + MarkTypedPointers(task); + } } void PageMarkingItem::MarkUntypedPointers(YoungGenerationMarkingTask* task) { + TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), + "PageMarkingItem::MarkUntypedPointers"); InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew( chunk_, InvalidatedSlotsFilter::LivenessCheck::kNo); RememberedSet::Iterate( @@ -6151,6 +6150,8 @@ void PageMarkingItem::MarkUntypedPointers(YoungGenerationMarkingTask* task) { } void PageMarkingItem::MarkTypedPointers(YoungGenerationMarkingTask* task) { + TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), + "PageMarkingItem::MarkTypedPointers"); RememberedSet::IterateTyped( chunk_, [this, task](SlotType slot_type, Address slot) { return UpdateTypedSlotHelper::UpdateTypedSlot( @@ -6168,15 +6169,10 @@ V8_INLINE SlotCallbackResult PageMarkingItem::CheckAndMarkObject( std::is_same::value, "Only FullMaybeObjectSlot and MaybeObjectSlot are expected here"); MaybeObject object = *slot; - if (Heap::InYoungGeneration(object)) { - // Marking happens before flipping the young generation, so the object - // has to be in a to page. 
- DCHECK(Heap::InToPage(object)); - HeapObject heap_object; - bool success = object.GetHeapObject(&heap_object); - USE(success); - DCHECK(success); - task->MarkObject(heap_object); + HeapObject heap_object; + if (object.GetHeapObject(&heap_object) && + Heap::InYoungGeneration(heap_object)) { + task->MarkYoungObject(heap_object); return KEEP_SLOT; } return REMOVE_SLOT; @@ -6200,7 +6196,7 @@ size_t YoungGenerationMarkingJob::GetMaxConcurrency(size_t worker_count) const { const int kPagesPerTask = 2; size_t items = remaining_marking_items_.load(std::memory_order_relaxed); size_t num_tasks; - if (!incremental()) { + if (ShouldDrainMarkingWorklist()) { num_tasks = std::max( (items + 1) / kPagesPerTask, global_worklists_->shared()->Size() + @@ -6224,8 +6220,8 @@ void YoungGenerationMarkingJob::ProcessItems(JobDelegate* delegate) { TimedScope scope(&marking_time); YoungGenerationMarkingTask task(isolate_, heap_, global_worklists_); ProcessMarkingItems(&task); - if (!incremental()) { - task.EmptyMarkingWorklist(); + if (ShouldDrainMarkingWorklist()) { + task.DrainMarkingWorklist(); } else { task.PublishMarkingWorklist(); } @@ -6250,8 +6246,8 @@ void YoungGenerationMarkingJob::ProcessMarkingItems( auto& work_item = marking_items_[i]; if (!work_item.TryAcquire()) break; work_item.Process(task); - if (!incremental()) { - task->EmptyMarkingWorklist(); + if (ShouldDrainMarkingWorklist()) { + task->DrainMarkingWorklist(); } if (remaining_marking_items_.fetch_sub(1, std::memory_order_relaxed) <= 1) { @@ -6261,7 +6257,7 @@ void YoungGenerationMarkingJob::ProcessMarkingItems( } } -void MinorMarkCompactCollector::MarkRootSetInParallel( +void MinorMarkCompactCollector::MarkLiveObjectsInParallel( RootMarkingVisitor* root_visitor, bool was_marked_incrementally) { std::vector marking_items; @@ -6297,25 +6293,35 @@ void MinorMarkCompactCollector::MarkRootSetInParallel( // Create items for each page. RememberedSet::IterateMemoryChunks( heap(), [&marking_items](MemoryChunk* chunk) { - marking_items.emplace_back(chunk); + if (chunk->slot_set()) { + marking_items.emplace_back( + chunk, PageMarkingItem::SlotsType::kRegularSlots); + } else { + chunk->ReleaseInvalidatedSlots(); + } + + if (chunk->typed_slot_set()) { + marking_items.emplace_back( + chunk, PageMarkingItem::SlotsType::kTypedSlots); + } }); } } - // CppGC starts parallel marking tasks that will trace TracedReferences. Start - // if after marking of traced handles. - if (heap_->cpp_heap()) { - CppHeap::From(heap_->cpp_heap()) - ->EnterFinalPause(heap_->embedder_stack_state_); - } - // Add tasks and run in parallel. { + TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_CLOSURE_PARALLEL); + + // CppGC starts parallel marking tasks that will trace TracedReferences. + if (heap_->cpp_heap()) { + CppHeap::From(heap_->cpp_heap()) + ->EnterFinalPause(heap_->embedder_stack_state_); + } + // The main thread might hold local items, while GlobalPoolSize() == // 0. Flush to ensure these items are visible globally and picked up // by the job. 
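PageMarkingItem::CheckAndMarkObject above no longer assumes that every OLD_TO_NEW slot still points into the young generation; the return value is what keeps the remembered set tidy. A restatement of that contract with explanatory comments (CheckAndMark is an illustrative name, the body mirrors the new code above):

template <typename TSlot>
SlotCallbackResult CheckAndMark(YoungGenerationMarkingTask* task, TSlot slot) {
  HeapObject heap_object;
  // GetHeapObject() filters out Smis and cleared weak references.
  if ((*slot).GetHeapObject(&heap_object) &&
      Heap::InYoungGeneration(heap_object)) {
    task->MarkYoungObject(heap_object);
    // Target is still young and may move; keep the OLD_TO_NEW entry so the
    // pointer can be updated after evacuation.
    return KEEP_SLOT;
  }
  // Target died or was already promoted; a stale entry would only bloat the
  // remembered set.
  return REMOVE_SLOT;
}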
local_marking_worklists_->Publish(); - TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_CLOSURE_PARALLEL); V8::GetCurrentPlatform() ->CreateJob(v8::TaskPriority::kUserBlocking, std::make_unique( @@ -6353,34 +6359,24 @@ void MinorMarkCompactCollector::MarkLiveObjects() { RootMarkingVisitor root_visitor(this); - MarkRootSetInParallel(&root_visitor, was_marked_incrementally); + MarkLiveObjectsInParallel(&root_visitor, was_marked_incrementally); - if (auto* cpp_heap = CppHeap::From(heap_->cpp_heap())) { - cpp_heap->FinishConcurrentMarkingIfNeeded(); - } - - // Mark rest on the main thread. { + // Finish marking the transitive closure on the main thread. TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_CLOSURE); + if (auto* cpp_heap = CppHeap::From(heap_->cpp_heap())) { + cpp_heap->FinishConcurrentMarkingIfNeeded(); + } DrainMarkingWorklist(); } - { - TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES); - isolate()->global_handles()->ProcessWeakYoungObjects( - &root_visitor, &IsUnmarkedObjectForYoungGeneration); - isolate()->traced_handles()->ProcessYoungObjects( - &root_visitor, &IsUnmarkedObjectForYoungGeneration); - DrainMarkingWorklist(); - } - - if (v8_flags.minor_mc_trace_fragmentation) { - TraceFragmentation(); - } - if (was_marked_incrementally) { MarkingBarrier::DeactivateAll(heap()); } + + if (v8_flags.minor_mc_trace_fragmentation) { + TraceFragmentation(); + } } void MinorMarkCompactCollector::DrainMarkingWorklist() { diff --git a/src/heap/mark-compact.h b/src/heap/mark-compact.h index e1fe165346..07f21f4355 100644 --- a/src/heap/mark-compact.h +++ b/src/heap/mark-compact.h @@ -11,6 +11,7 @@ #include "include/v8-internal.h" #include "src/heap/base/worklist.h" #include "src/heap/concurrent-marking.h" +#include "src/heap/index-generator.h" #include "src/heap/marking-state.h" #include "src/heap/marking-visitor.h" #include "src/heap/marking-worklist.h" @@ -34,6 +35,7 @@ class PagedNewSpace; class ReadOnlySpace; class RecordMigratedSlotVisitor; class UpdatingItem; +class YoungGenerationMarkingTask; class MarkBitCellIterator { public: @@ -710,8 +712,6 @@ class MinorMarkCompactCollector final : public CollectorBase { // Perform Wrapper Tracing if in use. 
void PerformWrapperTracing(); - static bool IsUnmarkedYoungHeapObject(Heap* heap, FullObjectSlot p); - private: class RootMarkingVisitor; @@ -721,8 +721,8 @@ class MinorMarkCompactCollector final : public CollectorBase { Sweeper* sweeper() { return sweeper_; } void MarkLiveObjects(); - void MarkRootSetInParallel(RootMarkingVisitor* root_visitor, - bool was_marked_incrementally); + void MarkLiveObjectsInParallel(RootMarkingVisitor* root_visitor, + bool was_marked_incrementally); V8_INLINE void MarkRootObject(HeapObject obj); void DrainMarkingWorklist(); void TraceFragmentation(); @@ -753,6 +753,65 @@ class MinorMarkCompactCollector final : public CollectorBase { friend class YoungGenerationMainMarkingVisitor; }; +class PageMarkingItem : public ParallelWorkItem { + public: + enum class SlotsType { kRegularSlots, kTypedSlots }; + + PageMarkingItem(MemoryChunk* chunk, SlotsType slots_type) + : chunk_(chunk), slots_type_(slots_type) {} + ~PageMarkingItem() = default; + + void Process(YoungGenerationMarkingTask* task); + + private: + inline Heap* heap() { return chunk_->heap(); } + + void MarkUntypedPointers(YoungGenerationMarkingTask* task); + void MarkTypedPointers(YoungGenerationMarkingTask* task); + template + V8_INLINE SlotCallbackResult + CheckAndMarkObject(YoungGenerationMarkingTask* task, TSlot slot); + + MemoryChunk* chunk_; + const SlotsType slots_type_; +}; + +enum class YoungMarkingJobType { kAtomic, kIncremental }; + +class YoungGenerationMarkingJob : public v8::JobTask { + public: + YoungGenerationMarkingJob(Isolate* isolate, Heap* heap, + MarkingWorklists* global_worklists, + std::vector marking_items, + YoungMarkingJobType young_marking_job_type) + : isolate_(isolate), + heap_(heap), + global_worklists_(global_worklists), + marking_items_(std::move(marking_items)), + remaining_marking_items_(marking_items_.size()), + generator_(marking_items_.size()), + young_marking_job_type_(young_marking_job_type) {} + + void Run(JobDelegate* delegate) override; + size_t GetMaxConcurrency(size_t worker_count) const override; + + bool ShouldDrainMarkingWorklist() const { + return young_marking_job_type_ == YoungMarkingJobType::kAtomic; + } + + private: + void ProcessItems(JobDelegate* delegate); + void ProcessMarkingItems(YoungGenerationMarkingTask* task); + + Isolate* isolate_; + Heap* heap_; + MarkingWorklists* global_worklists_; + std::vector marking_items_; + std::atomic_size_t remaining_marking_items_{0}; + IndexGenerator generator_; + YoungMarkingJobType young_marking_job_type_; +}; + } // namespace internal } // namespace v8 diff --git a/src/heap/memory-chunk-layout.h b/src/heap/memory-chunk-layout.h index 67e3a7b9dd..fd65684b12 100644 --- a/src/heap/memory-chunk-layout.h +++ b/src/heap/memory-chunk-layout.h @@ -39,11 +39,7 @@ class V8_EXPORT_PRIVATE MemoryChunkLayout { public: static constexpr int kNumSets = NUMBER_OF_REMEMBERED_SET_TYPES; static constexpr int kNumTypes = ExternalBackingStoreType::kNumTypes; -#if V8_CC_MSVC && V8_TARGET_ARCH_IA32 - static constexpr int kMemoryChunkAlignment = 8; -#else static constexpr int kMemoryChunkAlignment = sizeof(size_t); -#endif // V8_CC_MSVC && V8_TARGET_ARCH_IA32 #define FIELD(Type, Name) \ k##Name##Offset, k##Name##End = k##Name##Offset + sizeof(Type) - 1 enum Header { @@ -73,7 +69,7 @@ class V8_EXPORT_PRIVATE MemoryChunkLayout { FIELD(FreeListCategory**, Categories), FIELD(CodeObjectRegistry*, CodeObjectRegistry), FIELD(PossiblyEmptyBuckets, PossiblyEmptyBuckets), - FIELD(ActiveSystemPages, ActiveSystemPages), + FIELD(ActiveSystemPages*, 
ActiveSystemPages), #ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB FIELD(ObjectStartBitmap, ObjectStartBitmap), #endif // V8_ENABLE_INNER_POINTER_RESOLUTION_OSB diff --git a/src/heap/memory-chunk.cc b/src/heap/memory-chunk.cc index a2238416f5..6a98d11ffe 100644 --- a/src/heap/memory-chunk.cc +++ b/src/heap/memory-chunk.cc @@ -182,11 +182,13 @@ MemoryChunk::MemoryChunk(Heap* heap, BaseSpace* space, size_t chunk_size, possibly_empty_buckets_.Initialize(); if (page_size == PageSize::kRegular) { - active_system_pages_.Init(MemoryChunkLayout::kMemoryChunkHeaderSize, - MemoryAllocator::GetCommitPageSizeBits(), size()); + active_system_pages_ = new ActiveSystemPages; + active_system_pages_->Init(MemoryChunkLayout::kMemoryChunkHeaderSize, + MemoryAllocator::GetCommitPageSizeBits(), + size()); } else { // We do not track active system pages for large pages. - active_system_pages_.Clear(); + active_system_pages_ = nullptr; } // All pages of a shared heap need to be marked with this flag. @@ -202,7 +204,7 @@ MemoryChunk::MemoryChunk(Heap* heap, BaseSpace* space, size_t chunk_size, size_t MemoryChunk::CommittedPhysicalMemory() const { if (!base::OS::HasLazyCommits() || IsLargePage()) return size(); - return active_system_pages_.Size(MemoryAllocator::GetCommitPageSizeBits()); + return active_system_pages_->Size(MemoryAllocator::GetCommitPageSizeBits()); } void MemoryChunk::SetOldGenerationPageFlags(bool is_marking) { @@ -245,6 +247,11 @@ void MemoryChunk::ReleaseAllocatedMemoryNeededForWritableChunk() { code_object_registry_ = nullptr; } + if (active_system_pages_ != nullptr) { + delete active_system_pages_; + active_system_pages_ = nullptr; + } + possibly_empty_buckets_.Release(); ReleaseSlotSet(); ReleaseSlotSet(); diff --git a/src/heap/memory-chunk.h b/src/heap/memory-chunk.h index 8e74f9edc2..f532e28de0 100644 --- a/src/heap/memory-chunk.h +++ b/src/heap/memory-chunk.h @@ -297,7 +297,7 @@ class MemoryChunk : public BasicMemoryChunk { PossiblyEmptyBuckets possibly_empty_buckets_; - ActiveSystemPages active_system_pages_; + ActiveSystemPages* active_system_pages_; #ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB ObjectStartBitmap object_start_bitmap_; diff --git a/src/heap/paged-spaces.h b/src/heap/paged-spaces.h index c06ca275f2..14a9c652dd 100644 --- a/src/heap/paged-spaces.h +++ b/src/heap/paged-spaces.h @@ -291,7 +291,9 @@ class V8_EXPORT_PRIVATE PagedSpaceBase base::Optional guard_; }; - bool SupportsConcurrentAllocation() const { return !is_compaction_space(); } + bool SupportsConcurrentAllocation() const { + return !is_compaction_space() && (identity() != NEW_SPACE); + } // Set space linear allocation area. 
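After the MemoryChunk change above, active_system_pages_ is an owning raw pointer: regular pages allocate an ActiveSystemPages in the constructor and free it in ReleaseAllocatedMemoryNeededForWritableChunk(), while large pages leave it null and rely on the IsLargePage() early return in CommittedPhysicalMemory(). A guarded access sketch of the invariant new callers have to respect (the helper name is illustrative):

size_t TrackedCommittedSize(Page* page) {
  // Only regular pages allocate the tracker; LargePage never constructs one
  // and keeps active_system_pages_ == nullptr.
  ActiveSystemPages* tracker = page->active_system_pages();
  DCHECK_NOT_NULL(tracker);
  return tracker->Size(MemoryAllocator::GetCommitPageSizeBits());
}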
void SetTopAndLimit(Address top, Address limit); diff --git a/src/heap/remembered-set-inl.h b/src/heap/remembered-set-inl.h index bd8f4adf96..6af093d25e 100644 --- a/src/heap/remembered-set-inl.h +++ b/src/heap/remembered-set-inl.h @@ -38,7 +38,7 @@ SlotCallbackResult UpdateTypedSlotHelper::UpdateTypedSlot(Heap* heap, } case SlotType::kConstPoolEmbeddedObjectCompressed: { HeapObject old_target = - HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTaggedAny( + HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTagged( heap->isolate(), base::Memory(addr)))); HeapObject new_target = old_target; SlotCallbackResult result = callback(FullMaybeObjectSlot(&new_target)); @@ -80,7 +80,7 @@ HeapObject UpdateTypedSlotHelper::GetTargetObject(Heap* heap, return rinfo.target_object(heap->isolate()); } case SlotType::kConstPoolEmbeddedObjectCompressed: { - Address full = V8HeapCompressionScheme::DecompressTaggedAny( + Address full = V8HeapCompressionScheme::DecompressTagged( heap->isolate(), base::Memory(addr)); return HeapObject::cast(Object(full)); } diff --git a/src/heap/scavenger-inl.h b/src/heap/scavenger-inl.h index bc3b97f667..96a192f076 100644 --- a/src/heap/scavenger-inl.h +++ b/src/heap/scavenger-inl.h @@ -298,7 +298,7 @@ SlotCallbackResult Scavenger::EvacuateThinString(Map map, THeapObjectSlot slot, static_assert(std::is_same::value || std::is_same::value, "Only FullHeapObjectSlot and HeapObjectSlot are expected here"); - if (!is_incremental_marking_ && shortcut_strings_) { + if (shortcut_strings_) { // The ThinString should die after Scavenge, so avoid writing the proper // forwarding pointer and instead just signal the actual object as forwarded // reference. @@ -326,7 +326,7 @@ SlotCallbackResult Scavenger::EvacuateShortcutCandidate(Map map, "Only FullHeapObjectSlot and HeapObjectSlot are expected here"); DCHECK(IsShortcutCandidate(map.instance_type())); - if (!is_incremental_marking_ && shortcut_strings_ && + if (shortcut_strings_ && object.unchecked_second() == ReadOnlyRoots(heap()).empty_string()) { HeapObject first = HeapObject::cast(object.unchecked_first()); diff --git a/src/heap/scavenger.cc b/src/heap/scavenger.cc index ec4ea57576..d7689171b4 100644 --- a/src/heap/scavenger.cc +++ b/src/heap/scavenger.cc @@ -610,6 +610,18 @@ ConcurrentAllocator* CreateSharedOldAllocator(Heap* heap) { } return nullptr; } + +// This returns true if the scavenger runs in a client isolate and incremental +// marking is enabled in the shared space isolate. 
+bool IsSharedIncrementalMarking(Isolate* isolate) { + return isolate->has_shared_heap() && v8_flags.shared_space && + !isolate->is_shared_space_isolate() && + isolate->shared_space_isolate() + ->heap() + ->incremental_marking() + ->IsMarking(); +} + } // namespace Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging, @@ -633,8 +645,10 @@ Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging, is_compacting_(heap->incremental_marking()->IsCompacting()), shared_string_table_(shared_old_allocator_.get() != nullptr), mark_shared_heap_(heap->isolate()->is_shared_space_isolate()), - shortcut_strings_(!heap->IsGCWithStack() || - v8_flags.shortcut_strings_with_stack) {} + shortcut_strings_( + (!heap->IsGCWithStack() || v8_flags.shortcut_strings_with_stack) && + !is_incremental_marking_ && + !IsSharedIncrementalMarking(heap->isolate())) {} void Scavenger::IterateAndScavengePromotedObject(HeapObject target, Map map, int size) { diff --git a/src/heap/setup-heap-internal.cc b/src/heap/setup-heap-internal.cc index 44d9e14585..9a1d68a1b1 100644 --- a/src/heap/setup-heap-internal.cc +++ b/src/heap/setup-heap-internal.cc @@ -440,8 +440,6 @@ bool Heap::CreateInitialReadOnlyMaps() { ALLOCATE_VARSIZE_MAP(FEEDBACK_VECTOR_TYPE, feedback_vector) ALLOCATE_PRIMITIVE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number, Context::NUMBER_FUNCTION_INDEX) - ALLOCATE_PRIMITIVE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol, - Context::SYMBOL_FUNCTION_INDEX) ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign) ALLOCATE_MAP(MEGA_DOM_HANDLER_TYPE, MegaDomHandler::kSize, mega_dom_handler) @@ -457,6 +455,9 @@ bool Heap::CreateInitialReadOnlyMaps() { ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, basic_block_counters_marker); ALLOCATE_VARSIZE_MAP(BIGINT_TYPE, bigint); + ALLOCATE_PRIMITIVE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol, + Context::SYMBOL_FUNCTION_INDEX) + for (unsigned i = 0; i < arraysize(string_type_table); i++) { const StringTypeTable& entry = string_type_table[i]; Map map; diff --git a/src/heap/spaces.h b/src/heap/spaces.h index 53e35d129c..29ac3ad0ed 100644 --- a/src/heap/spaces.h +++ b/src/heap/spaces.h @@ -315,7 +315,7 @@ class Page : public MemoryChunk { void AllocateFreeListCategories(); void ReleaseFreeListCategories(); - ActiveSystemPages* active_system_pages() { return &active_system_pages_; } + ActiveSystemPages* active_system_pages() { return active_system_pages_; } template void ClearTypedSlotsInFreeMemory(const TypedSlotSet::FreeRangesMap& ranges) { diff --git a/src/init/heap-symbols.h b/src/init/heap-symbols.h index e6be75428e..ea50eafb23 100644 --- a/src/init/heap-symbols.h +++ b/src/init/heap-symbols.h @@ -629,6 +629,7 @@ F(MC_SWEEP_NEW_LO) \ F(MC_SWEEP_OLD) \ F(MC_SWEEP_SHARED) \ + F(MC_SWEEP_SHARED_LO) \ F(MINOR_MARK_COMPACTOR) \ F(MINOR_MC) \ TOP_MINOR_MC_SCOPES(F) \ @@ -643,7 +644,6 @@ F(MINOR_MC_EVACUATE_REBALANCE) \ F(MINOR_MC_EVACUATE_UPDATE_STRING_TABLE) \ F(MINOR_MC_FINISH_SWEEP_ARRAY_BUFFERS) \ - F(MINOR_MC_MARK_GLOBAL_HANDLES) \ F(MINOR_MC_MARK_FINISH_INCREMENTAL) \ F(MINOR_MC_MARK_PARALLEL) \ F(MINOR_MC_MARK_SEED) \ diff --git a/src/logging/log.cc b/src/logging/log.cc index 671dd69403..9834f4df3e 100644 --- a/src/logging/log.cc +++ b/src/logging/log.cc @@ -19,6 +19,7 @@ #include "src/codegen/bailout-reason.h" #include "src/codegen/macro-assembler.h" #include "src/codegen/source-position-table.h" +#include "src/common/assert-scope.h" #include "src/deoptimizer/deoptimizer.h" #include "src/diagnostics/perf-jit.h" #include "src/execution/isolate.h" @@ 
-248,7 +249,8 @@ void CodeEventLogger::CodeCreateEvent(CodeTag tag, Handle code, DCHECK(is_listening_to_code_events()); name_buffer_->Init(tag); name_buffer_->AppendBytes(comment); - LogRecordedBuffer(code, MaybeHandle(), + DisallowGarbageCollection no_gc; + LogRecordedBuffer(*code, MaybeHandle(), name_buffer_->get(), name_buffer_->size()); } @@ -257,7 +259,8 @@ void CodeEventLogger::CodeCreateEvent(CodeTag tag, Handle code, DCHECK(is_listening_to_code_events()); name_buffer_->Init(tag); name_buffer_->AppendName(*name); - LogRecordedBuffer(code, MaybeHandle(), + DisallowGarbageCollection no_gc; + LogRecordedBuffer(*code, MaybeHandle(), name_buffer_->get(), name_buffer_->size()); } @@ -269,7 +272,8 @@ void CodeEventLogger::CodeCreateEvent(CodeTag tag, Handle code, name_buffer_->AppendBytes(ComputeMarker(*shared, *code)); name_buffer_->AppendByte(' '); name_buffer_->AppendName(*script_name); - LogRecordedBuffer(code, shared, name_buffer_->get(), name_buffer_->size()); + DisallowGarbageCollection no_gc; + LogRecordedBuffer(*code, shared, name_buffer_->get(), name_buffer_->size()); } void CodeEventLogger::CodeCreateEvent(CodeTag tag, Handle code, @@ -292,7 +296,8 @@ void CodeEventLogger::CodeCreateEvent(CodeTag tag, Handle code, name_buffer_->AppendInt(line); name_buffer_->AppendByte(':'); name_buffer_->AppendInt(column); - LogRecordedBuffer(code, shared, name_buffer_->get(), name_buffer_->size()); + DisallowGarbageCollection no_gc; + LogRecordedBuffer(*code, shared, name_buffer_->get(), name_buffer_->size()); } #if V8_ENABLE_WEBASSEMBLY @@ -312,6 +317,7 @@ void CodeEventLogger::CodeCreateEvent(CodeTag tag, const wasm::WasmCode* code, } name_buffer_->AppendByte('-'); name_buffer_->AppendBytes(ExecutionTierToString(code->tier())); + DisallowGarbageCollection no_gc; LogRecordedBuffer(code, name_buffer_->get(), name_buffer_->size()); } #endif // V8_ENABLE_WEBASSEMBLY @@ -321,7 +327,8 @@ void CodeEventLogger::RegExpCodeCreateEvent(Handle code, DCHECK(is_listening_to_code_events()); name_buffer_->Init(LogEventListener::CodeTag::kRegExp); name_buffer_->AppendString(*source); - LogRecordedBuffer(code, MaybeHandle(), + DisallowGarbageCollection no_gc; + LogRecordedBuffer(*code, MaybeHandle(), name_buffer_->get(), name_buffer_->size()); } @@ -338,7 +345,7 @@ class LinuxPerfBasicLogger : public CodeEventLogger { Handle shared) override {} private: - void LogRecordedBuffer(Handle code, + void LogRecordedBuffer(AbstractCode code, MaybeHandle maybe_shared, const char* name, int length) override; #if V8_ENABLE_WEBASSEMBLY @@ -414,18 +421,19 @@ void LinuxPerfBasicLogger::WriteLogRecordedBuffer(uintptr_t address, int size, size, name_length, name); } -void LinuxPerfBasicLogger::LogRecordedBuffer(Handle code, +void LinuxPerfBasicLogger::LogRecordedBuffer(AbstractCode code, MaybeHandle, const char* name, int length) { + DisallowGarbageCollection no_gc; PtrComprCageBase cage_base(isolate_); if (v8_flags.perf_basic_prof_only_functions && - CodeKindIsBuiltinOrJSFunction(code->kind(cage_base))) { + CodeKindIsBuiltinOrJSFunction(code.kind(cage_base))) { return; } WriteLogRecordedBuffer( - static_cast(code->InstructionStart(cage_base)), - code->InstructionSize(cage_base), name, length); + static_cast(code.InstructionStart(cage_base)), + code.InstructionSize(cage_base), name, length); } #if V8_ENABLE_WEBASSEMBLY @@ -637,7 +645,7 @@ class LowLevelLogger : public CodeEventLogger { void CodeMovingGCEvent() override; private: - void LogRecordedBuffer(Handle code, + void LogRecordedBuffer(AbstractCode code, MaybeHandle 
maybe_shared, const char* name, int length) override; #if V8_ENABLE_WEBASSEMBLY @@ -727,19 +735,19 @@ void LowLevelLogger::LogCodeInfo() { LogWriteBytes(arch, sizeof(arch)); } -void LowLevelLogger::LogRecordedBuffer(Handle code, +void LowLevelLogger::LogRecordedBuffer(AbstractCode code, MaybeHandle, const char* name, int length) { + DisallowGarbageCollection no_gc; PtrComprCageBase cage_base(isolate_); CodeCreateStruct event; event.name_size = length; - event.code_address = code->InstructionStart(cage_base); - event.code_size = code->InstructionSize(cage_base); + event.code_address = code.InstructionStart(cage_base); + event.code_size = code.InstructionSize(cage_base); LogWriteStruct(event); LogWriteBytes(name, length); - LogWriteBytes( - reinterpret_cast(code->InstructionStart(cage_base)), - code->InstructionSize(cage_base)); + LogWriteBytes(reinterpret_cast(code.InstructionStart(cage_base)), + code.InstructionSize(cage_base)); } #if V8_ENABLE_WEBASSEMBLY @@ -801,7 +809,7 @@ class JitLogger : public CodeEventLogger { JitCodeEvent::CodeType code_type); private: - void LogRecordedBuffer(Handle code, + void LogRecordedBuffer(AbstractCode code, MaybeHandle maybe_shared, const char* name, int length) override; #if V8_ENABLE_WEBASSEMBLY @@ -818,16 +826,17 @@ JitLogger::JitLogger(Isolate* isolate, JitCodeEventHandler code_event_handler) DCHECK_NOT_NULL(code_event_handler); } -void JitLogger::LogRecordedBuffer(Handle code, +void JitLogger::LogRecordedBuffer(AbstractCode code, MaybeHandle maybe_shared, const char* name, int length) { + DisallowGarbageCollection no_gc; PtrComprCageBase cage_base(isolate_); JitCodeEvent event; event.type = JitCodeEvent::CODE_ADDED; - event.code_start = reinterpret_cast(code->InstructionStart(cage_base)); - event.code_type = code->IsCode(cage_base) ? JitCodeEvent::JIT_CODE - : JitCodeEvent::BYTE_CODE; - event.code_len = code->InstructionSize(cage_base); + event.code_start = reinterpret_cast(code.InstructionStart(cage_base)); + event.code_type = + code.IsCode(cage_base) ? 
JitCodeEvent::JIT_CODE : JitCodeEvent::BYTE_CODE; + event.code_len = code.InstructionSize(cage_base); Handle shared; if (maybe_shared.ToHandle(&shared) && shared->script(cage_base).IsScript(cage_base)) { diff --git a/src/logging/log.h b/src/logging/log.h index 3e211388ee..d411da512b 100644 --- a/src/logging/log.h +++ b/src/logging/log.h @@ -448,7 +448,7 @@ class V8_EXPORT_PRIVATE CodeEventLogger : public LogEventListener { private: class NameBuffer; - virtual void LogRecordedBuffer(Handle code, + virtual void LogRecordedBuffer(AbstractCode code, MaybeHandle maybe_shared, const char* name, int length) = 0; #if V8_ENABLE_WEBASSEMBLY diff --git a/src/maglev/arm64/maglev-assembler-arm64-inl.h b/src/maglev/arm64/maglev-assembler-arm64-inl.h index 5c8f006d51..01765d3b4b 100644 --- a/src/maglev/arm64/maglev-assembler-arm64-inl.h +++ b/src/maglev/arm64/maglev-assembler-arm64-inl.h @@ -34,6 +34,19 @@ constexpr Condition ConditionFor(Operation operation) { } } +inline int ShiftFromScale(int n) { + switch (n) { + case 1: + return 0; + case 2: + return 1; + case 4: + return 2; + default: + UNREACHABLE(); + } +} + class MaglevAssembler::ScratchRegisterScope { public: explicit ScratchRegisterScope(MaglevAssembler* masm) : wrapped_scope_(masm) { @@ -352,6 +365,18 @@ inline void MaglevAssembler::BuildTypedArrayDataPointer(Register data_pointer, Add(data_pointer, data_pointer, base); } +inline void MaglevAssembler::LoadTaggedFieldByIndex(Register result, + Register object, + Register index, int scale, + int offset) { + if (scale == 1) { + Add(result, object, index); + } else { + Add(result, object, Operand(index, LSL, ShiftFromScale(scale / 2))); + } + MacroAssembler::LoadTaggedField(result, FieldMemOperand(result, offset)); +} + inline void MaglevAssembler::LoadBoundedSizeFromObject(Register result, Register object, int offset) { @@ -373,26 +398,26 @@ inline void MaglevAssembler::LoadExternalPointerField(Register result, inline void MaglevAssembler::LoadSignedField(Register result, MemOperand operand, int size) { if (size == 1) { - ldrsb(result, operand); + Ldrsb(result, operand); } else if (size == 2) { - ldrsh(result, operand); + Ldrsh(result, operand); } else { DCHECK_EQ(size, 4); DCHECK(result.IsW()); - ldr(result, operand); + Ldr(result, operand); } } inline void MaglevAssembler::LoadUnsignedField(Register result, MemOperand operand, int size) { if (size == 1) { - ldrb(result, operand); + Ldrb(result, operand); } else if (size == 2) { - ldrh(result, operand); + Ldrh(result, operand); } else { DCHECK_EQ(size, 4); DCHECK(result.IsW()); - ldr(result, operand); + Ldr(result, operand); } } @@ -400,13 +425,13 @@ inline void MaglevAssembler::StoreField(MemOperand operand, Register value, int size) { DCHECK(size == 1 || size == 2 || size == 4); if (size == 1) { - strb(value, operand); + Strb(value, operand); } else if (size == 2) { - strh(value, operand); + Strh(value, operand); } else { DCHECK_EQ(size, 4); DCHECK(value.IsW()); - str(value, operand); + Str(value, operand); } } @@ -445,7 +470,7 @@ inline void MaglevAssembler::Move(DoubleRegister dst, MemOperand src) { Ldr(dst, src); } inline void MaglevAssembler::Move(DoubleRegister dst, DoubleRegister src) { - fmov(dst, src); + Fmov(dst, src); } inline void MaglevAssembler::Move(Register dst, Smi src) { MacroAssembler::Move(dst, src); @@ -472,6 +497,9 @@ inline void MaglevAssembler::Move(Register dst, Handle obj) { inline void MaglevAssembler::SignExtend32To64Bits(Register dst, Register src) { Mov(dst, Operand(src.W(), SXTW)); } +inline void 
MaglevAssembler::NegateInt32(Register val) { + Neg(val.W(), val.W()); +} template inline void MaglevAssembler::DeoptIfBufferDetached(Register array, @@ -483,10 +511,10 @@ inline void MaglevAssembler::DeoptIfBufferDetached(Register array, ->DependOnArrayBufferDetachingProtector()) { // A detached buffer leads to megamorphic feedback, so we won't have a deopt // loop if we deopt here. - LoadTaggedPointerField( - scratch, FieldMemOperand(array, JSArrayBufferView::kBufferOffset)); - LoadTaggedPointerField( - scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset)); + LoadTaggedField(scratch, + FieldMemOperand(array, JSArrayBufferView::kBufferOffset)); + LoadTaggedField(scratch, + FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset)); Tst(scratch.W(), Immediate(JSArrayBuffer::WasDetachedBit::kMask)); EmitEagerDeoptIf(ne, DeoptimizeReason::kArrayBufferWasDetached, node); } @@ -519,6 +547,20 @@ inline void MaglevAssembler::CompareObjectTypeRange(Register heap_object, CompareInstanceTypeRange(scratch, scratch, lower_limit, higher_limit); } +inline void MaglevAssembler::CompareInstanceTypeRange( + Register map, InstanceType lower_limit, InstanceType higher_limit) { + ScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + CompareInstanceTypeRange(map, scratch, lower_limit, higher_limit); +} + +inline void MaglevAssembler::CompareInstanceTypeRange( + Register map, Register instance_type_out, InstanceType lower_limit, + InstanceType higher_limit) { + MacroAssembler::CompareInstanceTypeRange(map, instance_type_out, lower_limit, + higher_limit); +} + inline void MaglevAssembler::CompareTagged(Register reg, Handle obj) { ScratchRegisterScope temps(this); @@ -539,7 +581,7 @@ inline void MaglevAssembler::Jump(Label* target, Label::Distance) { B(target); } inline void MaglevAssembler::JumpIf(Condition cond, Label* target, Label::Distance) { - b(target, cond); + B(target, cond); } inline void MaglevAssembler::JumpIfRoot(Register with, RootIndex index, @@ -571,6 +613,23 @@ inline void MaglevAssembler::CompareInt32AndJumpIf(Register r1, Register r2, CompareAndBranch(r1.W(), r2.W(), cond, target); } +inline void MaglevAssembler::CompareInt32AndJumpIf(Register r1, int32_t value, + Condition cond, + Label* target, + Label::Distance distance) { + CompareAndBranch(r1.W(), Immediate(value), cond, target); +} + +inline void MaglevAssembler::TestInt32AndJumpIfAnySet( + Register r1, int32_t mask, Label* target, Label::Distance distance) { + TestAndBranchIfAnySet(r1.W(), mask, target); +} + +inline void MaglevAssembler::TestInt32AndJumpIfAllClear( + Register r1, int32_t mask, Label* target, Label::Distance distance) { + TestAndBranchIfAllClear(r1.W(), mask, target); +} + inline void MaglevAssembler::LoadHeapNumberValue(DoubleRegister result, Register heap_number) { Ldr(result, FieldMemOperand(heap_number, HeapNumber::kValueOffset)); diff --git a/src/maglev/arm64/maglev-assembler-arm64.cc b/src/maglev/arm64/maglev-assembler-arm64.cc index 289eae2402..8b66691465 100644 --- a/src/maglev/arm64/maglev-assembler-arm64.cc +++ b/src/maglev/arm64/maglev-assembler-arm64.cc @@ -66,7 +66,7 @@ void MaglevAssembler::Allocate(RegisterSnapshot& register_snapshot, save_register_state.DefineSafepoint(); __ Move(object, kReturnRegister0); } - __ jmp(*done); + __ B(*done); }, register_snapshot, object, in_new_space ? 
Builtin::kAllocateRegularInYoungGeneration @@ -314,7 +314,7 @@ void MaglevAssembler::Prologue(Graph* graph) { Register flags = temps.Acquire(); Register feedback_vector = temps.Acquire(); - DeferredCodeInfo* deferred_flags_need_processing = PushDeferredCode( + Label* deferred_flags_need_processing = MakeDeferredCode( [](MaglevAssembler* masm, Register flags, Register feedback_vector) { ASM_CODE_COMMENT_STRING(masm, "Optimized marker check"); // TODO(leszeks): This could definitely be a builtin that we @@ -328,7 +328,7 @@ void MaglevAssembler::Prologue(Graph* graph) { compilation_info()->toplevel_compilation_unit()->feedback().object()); LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing( flags, feedback_vector, CodeKind::MAGLEV, - &deferred_flags_need_processing->deferred_code_label); + deferred_flags_need_processing); } EnterFrame(StackFrame::MAGLEV); @@ -423,7 +423,7 @@ void MaglevAssembler::Prologue(Graph* graph) { Push(xzr, xzr); } Subs(count, count, Immediate(1)); - b(&loop, gt); + B(&loop, gt); } } if (remaining_stack_slots > 0) { @@ -431,7 +431,7 @@ void MaglevAssembler::Prologue(Graph* graph) { remaining_stack_slots += (remaining_stack_slots % 2); // Extend sp by the size of the remaining untagged part of the frame, // no need to initialise these. - sub(sp, sp, Immediate(remaining_stack_slots * kSystemPointerSize)); + Sub(sp, sp, Immediate(remaining_stack_slots * kSystemPointerSize)); } } @@ -488,7 +488,7 @@ void MaglevAssembler::LoadSingleCharacterString(Register result, Register table = scratch; LoadRoot(table, RootIndex::kSingleCharacterStringTable); Add(table, table, Operand(char_code, LSL, kTaggedSizeLog2)); - DecompressAnyTagged(result, FieldMemOperand(table, FixedArray::kHeaderSize)); + DecompressTagged(result, FieldMemOperand(table, FixedArray::kHeaderSize)); } void MaglevAssembler::StringFromCharCode(RegisterSnapshot register_snapshot, @@ -538,7 +538,7 @@ void MaglevAssembler::StringCharCodeAt(RegisterSnapshot& register_snapshot, Label cons_string; Label sliced_string; - DeferredCodeInfo* deferred_runtime_call = PushDeferredCode( + Label* deferred_runtime_call = MakeDeferredCode( [](MaglevAssembler* masm, RegisterSnapshot register_snapshot, ZoneLabelRef done, Register result, Register string, Register index) { DCHECK(!register_snapshot.live_registers.has(result)); @@ -598,14 +598,14 @@ void MaglevAssembler::StringCharCodeAt(RegisterSnapshot& register_snapshot, Cmp(representation, Immediate(kSlicedStringTag)); B(&sliced_string, eq); Cmp(representation, Immediate(kThinStringTag)); - B(&deferred_runtime_call->deferred_code_label, ne); + B(deferred_runtime_call, ne); // Fallthrough to thin string. } // Is a thin string. 
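A pattern running through the maglev arm64 changes above (Ldrsb/Strb/Fmov/B/Sub/Neg and friends) is that the lowercase spellings are raw Assembler instructions while the capitalized spellings are the arm64 MacroAssembler wrappers, which assert that macro instructions are allowed and may expand into more than one instruction. Roughly (a sketch of the wrapper shape, not the exact V8 source):

void MacroAssembler::Ldr(const CPURegister& rt, const MemOperand& addr) {
  DCHECK(allow_macro_instructions());
  // May legalize the addressing mode (e.g. via a scratch register) before
  // emitting the raw instruction.
  ldr(rt, addr);
}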
{ - DecompressAnyTagged(string, - FieldMemOperand(string, ThinString::kActualOffset)); + DecompressTagged(string, + FieldMemOperand(string, ThinString::kActualOffset)); B(&loop); } @@ -616,8 +616,8 @@ void MaglevAssembler::StringCharCodeAt(RegisterSnapshot& register_snapshot, Ldr(offset.W(), FieldMemOperand(string, SlicedString::kOffsetOffset)); SmiUntag(offset); - DecompressAnyTagged(string, - FieldMemOperand(string, SlicedString::kParentOffset)); + DecompressTagged(string, + FieldMemOperand(string, SlicedString::kParentOffset)); Add(index, index, offset); B(&loop); } @@ -629,9 +629,8 @@ void MaglevAssembler::StringCharCodeAt(RegisterSnapshot& register_snapshot, Register second_string = instance_type; Ldr(second_string.W(), FieldMemOperand(string, ConsString::kSecondOffset)); CompareRoot(second_string, RootIndex::kempty_string); - B(&deferred_runtime_call->deferred_code_label, ne); - DecompressAnyTagged(string, - FieldMemOperand(string, ConsString::kFirstOffset)); + B(deferred_runtime_call, ne); + DecompressTagged(string, FieldMemOperand(string, ConsString::kFirstOffset)); B(&loop); // Try again with first string. } diff --git a/src/maglev/arm64/maglev-ir-arm64.cc b/src/maglev/arm64/maglev-ir-arm64.cc index b320aed150..a0cd986ac7 100644 --- a/src/maglev/arm64/maglev-ir-arm64.cc +++ b/src/maglev/arm64/maglev-ir-arm64.cc @@ -6,6 +6,7 @@ #include "src/codegen/arm64/assembler-arm64-inl.h" #include "src/codegen/arm64/register-arm64.h" #include "src/codegen/interface-descriptors-inl.h" +#include "src/maglev/arm64/maglev-assembler-arm64-inl.h" #include "src/maglev/maglev-assembler-inl.h" #include "src/maglev/maglev-graph-processor.h" #include "src/maglev/maglev-graph.h" @@ -35,7 +36,7 @@ void Int32NegateWithOverflow::GenerateCode(MaglevAssembler* masm, __ RecordComment("-- Jump to eager deopt"); __ Cbz(value, fail); - __ negs(out, value); + __ Negs(out, value); // Output register must not be a register input into the eager deopt info. DCHECK_REGLIST_EMPTY(RegList{out} & GetGeneralRegistersUsedAsInputs(eager_deopt_info())); @@ -92,8 +93,8 @@ void CheckJSObjectElementsBounds::GenerateCode(MaglevAssembler* masm, __ CompareObjectType(object, FIRST_JS_OBJECT_TYPE, scratch); __ Assert(ge, AbortReason::kUnexpectedValue); } - __ LoadAnyTaggedField(scratch, - FieldMemOperand(object, JSObject::kElementsOffset)); + __ LoadTaggedField(scratch, + FieldMemOperand(object, JSObject::kElementsOffset)); if (v8_flags.debug_code) { __ AssertNotSmi(scratch); } @@ -154,7 +155,7 @@ void BuiltinStringPrototypeCharCodeAt::GenerateCode( __ StringCharCodeAt(save_registers, ToRegister(result()), ToRegister(string_input()), ToRegister(index_input()), scratch, &done); - __ bind(&done); + __ Bind(&done); } int CreateEmptyObjectLiteral::MaxCallStackArgs() const { @@ -283,7 +284,7 @@ void CheckedTruncateFloat64ToInt32::GenerateCode(MaglevAssembler* masm, __ Cmp(high_word32_of_input, wzr); __ EmitEagerDeoptIf(lt, DeoptimizeReason::kNotInt32, this); - __ bind(&check_done); + __ Bind(&check_done); } void CheckedTruncateFloat64ToUint32::SetValueLocationConstraints() { @@ -318,7 +319,7 @@ void CheckedTruncateFloat64ToUint32::GenerateCode( __ Cmp(high_word32_of_input, wzr); __ EmitEagerDeoptIf(lt, DeoptimizeReason::kNotUint32, this); - __ bind(&check_done); + __ Bind(&check_done); } namespace { @@ -332,7 +333,7 @@ void EmitTruncateNumberToInt32(MaglevAssembler* masm, Register value, // If Smi, convert to Int32. 
__ SmiToInt32(result_reg, value); __ B(&done); - __ bind(&is_not_smi); + __ Bind(&is_not_smi); if (not_a_number != nullptr) { // Check if HeapNumber, deopt otherwise. Register scratch = temps.Acquire().W(); @@ -349,7 +350,7 @@ void EmitTruncateNumberToInt32(MaglevAssembler* masm, Register value, DoubleRegister double_value = temps.AcquireDouble(); __ Ldr(double_value, FieldMemOperand(value, HeapNumber::kValueOffset)); __ TruncateDoubleToInt32(result_reg, double_value); - __ bind(&done); + __ Bind(&done); } } // namespace @@ -420,7 +421,7 @@ void CheckMaps::GenerateCode(MaglevAssembler* masm, __ Move(map, last_map_handle); __ CmpTagged(object_map, map); __ EmitEagerDeoptIf(ne, DeoptimizeReason::kWrongMap, this); - __ bind(&done); + __ Bind(&done); } int CheckMapsWithMigration::MaxCallStackArgs() const { @@ -540,13 +541,13 @@ void CheckMapsWithMigration::GenerateCode(MaglevAssembler* masm, } if (!last_map) { - // We don't need to bind the label for the last map. + // We don't need to Bind the label for the last map. __ B(*done, eq); - __ bind(*continue_label); + __ Bind(*continue_label); } } - __ bind(*done); + __ Bind(*done); } void CheckNumber::SetValueLocationConstraints() { @@ -573,7 +574,7 @@ void CheckNumber::GenerateCode(MaglevAssembler* masm, __ CompareRoot(scratch, RootIndex::kHeapNumberMap); } __ EmitEagerDeoptIf(ne, DeoptimizeReason::kNotANumber, this); - __ bind(&done); + __ Bind(&done); } int CheckedObjectToIndex::MaxCallStackArgs() const { return 0; } @@ -599,7 +600,7 @@ void CheckedObjectToIndex::GenerateCode(MaglevAssembler* masm, __ LoadMap(scratch, object); __ CompareInstanceTypeRange(scratch, scratch, FIRST_STRING_TYPE, LAST_STRING_TYPE); - __ b(&is_string, ls); + __ B(&is_string, ls); __ Cmp(scratch, Immediate(HEAP_NUMBER_TYPE)); // The IC will go generic if it encounters something other than a @@ -624,7 +625,7 @@ void CheckedObjectToIndex::GenerateCode(MaglevAssembler* masm, } // String. - __ bind(&is_string); + __ Bind(&is_string); { RegisterSnapshot snapshot = node->register_snapshot(); snapshot.live_registers.clear(result_reg); @@ -652,7 +653,7 @@ void CheckedObjectToIndex::GenerateCode(MaglevAssembler* masm, __ SmiToInt32(result_reg, object); } - __ bind(*done); + __ Bind(*done); } void Int32ToNumber::SetValueLocationConstraints() { @@ -682,7 +683,7 @@ void Int32ToNumber::GenerateCode(MaglevAssembler* masm, }, object, value, scratch, done, this); __ Mov(object, scratch); - __ bind(*done); + __ Bind(*done); } void Uint32ToNumber::SetValueLocationConstraints() { @@ -707,7 +708,7 @@ void Uint32ToNumber::GenerateCode(MaglevAssembler* masm, }, object, value, done, this); __ Add(object, value, value); - __ bind(*done); + __ Bind(*done); } void Int32AddWithOverflow::SetValueLocationConstraints() { @@ -780,7 +781,7 @@ void Int32MultiplyWithOverflow::GenerateCode(MaglevAssembler* masm, { MaglevAssembler::ScratchRegisterScope temps(masm); Register temp = temps.Acquire().W(); - __ orr(temp, left, right); + __ Orr(temp, left, right); __ Cmp(temp, Immediate(0)); // If one of them is negative, we must have a -0 result, which is non-int32, // so deopt. @@ -788,7 +789,7 @@ void Int32MultiplyWithOverflow::GenerateCode(MaglevAssembler* masm, // reasons. Otherwise, the reason has to match the above. 
__ EmitEagerDeoptIf(lt, DeoptimizeReason::kOverflow, this); } - __ bind(&end); + __ Bind(&end); if (out_alias_input) { __ Move(out, res.W()); } @@ -844,7 +845,7 @@ void Int32DivideWithOverflow::GenerateCode(MaglevAssembler* masm, __ EmitEagerDeopt(node, DeoptimizeReason::kNotInt32); }, done, left, right, this); - __ bind(*done); + __ Bind(*done); // Perform the actual integer division. MaglevAssembler::ScratchRegisterScope temps(masm); @@ -853,7 +854,7 @@ void Int32DivideWithOverflow::GenerateCode(MaglevAssembler* masm, if (out_alias_input) { res = temps.Acquire().W(); } - __ sdiv(res, left, right); + __ Sdiv(res, left, right); // Check that the remainder is zero. Register temp = temps.Acquire().W(); @@ -923,7 +924,7 @@ void Int32ModulusWithOverflow::GenerateCode(MaglevAssembler* masm, __ Jump(*rhs_checked); }, rhs_checked, rhs, this); - __ bind(*rhs_checked); + __ Bind(*rhs_checked); __ Cmp(lhs, Immediate(0)); __ JumpToDeferredIf( @@ -932,15 +933,15 @@ void Int32ModulusWithOverflow::GenerateCode(MaglevAssembler* masm, Register out, Int32ModulusWithOverflow* node) { MaglevAssembler::ScratchRegisterScope temps(masm); Register res = temps.Acquire().W(); - __ neg(lhs, lhs); - __ udiv(res, lhs, rhs); - __ msub(out, res, rhs, lhs); + __ Neg(lhs, lhs); + __ Udiv(res, lhs, rhs); + __ Msub(out, res, rhs, lhs); __ Cmp(out, Immediate(0)); // TODO(victorgomes): This ideally should be kMinusZero, but Maglev // only allows one deopt reason per IR. __ EmitEagerDeoptIf(eq, deopt_reason, node); - __ neg(out, out); - __ b(*done); + __ Neg(out, out); + __ B(*done); }, done, lhs, rhs, out, this); @@ -955,7 +956,7 @@ void Int32ModulusWithOverflow::GenerateCode(MaglevAssembler* masm, __ And(out, mask, lhs); __ Jump(*done); - __ bind(&rhs_not_power_of_2); + __ Bind(&rhs_not_power_of_2); // We store the result of the Udiv in a temporary register in case {out} is // the same as {lhs} or {rhs}: we'll still need those 2 registers intact to @@ -964,7 +965,7 @@ void Int32ModulusWithOverflow::GenerateCode(MaglevAssembler* masm, __ Udiv(res, lhs, rhs); __ Msub(out, res, rhs, lhs); - __ bind(*done); + __ Bind(*done); } #define DEF_BITWISE_BINOP(Instruction, opcode) \ @@ -998,7 +999,7 @@ void Int32BitwiseNot::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { Register value = ToRegister(value_input()).W(); Register out = ToRegister(result()).W(); - __ mvn(out, value); + __ Mvn(out, value); } void Float64Add::SetValueLocationConstraints() { @@ -1126,10 +1127,10 @@ void Float64CompareNode::GenerateCode( __ LoadRoot(result, RootIndex::kTrueValue); __ Jump(&end); { - __ bind(&is_false); + __ Bind(&is_false); __ LoadRoot(result, RootIndex::kFalseValue); } - __ bind(&end); + __ Bind(&end); } #define DEF_OPERATION(Name) \ @@ -1247,7 +1248,7 @@ void CheckJSDataViewBounds::GenerateCode(MaglevAssembler* masm, } ZoneLabelRef done_byte_length(masm); - DeferredCodeInfo* deferred_get_byte_length = __ PushDeferredCode( + Label* deferred_get_byte_length = __ MakeDeferredCode( [](MaglevAssembler* masm, CheckJSDataViewBounds* node, ZoneLabelRef done, Register object, Register index, Register byte_length) { RegisterSnapshot snapshot = node->register_snapshot(); @@ -1271,12 +1272,12 @@ void CheckJSDataViewBounds::GenerateCode(MaglevAssembler* masm, }, this, done_byte_length, object, index, byte_length); __ Ldr(scratch.W(), FieldMemOperand(object, JSDataView::kBitFieldOffset)); - __ Cbnz(scratch.W(), &deferred_get_byte_length->deferred_code_label); + __ Cbnz(scratch.W(), deferred_get_byte_length); // Normal DataView (backed 
by AB / SAB) or non-length tracking backed by GSAB. __ LoadBoundedSizeFromObject(byte_length, object, JSDataView::kRawByteLengthOffset); - __ bind(*done_byte_length); + __ Bind(*done_byte_length); int element_size = ExternalArrayElementSize(element_type_); if (element_size > 1) { @@ -1325,8 +1326,8 @@ void CheckedInternalizedString::GenerateCode(MaglevAssembler* masm, // Deopt if this isn't a thin string. __ Tst(instance_type.W(), Immediate(kThinStringTagBit)); __ EmitEagerDeoptIf(eq, DeoptimizeReason::kWrongMap, node); - __ LoadTaggedPointerField( - object, FieldMemOperand(object, ThinString::kActualOffset)); + __ LoadTaggedField(object, + FieldMemOperand(object, ThinString::kActualOffset)); if (v8_flags.debug_code) { __ RecordComment("DCHECK IsInternalizedString"); Register scratch = instance_type; @@ -1341,7 +1342,7 @@ void CheckedInternalizedString::GenerateCode(MaglevAssembler* masm, __ jmp(*done); }, done, object, this, eager_deopt_info(), scratch); - __ bind(*done); + __ Bind(*done); } void UnsafeSmiTag::SetValueLocationConstraints() { @@ -1379,17 +1380,17 @@ void CheckedFloat64Unbox::GenerateCode(MaglevAssembler* masm, MaglevAssembler::ScratchRegisterScope temps(masm); Register temp = temps.Acquire(); __ SmiToInt32(temp, value); - __ sxtw(temp, temp.W()); - __ scvtf(ToDoubleRegister(result()), temp); + __ Sxtw(temp, temp.W()); + __ Scvtf(ToDoubleRegister(result()), temp); __ Jump(&done); - __ bind(&is_not_smi); + __ Bind(&is_not_smi); // Check if HeapNumber, deopt otherwise. __ Move(temp, FieldMemOperand(value, HeapObject::kMapOffset)); __ CompareRoot(temp, RootIndex::kHeapNumberMap); __ EmitEagerDeoptIf(ne, DeoptimizeReason::kNotANumber, this); __ Move(temp, FieldMemOperand(value, HeapNumber::kValueOffset)); - __ fmov(ToDoubleRegister(result()), temp); - __ bind(&done); + __ Fmov(ToDoubleRegister(result()), temp); + __ Bind(&done); } int GeneratorStore::MaxCallStackArgs() const { @@ -1408,7 +1409,7 @@ void GeneratorStore::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { Register generator = ToRegister(generator_input()); Register array = WriteBarrierDescriptor::ObjectRegister(); - __ LoadTaggedPointerField( + __ LoadTaggedField( array, FieldMemOperand(generator, JSGeneratorObject::kParametersAndRegistersOffset)); @@ -1421,7 +1422,7 @@ void GeneratorStore::GenerateCode(MaglevAssembler* masm, WriteBarrierDescriptor::SlotAddressRegister()); ZoneLabelRef done(masm); - DeferredCodeInfo* deferred_write_barrier = __ PushDeferredCode( + Label* deferred_write_barrier = __ MakeDeferredCode( [](MaglevAssembler* masm, ZoneLabelRef done, Register value, Register array, GeneratorStore* node, int32_t offset) { ASM_CODE_COMMENT_STRING(masm, "Write barrier slow path"); @@ -1453,9 +1454,9 @@ void GeneratorStore::GenerateCode(MaglevAssembler* masm, // Consider hoisting the check out of the loop and duplicating the loop into // with and without write barrier. 
__ CheckPageFlag(array, MemoryChunk::kPointersFromHereAreInterestingMask, - ne, &deferred_write_barrier->deferred_code_label); + ne, deferred_write_barrier); - __ bind(*done); + __ Bind(*done); } // Use WriteBarrierDescriptor::SlotAddressRegister() as the scratch @@ -1464,7 +1465,7 @@ void GeneratorStore::GenerateCode(MaglevAssembler* masm, context_input(), WriteBarrierDescriptor::SlotAddressRegister()); ZoneLabelRef done(masm); - DeferredCodeInfo* deferred_context_write_barrier = __ PushDeferredCode( + Label* deferred_context_write_barrier = __ MakeDeferredCode( [](MaglevAssembler* masm, ZoneLabelRef done, Register context, Register generator, GeneratorStore* node) { ASM_CODE_COMMENT_STRING(masm, "Write barrier slow path"); @@ -1499,8 +1500,8 @@ void GeneratorStore::GenerateCode(MaglevAssembler* masm, context, FieldMemOperand(generator, JSGeneratorObject::kContextOffset)); __ AssertNotSmi(context); __ CheckPageFlag(generator, MemoryChunk::kPointersFromHereAreInterestingMask, - ne, &deferred_context_write_barrier->deferred_code_label); - __ bind(*done); + ne, deferred_context_write_barrier); + __ Bind(*done); MaglevAssembler::ScratchRegisterScope temps(masm); Register scratch = temps.Acquire(); @@ -1524,7 +1525,7 @@ void IncreaseInterruptBudget::GenerateCode(MaglevAssembler* masm, Register budget = temps.Acquire().W(); __ Ldr(feedback_cell, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); - __ LoadTaggedPointerField( + __ LoadTaggedField( feedback_cell, FieldMemOperand(feedback_cell, JSFunction::kFeedbackCellOffset)); __ Ldr(budget, @@ -1647,7 +1648,7 @@ void HandleInterruptsAndTiering(MaglevAssembler* masm, ZoneLabelRef done, Register budget = temps.Acquire().W(); __ Ldr(feedback_cell, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); - __ LoadTaggedPointerField( + __ LoadTaggedField( feedback_cell, FieldMemOperand(feedback_cell, JSFunction::kFeedbackCellOffset)); __ Move(budget, v8_flags.interrupt_budget); @@ -1672,7 +1673,7 @@ void ReduceInterruptBudget::GenerateCode(MaglevAssembler* masm, Register budget = temps.Acquire().W(); __ Ldr(feedback_cell, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); - __ LoadTaggedPointerField( + __ LoadTaggedField( feedback_cell, FieldMemOperand(feedback_cell, JSFunction::kFeedbackCellOffset)); __ Ldr(budget, @@ -1682,7 +1683,7 @@ void ReduceInterruptBudget::GenerateCode(MaglevAssembler* masm, FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset)); ZoneLabelRef done(masm); __ JumpToDeferredIf(lt, HandleInterruptsAndTiering, done, this, scratch); - __ bind(*done); + __ Bind(*done); } namespace { @@ -1711,13 +1712,15 @@ void GenerateTypedArrayLoad(MaglevAssembler* masm, NodeT* node, Register object, if constexpr (std::is_same_v) { if (IsSignedIntTypedArrayElementsKind(kind)) { int element_size = ElementsKindSize(kind); - __ Add(data_pointer, data_pointer, Operand(index, LSL, element_size / 2)); + __ Add(data_pointer, data_pointer, + Operand(index, LSL, ShiftFromScale(element_size))); __ LoadSignedField(result_reg.W(), MemOperand(data_pointer), element_size); } else { DCHECK(IsUnsignedIntTypedArrayElementsKind(kind)); int element_size = ElementsKindSize(kind); - __ Add(data_pointer, data_pointer, Operand(index, LSL, element_size / 2)); + __ Add(data_pointer, data_pointer, + Operand(index, LSL, ShiftFromScale(element_size))); __ LoadUnsignedField(result_reg.W(), MemOperand(data_pointer), element_size); } @@ -1793,8 +1796,8 @@ void LoadFixedArrayElement::GenerateCode(MaglevAssembler* masm, } Register result_reg = 
ToRegister(result()); __ Add(result_reg, elements, Operand(index, LSL, kTaggedSizeLog2)); - __ DecompressAnyTagged(result_reg, - FieldMemOperand(result_reg, FixedArray::kHeaderSize)); + __ DecompressTagged(result_reg, + FieldMemOperand(result_reg, FixedArray::kHeaderSize)); } void LoadFixedDoubleArrayElement::SetValueLocationConstraints() { @@ -1829,7 +1832,7 @@ void StoreDoubleField::GenerateCode(MaglevAssembler* masm, Register tmp = temps.Acquire(); __ AssertNotSmi(object); - __ DecompressAnyTagged(tmp, FieldMemOperand(object, offset())); + __ DecompressTagged(tmp, FieldMemOperand(object, offset())); __ AssertNotSmi(tmp); __ Move(FieldMemOperand(tmp, HeapNumber::kValueOffset), value); } @@ -1857,7 +1860,7 @@ void StoreMap::GenerateCode(MaglevAssembler* masm, __ StoreTaggedField(value, FieldMemOperand(object, HeapObject::kMapOffset)); ZoneLabelRef done(masm); - DeferredCodeInfo* deferred_write_barrier = __ PushDeferredCode( + Label* deferred_write_barrier = __ MakeDeferredCode( [](MaglevAssembler* masm, ZoneLabelRef done, Register value, Register object, StoreMap* node) { ASM_CODE_COMMENT_STRING(masm, "Write barrier slow path"); @@ -1888,8 +1891,8 @@ void StoreMap::GenerateCode(MaglevAssembler* masm, __ JumpIfSmi(value, *done); __ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask, ne, - &deferred_write_barrier->deferred_code_label); - __ bind(*done); + deferred_write_barrier); + __ Bind(*done); } void LoadSignedIntDataViewElement::SetValueLocationConstraints() { @@ -1938,9 +1941,9 @@ void LoadSignedIntDataViewElement::GenerateCode(MaglevAssembler* masm, ZoneLabelRef is_little_endian(masm), is_big_endian(masm); __ ToBoolean(ToRegister(is_little_endian_input()), is_little_endian, is_big_endian, false); - __ bind(*is_big_endian); + __ Bind(*is_big_endian); __ ReverseByteOrder(result_reg, element_size); - __ bind(*is_little_endian); + __ Bind(*is_little_endian); // arm64 is little endian. static_assert(V8_TARGET_LITTLE_ENDIAN == 1); } @@ -1986,9 +1989,9 @@ void StoreSignedIntDataViewElement::GenerateCode(MaglevAssembler* masm, ZoneLabelRef is_little_endian(masm), is_big_endian(masm); __ ToBoolean(ToRegister(is_little_endian_input()), is_little_endian, is_big_endian, false); - __ bind(*is_big_endian); + __ Bind(*is_big_endian); __ ReverseByteOrder(value, element_size); - __ bind(*is_little_endian); + __ Bind(*is_little_endian); // arm64 is little endian. static_assert(V8_TARGET_LITTLE_ENDIAN == 1); } @@ -2050,17 +2053,17 @@ void LoadDoubleDataViewElement::GenerateCode(MaglevAssembler* masm, is_big_endian, true); // arm64 is little endian. static_assert(V8_TARGET_LITTLE_ENDIAN == 1); - __ bind(*is_little_endian); + __ Bind(*is_little_endian); __ Move(result_reg, MemOperand(data_pointer, index)); - __ jmp(&done); + __ B(&done); // We should swap the bytes if big endian. - __ bind(*is_big_endian); + __ Bind(*is_big_endian); MaglevAssembler::ScratchRegisterScope temps(masm); Register scratch = temps.Acquire(); __ Move(scratch, MemOperand(data_pointer, index)); __ Rev(scratch, scratch); __ Fmov(result_reg, scratch); - __ bind(&done); + __ Bind(&done); } } @@ -2113,17 +2116,17 @@ void StoreDoubleDataViewElement::GenerateCode(MaglevAssembler* masm, is_big_endian, true); // arm64 is little endian. static_assert(V8_TARGET_LITTLE_ENDIAN == 1); - __ bind(*is_little_endian); + __ Bind(*is_little_endian); __ Str(value, MemOperand(data_pointer, index)); - __ jmp(&done); + __ B(&done); // We should swap the bytes if big endian. 
- __ bind(*is_big_endian); + __ Bind(*is_big_endian); MaglevAssembler::ScratchRegisterScope temps(masm); Register scratch = temps.Acquire(); __ Fmov(scratch, value); __ Rev(scratch, scratch); __ Str(scratch, MemOperand(data_pointer, index)); - __ bind(&done); + __ Bind(&done); } } @@ -2147,7 +2150,7 @@ void StoreTaggedFieldWithWriteBarrier::GenerateCode( __ StoreTaggedField(FieldMemOperand(object, offset()), value); ZoneLabelRef done(masm); - DeferredCodeInfo* deferred_write_barrier = __ PushDeferredCode( + Label* deferred_write_barrier = __ MakeDeferredCode( [](MaglevAssembler* masm, ZoneLabelRef done, Register value, Register object, StoreTaggedFieldWithWriteBarrier* node) { ASM_CODE_COMMENT_STRING(masm, "Write barrier slow path"); @@ -2178,9 +2181,9 @@ void StoreTaggedFieldWithWriteBarrier::GenerateCode( __ JumpIfSmi(value, *done); __ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask, ne, - &deferred_write_barrier->deferred_code_label); + deferred_write_barrier); - __ bind(*done); + __ Bind(*done); } void SetPendingMessage::SetValueLocationConstraints() { @@ -2233,10 +2236,10 @@ void TestUndetectable::GenerateCode(MaglevAssembler* masm, __ LoadRoot(return_value, RootIndex::kTrueValue); __ B(&done); - __ bind(&return_false); + __ Bind(&return_false); __ LoadRoot(return_value, RootIndex::kFalseValue); - __ bind(&done); + __ Bind(&done); } int ThrowIfNotSuperConstructor::MaxCallStackArgs() const { return 2; } @@ -2246,7 +2249,7 @@ void ThrowIfNotSuperConstructor::SetValueLocationConstraints() { } void ThrowIfNotSuperConstructor::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { - DeferredCodeInfo* deferred_abort = __ PushDeferredCode( + Label* deferred_abort = __ MakeDeferredCode( [](MaglevAssembler* masm, ThrowIfNotSuperConstructor* node) { __ Push(ToRegister(node->constructor()), ToRegister(node->function())); __ Move(kContextRegister, masm->native_context().object()); @@ -2260,7 +2263,7 @@ void ThrowIfNotSuperConstructor::GenerateCode(MaglevAssembler* masm, __ LoadMap(scratch, ToRegister(constructor())); __ Ldr(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); __ TestAndBranchIfAllClear(scratch, Map::Bits1::IsConstructorBit::kMask, - &deferred_abort->deferred_code_label); + deferred_abort); } // --- @@ -2296,13 +2299,13 @@ void Return::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { __ CompareAndBranch(params_size, actual_params_size, ge, &corrected_args_count); __ Mov(params_size, actual_params_size); - __ bind(&corrected_args_count); + __ Bind(&corrected_args_count); // Leave the frame. __ LeaveFrame(StackFrame::MAGLEV); // Drop receiver + arguments according to dynamic arguments size. - __ DropArguments(params_size, TurboAssembler::kCountIncludesReceiver); + __ DropArguments(params_size, MacroAssembler::kCountIncludesReceiver); __ Ret(); } diff --git a/src/maglev/maglev-assembler-inl.h b/src/maglev/maglev-assembler-inl.h index 079cd78f4c..d57699c12c 100644 --- a/src/maglev/maglev-assembler-inl.h +++ b/src/maglev/maglev-assembler-inl.h @@ -5,6 +5,8 @@ #ifndef V8_MAGLEV_MAGLEV_ASSEMBLER_INL_H_ #define V8_MAGLEV_MAGLEV_ASSEMBLER_INL_H_ +#include "src/maglev/maglev-assembler.h" + #ifdef V8_TARGET_ARCH_ARM64 #include "src/maglev/arm64/maglev-assembler-arm64-inl.h" #elif V8_TARGET_ARCH_X64 @@ -167,8 +169,8 @@ class DeferredCodeInfoImpl final : public DeferredCodeInfo { } // namespace detail template -inline DeferredCodeInfo* MaglevAssembler::PushDeferredCode( - Function&& deferred_code_gen, Args&&... 
args) { +inline Label* MaglevAssembler::MakeDeferredCode(Function&& deferred_code_gen, + Args&&... args) { using FunctionPointer = typename detail::FunctionArgumentsTupleHelper::FunctionPointer; static_assert( @@ -177,7 +179,7 @@ inline DeferredCodeInfo* MaglevAssembler::PushDeferredCode( std::declval(), std::declval()))...>, "Parameters of deferred_code_gen function should match arguments into " - "PushDeferredCode"); + "MakeDeferredCode"); ScratchRegisterScope scratch_scope(this); using DeferredCodeInfoT = detail::DeferredCodeInfoImpl; @@ -188,7 +190,7 @@ inline DeferredCodeInfo* MaglevAssembler::PushDeferredCode( std::forward(args)...); code_gen_state()->PushDeferredCode(deferred_code); - return deferred_code; + return &deferred_code->deferred_code_label; } // Note this doesn't take capturing lambdas by design, since state may @@ -198,12 +200,12 @@ template inline void MaglevAssembler::JumpToDeferredIf(Condition cond, Function&& deferred_code_gen, Args&&... args) { - DeferredCodeInfo* deferred_code = PushDeferredCode( - std::forward(deferred_code_gen), std::forward(args)...); if (v8_flags.code_comments) { RecordComment("-- Jump to deferred code"); } - JumpIf(cond, &deferred_code->deferred_code_label); + JumpIf(cond, MakeDeferredCode( + std::forward(deferred_code_gen), + std::forward(args)...)); } inline void MaglevAssembler::SmiToDouble(DoubleRegister result, Register smi) { @@ -242,6 +244,27 @@ inline void MaglevAssembler::Branch(Condition condition, Label* if_true, } } +inline void MaglevAssembler::LoadTaggedField(Register result, + MemOperand operand) { + MacroAssembler::LoadTaggedField(result, operand); +} + +inline void MaglevAssembler::LoadTaggedField(Register result, Register object, + int offset) { + MacroAssembler::LoadTaggedField(result, FieldMemOperand(object, offset)); +} + +inline void MaglevAssembler::LoadTaggedSignedField(Register result, + MemOperand operand) { + MacroAssembler::LoadTaggedField(result, operand); +} + +inline void MaglevAssembler::LoadTaggedSignedField(Register result, + Register object, + int offset) { + MacroAssembler::LoadTaggedField(result, FieldMemOperand(object, offset)); +} + } // namespace maglev } // namespace internal } // namespace v8 diff --git a/src/maglev/maglev-assembler.cc b/src/maglev/maglev-assembler.cc index 8c7d1a4c49..3f245cd60a 100644 --- a/src/maglev/maglev-assembler.cc +++ b/src/maglev/maglev-assembler.cc @@ -32,9 +32,8 @@ void MaglevAssembler::LoadSingleCharacterString(Register result, DCHECK_LT(char_code, String::kMaxOneByteCharCode); Register table = result; LoadRoot(table, RootIndex::kSingleCharacterStringTable); - DecompressAnyTagged( - result, FieldMemOperand( - table, FixedArray::kHeaderSize + char_code * kTaggedSize)); + DecompressTagged(result, FieldMemOperand(table, FixedArray::kHeaderSize + + char_code * kTaggedSize)); } void MaglevAssembler::LoadDataField(const PolymorphicAccessInfo& access_info, @@ -54,13 +53,12 @@ void MaglevAssembler::LoadDataField(const PolymorphicAccessInfo& access_info, } // The field is in the property array, first load it from there. 
AssertNotSmi(load_source_object); - DecompressAnyTagged(load_source, - FieldMemOperand(load_source_object, - JSReceiver::kPropertiesOrHashOffset)); + DecompressTagged(load_source, + FieldMemOperand(load_source_object, + JSReceiver::kPropertiesOrHashOffset)); } AssertNotSmi(load_source); - DecompressAnyTagged(result, - FieldMemOperand(load_source, field_index.offset())); + DecompressTagged(result, FieldMemOperand(load_source, field_index.offset())); } } // namespace maglev diff --git a/src/maglev/maglev-assembler.h b/src/maglev/maglev-assembler.h index 7c6cf74395..6f719cbf99 100644 --- a/src/maglev/maglev-assembler.h +++ b/src/maglev/maglev-assembler.h @@ -98,6 +98,13 @@ class MaglevAssembler : public MacroAssembler { Register FromAnyToRegister(const Input& input, Register scratch); + inline void LoadTaggedField(Register result, MemOperand operand); + inline void LoadTaggedField(Register result, Register object, int offset); + inline void LoadTaggedSignedField(Register result, MemOperand operand); + inline void LoadTaggedSignedField(Register result, Register object, + int offset); + inline void LoadTaggedFieldByIndex(Register result, Register object, + Register index, int scale, int offset); inline void LoadBoundedSizeFromObject(Register result, Register object, int offset); inline void LoadExternalPointerField(Register result, MemOperand operand); @@ -150,12 +157,10 @@ class MaglevAssembler : public MacroAssembler { inline void DefineExceptionHandlerAndLazyDeoptPoint(NodeBase* node); template - inline DeferredCodeInfo* PushDeferredCode(Function&& deferred_code_gen, - Args&&... args); + inline Label* MakeDeferredCode(Function&& deferred_code_gen, Args&&... args); template inline void JumpToDeferredIf(Condition cond, Function&& deferred_code_gen, Args&&... 
args); - template inline Label* GetDeoptLabel(NodeT* node, DeoptimizeReason reason); template @@ -189,6 +194,7 @@ class MaglevAssembler : public MacroAssembler { inline void LoadByte(Register dst, MemOperand src); inline void SignExtend32To64Bits(Register dst, Register src); + inline void NegateInt32(Register val); template inline void DeoptIfBufferDetached(Register array, Register scratch, @@ -201,6 +207,12 @@ class MaglevAssembler : public MacroAssembler { InstanceType lower_limit, InstanceType higher_limit); + inline void CompareInstanceTypeRange(Register map, InstanceType lower_limit, + InstanceType higher_limit); + inline void CompareInstanceTypeRange(Register map, Register instance_type_out, + InstanceType lower_limit, + InstanceType higher_limit); + inline void CompareTagged(Register reg, Handle obj); inline void CompareInt32(Register reg, int32_t imm); @@ -222,6 +234,14 @@ class MaglevAssembler : public MacroAssembler { inline void CompareInt32AndJumpIf(Register r1, Register r2, Condition cond, Label* target, Label::Distance distance = Label::kFar); + inline void CompareInt32AndJumpIf(Register r1, int32_t value, Condition cond, + Label* target, + Label::Distance distance = Label::kFar); + inline void TestInt32AndJumpIfAnySet(Register r1, int32_t mask, Label* target, + Label::Distance distance = Label::kFar); + inline void TestInt32AndJumpIfAllClear( + Register r1, int32_t mask, Label* target, + Label::Distance distance = Label::kFar); inline void Int32ToDouble(DoubleRegister result, Register n); inline void SmiToDouble(DoubleRegister result, Register smi); diff --git a/src/maglev/maglev-code-generator.cc b/src/maglev/maglev-code-generator.cc index e9706c7387..166fb886a9 100644 --- a/src/maglev/maglev-code-generator.cc +++ b/src/maglev/maglev-code-generator.cc @@ -17,6 +17,7 @@ #include "src/common/globals.h" #include "src/compiler/backend/instruction.h" #include "src/deoptimizer/deoptimize-reason.h" +#include "src/deoptimizer/deoptimizer.h" #include "src/deoptimizer/translation-array.h" #include "src/execution/frame-constants.h" #include "src/interpreter/bytecode-register.h" @@ -1240,6 +1241,7 @@ void MaglevCodeGenerator::EmitCode() { processor.ProcessGraph(graph_); EmitDeferredCode(); EmitDeopts(); + if (code_gen_failed_) return; EmitExceptionHandlerTrampolines(); __ FinishCode(); } @@ -1258,6 +1260,13 @@ void MaglevCodeGenerator::EmitDeferredCode() { } void MaglevCodeGenerator::EmitDeopts() { + const size_t num_deopts = code_gen_state_.eager_deopts().size() + + code_gen_state_.lazy_deopts().size(); + if (num_deopts > Deoptimizer::kMaxNumberOfEntries) { + code_gen_failed_ = true; + return; + } + MaglevTranslationArrayBuilder translation_builder( local_isolate_, &masm_, &translation_array_builder_, &deopt_literals_); @@ -1351,6 +1360,8 @@ void MaglevCodeGenerator::EmitMetadata() { } MaybeHandle MaglevCodeGenerator::BuildCodeObject(Isolate* isolate) { + if (code_gen_failed_) return {}; + CodeDesc desc; masm()->GetCode(isolate, &desc, &safepoint_table_builder_, handler_table_offset_); diff --git a/src/maglev/maglev-code-generator.h b/src/maglev/maglev-code-generator.h index 719e2d1154..5966903d41 100644 --- a/src/maglev/maglev-code-generator.h +++ b/src/maglev/maglev-code-generator.h @@ -55,6 +55,8 @@ class MaglevCodeGenerator final { IdentityMap deopt_literals_; int deopt_exit_start_offset_ = -1; int handler_table_offset_ = 0; + + bool code_gen_failed_ = false; }; } // namespace maglev diff --git a/src/maglev/maglev-graph-builder.cc b/src/maglev/maglev-graph-builder.cc index 
fe35cd0e79..5b37a8a3e8 100644 --- a/src/maglev/maglev-graph-builder.cc +++ b/src/maglev/maglev-graph-builder.cc @@ -1639,6 +1639,7 @@ void MaglevGraphBuilder::BuildCheckMaps( if (merger.emit_check_with_migration()) { AddNewNode({object}, merger.intersect_set(), GetCheckType(known_info->type)); + MarkPossibleMapMigration(); } else { AddNewNode({object}, merger.intersect_set(), GetCheckType(known_info->type)); @@ -1736,7 +1737,10 @@ bool MaglevGraphBuilder::TryBuildPropertyGetterCall( ? ConvertReceiverMode::kNotNullOrUndefined : ConvertReceiverMode::kAny; CallArguments args(receiver_mode, {receiver}); - SetAccumulator(ReduceCall(constant.AsJSFunction(), args)); + ReduceResult result = ReduceCall(constant.AsJSFunction(), args); + // TODO(victorgomes): Propagate the case if we need to soft deopt. + DCHECK(!result.IsDoneWithAbort()); + SetAccumulator(result.value()); } else if (receiver != lookup_start_object) { return false; } else { @@ -1764,7 +1768,10 @@ bool MaglevGraphBuilder::TryBuildPropertySetterCall( if (constant.IsJSFunction()) { CallArguments args(ConvertReceiverMode::kNotNullOrUndefined, {receiver, value}); - SetAccumulator(ReduceCall(constant.AsJSFunction(), args)); + ReduceResult result = ReduceCall(constant.AsJSFunction(), args); + // TODO(victorgomes): Propagate the case if we need to soft deopt. + DCHECK(!result.IsDoneWithAbort()); + SetAccumulator(result.value()); return true; } else { // TODO(victorgomes): API calls. @@ -2005,23 +2012,6 @@ bool MaglevGraphBuilder::TryBuildPropertyAccess( } } -namespace { -bool HasOnlyStringMaps(base::Vector maps) { - for (compiler::MapRef map : maps) { - if (!map.IsStringMap()) return false; - } - return true; -} - -bool HasOnlyNumberMaps(base::Vector maps) { - for (compiler::MapRef map : maps) { - if (map.instance_type() != HEAP_NUMBER_TYPE) return false; - } - return true; -} - -} // namespace - bool MaglevGraphBuilder::TryBuildNamedAccess( ValueNode* receiver, ValueNode* lookup_start_object, compiler::NamedAccessFeedback const& feedback, @@ -2557,6 +2547,28 @@ void MaglevGraphBuilder::VisitGetKeyedProperty() { broker()->GetFeedbackForPropertyAccess( feedback_source, compiler::AccessMode::kLoad, base::nullopt); + if (current_for_in_state.index != nullptr && + current_for_in_state.receiver == object && + current_for_in_state.key == current_interpreter_frame_.accumulator()) { + if (current_for_in_state.receiver_needs_map_check) { + auto* receiver_map = + AddNewNode({object}, HeapObject::kMapOffset); + AddNewNode( + {receiver_map, current_for_in_state.cache_type}); + current_for_in_state.receiver_needs_map_check = false; + } + // TODO(leszeks): Cache the indices across the loop. + auto* cache_array = AddNewNode( + {current_for_in_state.enum_cache}, EnumCache::kIndicesOffset); + // TODO(leszeks): Do we need to check that the indices aren't empty? + // TODO(leszeks): Cache the field index per iteration. 
+ auto* field_index = AddNewNode( + {cache_array, current_for_in_state.index}); + SetAccumulator( + AddNewNode({object, field_index})); + return; + } + switch (processed_feedback.kind()) { case compiler::ProcessedFeedback::kInsufficient: EmitUnconditionalDeopt( @@ -2578,7 +2590,7 @@ void MaglevGraphBuilder::VisitGetKeyedProperty() { case compiler::ProcessedFeedback::kNamedAccess: { ValueNode* key = GetAccumulatorTagged(); compiler::NameRef name = processed_feedback.AsNamedAccess().name(); - if (!BuildCheckValue(key, name)) return; + if (BuildCheckValue(key, name).IsDoneWithAbort()) return; if (TryReuseKnownPropertyLoad(object, name)) return; if (TryBuildNamedAccess(object, object, processed_feedback.AsNamedAccess(), @@ -3009,35 +3021,90 @@ void MaglevGraphBuilder::VisitFindNonDefaultConstructorOrConstruct() { StoreRegisterPair(result, call_builtin); } -ValueNode* MaglevGraphBuilder::TryBuildInlinedCall( +ReduceResult MaglevGraphBuilder::BuildInlined(const CallArguments& args, + BasicBlockRef* start_ref, + BasicBlockRef* end_ref) { + DCHECK(is_inline()); + + // Manually create the prologue of the inner function graph, so that we + // can manually set up the arguments. + StartPrologue(); + + // Set receiver. + SetArgument(0, GetConvertReceiver(function(), args)); + // Set remaining arguments. + RootConstant* undefined_constant = + GetRootConstant(RootIndex::kUndefinedValue); + for (int i = 1; i < parameter_count(); i++) { + ValueNode* arg_value = args[i - 1]; + if (arg_value == nullptr) arg_value = undefined_constant; + SetArgument(i, arg_value); + } + BuildRegisterFrameInitialization(GetConstant(function().context()), + GetConstant(function())); + BuildMergeStates(); + BasicBlock* inlined_prologue = EndPrologue(); + + // Set the entry JumpToInlined to jump to the prologue block. + // TODO(leszeks): Passing start_ref to JumpToInlined creates a two-element + // linked list of refs. Consider adding a helper to explicitly set the target + // instead. + start_ref->SetToBlockAndReturnNext(inlined_prologue) + ->SetToBlockAndReturnNext(inlined_prologue); + + // Build the inlined function body. + BuildBody(); + + // All returns in the inlined body jump to a merge point one past the + // bytecode length (i.e. at offset bytecode.length()). Create a block at + // this fake offset and have it jump out of the inlined function, into a new + // block that we create which resumes execution of the outer function. + // TODO(leszeks): Wrap this up in a helper. + DCHECK_NULL(current_block_); + + // If we don't have a merge state at the inline_exit_offset, then there is no + // control flow that reaches the end of the inlined function, either because + // of infinite loops or deopts + if (merge_states_[inline_exit_offset()] == nullptr) { + return ReduceResult::DoneWithAbort(); + } + + ProcessMergePoint(inline_exit_offset()); + StartNewBlock(inline_exit_offset()); + FinishBlock({}, end_ref); + + // Pull the returned accumulator value out of the inlined function's final + // merged return state. + return current_interpreter_frame_.accumulator(); +} + +ReduceResult MaglevGraphBuilder::TryBuildInlinedCall( compiler::JSFunctionRef function, CallArguments& args) { // Don't try to inline if the target function hasn't been compiled yet. // TODO(verwaest): Soft deopt instead? 
- if (!function.shared().HasBytecodeArray()) return nullptr; - if (!function.feedback_vector(broker()->dependencies()).has_value()) { - return nullptr; + if (!function.shared().HasBytecodeArray()) { + return ReduceResult::Fail(); + } + if (!function.feedback_vector(broker()->dependencies()).has_value()) { + return ReduceResult::Fail(); + } + if (function.code().object()->kind() == CodeKind::TURBOFAN) { + return ReduceResult::Fail(); } - if (function.code().object()->kind() == CodeKind::TURBOFAN) return nullptr; // TODO(victorgomes): Support NewTarget/RegisterInput in inlined functions. compiler::BytecodeArrayRef bytecode = function.shared().GetBytecodeArray(); if (bytecode.incoming_new_target_or_generator_register().is_valid()) { - return nullptr; + return ReduceResult::Fail(); } // TODO(victorgomes): Support exception handler inside inlined functions. if (bytecode.handler_table_size() > 0) { - return nullptr; + return ReduceResult::Fail(); } if (v8_flags.trace_maglev_inlining) { std::cout << " inlining " << function.shared() << std::endl; } - // The undefined constant node has to be created before the inner graph is - // created. - RootConstant* undefined_constant; - if (args.receiver_mode() == ConvertReceiverMode::kNullOrUndefined) { - undefined_constant = GetRootConstant(RootIndex::kUndefinedValue); - } // Create a new compilation unit and graph builder for the inlined // function. @@ -3050,50 +3117,13 @@ ValueNode* MaglevGraphBuilder::TryBuildInlinedCall( BasicBlockRef start_ref, end_ref; BasicBlock* block = FinishBlock({}, &start_ref, inner_unit); - // Manually create the prologue of the inner function graph, so that we - // can manually set up the arguments. - inner_graph_builder.StartPrologue(); - - if (args.receiver_mode() == ConvertReceiverMode::kNullOrUndefined) { - if (function.shared().language_mode() == LanguageMode::kSloppy) { - // TODO(leszeks): Store the global proxy somehow. - inner_graph_builder.SetArgument(0, undefined_constant); - } else { - inner_graph_builder.SetArgument(0, undefined_constant); - } - } else { - inner_graph_builder.SetArgument(0, args.receiver()); + ReduceResult result = + inner_graph_builder.BuildInlined(args, &start_ref, &end_ref); + if (result.IsDoneWithAbort()) { + MarkBytecodeDead(); + return ReduceResult::DoneWithAbort(); } - for (int i = 1; i < inner_unit->parameter_count(); i++) { - ValueNode* arg_value = args[i - 1]; - if (arg_value == nullptr) arg_value = undefined_constant; - inner_graph_builder.SetArgument(i, arg_value); - } - inner_graph_builder.BuildRegisterFrameInitialization(GetContext(), - GetConstant(function)); - inner_graph_builder.BuildMergeStates(); - BasicBlock* inlined_prologue = inner_graph_builder.EndPrologue(); - - // Set the entry JumpToInlined to jump to the prologue block. - // TODO(leszeks): Passing start_ref to JumpToInlined creates a two-element - // linked list of refs. Consider adding a helper to explicitly set the target - // instead. - start_ref.SetToBlockAndReturnNext(inlined_prologue) - ->SetToBlockAndReturnNext(inlined_prologue); - - // Build the inlined function body. - inner_graph_builder.BuildBody(); - - // All returns in the inlined body jump to a merge point one past the - // bytecode length (i.e. at offset bytecode.length()). Create a block at - // this fake offset and have it jump out of the inlined function, into a new - // block that we create which resumes execution of the outer function. - // TODO(leszeks): Wrap this up in a helper. 
- DCHECK_NULL(inner_graph_builder.current_block_); - inner_graph_builder.ProcessMergePoint( - inner_graph_builder.inline_exit_offset()); - inner_graph_builder.StartNewBlock(inner_graph_builder.inline_exit_offset()); - inner_graph_builder.FinishBlock({}, &end_ref); + DCHECK(result.HasValue()); // Create a new block at our current offset, and resume execution. Do this // manually to avoid trying to resolve any merges to this offset, which will @@ -3108,24 +3138,20 @@ ValueNode* MaglevGraphBuilder::TryBuildInlinedCall( end_ref.SetToBlockAndReturnNext(current_block_) ->SetToBlockAndReturnNext(current_block_); - // Pull the returned accumulator value out of the inlined function's final - // merged return state. - ValueNode* result = - inner_graph_builder.current_interpreter_frame_.accumulator(); #ifdef DEBUG - new_nodes_.insert(result); + new_nodes_.insert(result.value()); #endif return result; } -ValueNode* MaglevGraphBuilder::TryReduceStringFromCharCode( +ReduceResult MaglevGraphBuilder::TryReduceStringFromCharCode( compiler::JSFunctionRef target, CallArguments& args) { - if (args.count() != 1) return nullptr; + if (args.count() != 1) return ReduceResult::Fail(); return AddNewNode( {GetTruncatedInt32FromNumber(args[0])}); } -ValueNode* MaglevGraphBuilder::TryReduceStringPrototypeCharCodeAt( +ReduceResult MaglevGraphBuilder::TryReduceStringPrototypeCharCodeAt( compiler::JSFunctionRef target, CallArguments& args) { ValueNode* receiver = GetTaggedOrUndefined(args.receiver()); ValueNode* index; @@ -3147,11 +3173,11 @@ ValueNode* MaglevGraphBuilder::TryReduceStringPrototypeCharCodeAt( } template -ValueNode* MaglevGraphBuilder::TryBuildLoadDataView(const CallArguments& args, - ExternalArrayType type) { +ReduceResult MaglevGraphBuilder::TryBuildLoadDataView(const CallArguments& args, + ExternalArrayType type) { if (!broker()->dependencies()->DependOnArrayBufferDetachingProtector()) { // TODO(victorgomes): Add checks whether the array has been detached. - return nullptr; + return ReduceResult::Fail(); } // TODO(victorgomes): Add data view to known types. ValueNode* receiver = GetTaggedOrUndefined(args.receiver()); @@ -3166,12 +3192,11 @@ ValueNode* MaglevGraphBuilder::TryBuildLoadDataView(const CallArguments& args, } template -ValueNode* MaglevGraphBuilder::TryBuildStoreDataView(const CallArguments& args, - ExternalArrayType type, - Function&& getValue) { +ReduceResult MaglevGraphBuilder::TryBuildStoreDataView( + const CallArguments& args, ExternalArrayType type, Function&& getValue) { if (!broker()->dependencies()->DependOnArrayBufferDetachingProtector()) { // TODO(victorgomes): Add checks whether the array has been detached. - return nullptr; + return ReduceResult::Fail(); } // TODO(victorgomes): Add data view to known types. 
ValueNode* receiver = GetTaggedOrUndefined(args.receiver()); @@ -3188,48 +3213,48 @@ ValueNode* MaglevGraphBuilder::TryBuildStoreDataView(const CallArguments& args, return GetRootConstant(RootIndex::kUndefinedValue); } -ValueNode* MaglevGraphBuilder::TryReduceDataViewPrototypeGetInt8( +ReduceResult MaglevGraphBuilder::TryReduceDataViewPrototypeGetInt8( compiler::JSFunctionRef target, CallArguments& args) { return TryBuildLoadDataView( args, ExternalArrayType::kExternalInt8Array); } -ValueNode* MaglevGraphBuilder::TryReduceDataViewPrototypeSetInt8( +ReduceResult MaglevGraphBuilder::TryReduceDataViewPrototypeSetInt8( compiler::JSFunctionRef target, CallArguments& args) { return TryBuildStoreDataView( args, ExternalArrayType::kExternalInt8Array, [&](ValueNode* value) { return value ? GetInt32(value) : GetInt32Constant(0); }); } -ValueNode* MaglevGraphBuilder::TryReduceDataViewPrototypeGetInt16( +ReduceResult MaglevGraphBuilder::TryReduceDataViewPrototypeGetInt16( compiler::JSFunctionRef target, CallArguments& args) { return TryBuildLoadDataView( args, ExternalArrayType::kExternalInt16Array); } -ValueNode* MaglevGraphBuilder::TryReduceDataViewPrototypeSetInt16( +ReduceResult MaglevGraphBuilder::TryReduceDataViewPrototypeSetInt16( compiler::JSFunctionRef target, CallArguments& args) { return TryBuildStoreDataView( args, ExternalArrayType::kExternalInt16Array, [&](ValueNode* value) { return value ? GetInt32(value) : GetInt32Constant(0); }); } -ValueNode* MaglevGraphBuilder::TryReduceDataViewPrototypeGetInt32( +ReduceResult MaglevGraphBuilder::TryReduceDataViewPrototypeGetInt32( compiler::JSFunctionRef target, CallArguments& args) { return TryBuildLoadDataView( args, ExternalArrayType::kExternalInt32Array); } -ValueNode* MaglevGraphBuilder::TryReduceDataViewPrototypeSetInt32( +ReduceResult MaglevGraphBuilder::TryReduceDataViewPrototypeSetInt32( compiler::JSFunctionRef target, CallArguments& args) { return TryBuildStoreDataView( args, ExternalArrayType::kExternalInt32Array, [&](ValueNode* value) { return value ? GetInt32(value) : GetInt32Constant(0); }); } -ValueNode* MaglevGraphBuilder::TryReduceDataViewPrototypeGetFloat64( +ReduceResult MaglevGraphBuilder::TryReduceDataViewPrototypeGetFloat64( compiler::JSFunctionRef target, CallArguments& args) { return TryBuildLoadDataView( args, ExternalArrayType::kExternalFloat64Array); } -ValueNode* MaglevGraphBuilder::TryReduceDataViewPrototypeSetFloat64( +ReduceResult MaglevGraphBuilder::TryReduceDataViewPrototypeSetFloat64( compiler::JSFunctionRef target, CallArguments& args) { return TryBuildStoreDataView( args, ExternalArrayType::kExternalFloat64Array, [&](ValueNode* value) { @@ -3239,11 +3264,11 @@ ValueNode* MaglevGraphBuilder::TryReduceDataViewPrototypeSetFloat64( }); } -ValueNode* MaglevGraphBuilder::TryReduceFunctionPrototypeCall( +ReduceResult MaglevGraphBuilder::TryReduceFunctionPrototypeCall( compiler::JSFunctionRef target, CallArguments& args) { // We can't reduce Function#call when there is no receiver function. if (args.receiver_mode() == ConvertReceiverMode::kNullOrUndefined) { - return nullptr; + return ReduceResult::Fail(); } // Use Function.prototype.call context, to ensure any exception is thrown in // the correct context. 
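The hunks above switch the builtin reducers from returning a nullable ValueNode* to returning ReduceResult (declared further down in maglev-graph-builder.h). A minimal sketch of the reducer-side convention, assuming a hypothetical TryReduceExampleBuiltin; the helpers it calls (args.count(), GetRootConstant) are the real ones used elsewhere in this patch:

ReduceResult MaglevGraphBuilder::TryReduceExampleBuiltin(
    compiler::JSFunctionRef target, CallArguments& args) {
  // Bail out: kFail tells the caller this reducer does not apply, so it can
  // fall back to a generic call. Previously this was `return nullptr;`.
  if (args.count() != 1) return ReduceResult::Fail();
  // Success: the implicit ReduceResult(ValueNode*) constructor wraps the node
  // as kDoneWithValue. Previously the raw ValueNode* was returned directly.
  return GetRootConstant(RootIndex::kUndefinedValue);
}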
@@ -3253,8 +3278,23 @@ ValueNode* MaglevGraphBuilder::TryReduceFunctionPrototypeCall( return BuildGenericCall(receiver, context, Call::TargetType::kAny, args); } -ValueNode* MaglevGraphBuilder::TryReduceMathPow(compiler::JSFunctionRef target, - CallArguments& args) { +ReduceResult MaglevGraphBuilder::TryReduceObjectPrototypeHasOwnProperty( + compiler::JSFunctionRef target, CallArguments& args) { + // We can't reduce Function#call when there is no receiver function. + if (args.receiver_mode() == ConvertReceiverMode::kNullOrUndefined) { + return ReduceResult::Fail(); + } + if (args.receiver() != current_for_in_state.receiver) { + return ReduceResult::Fail(); + } + if (args.count() != 1 || args[0] != current_for_in_state.key) { + return ReduceResult::Fail(); + } + return GetRootConstant(RootIndex::kTrueValue); +} + +ReduceResult MaglevGraphBuilder::TryReduceMathPow( + compiler::JSFunctionRef target, CallArguments& args) { if (args.count() < 2) { return GetRootConstant(RootIndex::kNanValue); } @@ -3263,7 +3303,7 @@ ValueNode* MaglevGraphBuilder::TryReduceMathPow(compiler::JSFunctionRef target, // don't need to unbox both inputs. See https://crbug.com/1393643. if (args[0]->properties().is_tagged() && args[1]->properties().is_tagged()) { // The Math.pow call will be created in CallKnownJSFunction reduction. - return nullptr; + return ReduceResult::Fail(); } ValueNode* left = GetFloat64(args[0]); ValueNode* right = GetFloat64(args[1]); @@ -3292,7 +3332,7 @@ ValueNode* MaglevGraphBuilder::TryReduceMathPow(compiler::JSFunctionRef target, V(MathTanh, tanh) #define MATH_UNARY_IEEE_BUILTIN_REDUCER(Name, IeeeOp) \ - ValueNode* MaglevGraphBuilder::TryReduce##Name( \ + ReduceResult MaglevGraphBuilder::TryReduce##Name( \ compiler::JSFunctionRef target, CallArguments& args) { \ if (args.count() < 1) { \ return GetRootConstant(RootIndex::kNanValue); \ @@ -3307,21 +3347,23 @@ MAP_MATH_UNARY_TO_IEEE_754(MATH_UNARY_IEEE_BUILTIN_REDUCER) #undef MATH_UNARY_IEEE_BUILTIN_REDUCER #undef MAP_MATH_UNARY_TO_IEEE_754 -ValueNode* MaglevGraphBuilder::TryReduceBuiltin( +ReduceResult MaglevGraphBuilder::TryReduceBuiltin( compiler::JSFunctionRef target, CallArguments& args, const compiler::FeedbackSource& feedback_source, SpeculationMode speculation_mode) { if (args.mode() != CallArguments::kDefault) { // TODO(victorgomes): Maybe inline the spread stub? Or call known function // directly if arguments list is an array. - return nullptr; + return ReduceResult::Fail(); } if (speculation_mode == SpeculationMode::kDisallowSpeculation) { // TODO(leszeks): Some builtins might be inlinable without speculation. - return nullptr; + return ReduceResult::Fail(); } CallSpeculationScope speculate(this, feedback_source); - if (!target.shared().HasBuiltinId()) return nullptr; + if (!target.shared().HasBuiltinId()) { + return ReduceResult::Fail(); + } switch (target.shared().builtin_id()) { #define CASE(Name) \ case Builtin::k##Name: \ @@ -3330,12 +3372,12 @@ ValueNode* MaglevGraphBuilder::TryReduceBuiltin( #undef CASE default: // TODO(v8:7700): Inline more builtins. 
- return nullptr; + return ReduceResult::Fail(); } } ValueNode* MaglevGraphBuilder::GetConvertReceiver( - compiler::JSFunctionRef function, CallArguments& args) { + compiler::JSFunctionRef function, const CallArguments& args) { compiler::SharedFunctionInfoRef shared = function.shared(); if (shared.native() || shared.language_mode() == LanguageMode::kStrict) { if (args.receiver_mode() == ConvertReceiverMode::kNullOrUndefined) { @@ -3394,21 +3436,19 @@ ValueNode* MaglevGraphBuilder::BuildGenericCall( } } -ValueNode* MaglevGraphBuilder::TryBuildCallKnownJSFunction( +ReduceResult MaglevGraphBuilder::TryBuildCallKnownJSFunction( compiler::JSFunctionRef function, CallArguments& args) { // Don't inline CallFunction stub across native contexts. if (function.native_context() != broker()->target_native_context()) { - return nullptr; + return ReduceResult::Fail(); } if (args.mode() != CallArguments::kDefault) { // TODO(victorgomes): Maybe inline the spread stub? Or call known function // directly if arguments list is an array. - return nullptr; + return ReduceResult::Fail(); } if (v8_flags.maglev_inlining) { - if (ValueNode* inlined_result = TryBuildInlinedCall(function, args)) { - return inlined_result; - } + RETURN_IF_DONE(TryBuildInlinedCall(function, args)); } ValueNode* receiver = GetConvertReceiver(function, args); size_t input_count = args.count() + CallKnownJSFunction::kFixedInputCount; @@ -3420,12 +3460,13 @@ ValueNode* MaglevGraphBuilder::TryBuildCallKnownJSFunction( return AddNode(call); } -bool MaglevGraphBuilder::BuildCheckValue(ValueNode* node, - const compiler::ObjectRef& ref) { +ReduceResult MaglevGraphBuilder::BuildCheckValue( + ValueNode* node, const compiler::ObjectRef& ref) { if (node->Is()) { - if (node->Cast()->object().equals(ref)) return true; + if (node->Cast()->object().equals(ref)) + return ReduceResult::Done(); EmitUnconditionalDeopt(DeoptimizeReason::kUnknown); - return false; + return ReduceResult::DoneWithAbort(); } // TODO: Add CheckValue support for numbers (incl. conversion between Smi and // HeapNumber). 
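On the caller side, these reducers are paired with the RETURN_IF_DONE and RETURN_IF_ABORT macros added near the end of this patch, replacing the old null-pointer checks. A hedged sketch of that pattern, using a hypothetical ReduceExampleCallForTarget wrapper around the real BuildCheckValue, TryBuildCallKnownJSFunction and BuildGenericCall helpers changed above:

ReduceResult MaglevGraphBuilder::ReduceExampleCallForTarget(
    ValueNode* target_node, compiler::JSFunctionRef target,
    CallArguments& args) {
  // BuildCheckValue now returns Done() or DoneWithAbort(); an abort means an
  // unconditional deopt was already emitted, so this path stops building.
  RETURN_IF_ABORT(BuildCheckValue(target_node, target));
  // RETURN_IF_DONE propagates kDoneWithValue/kDoneWithoutValue/kDoneWithAbort
  // to the caller; only kFail falls through to the generic call below.
  RETURN_IF_DONE(TryBuildCallKnownJSFunction(target, args));
  return BuildGenericCall(GetConstant(target), GetContext(),
                          Call::TargetType::kJSFunction, args);
}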
@@ -3436,10 +3477,10 @@ bool MaglevGraphBuilder::BuildCheckValue(ValueNode* node, } else { AddNewNode({node}, ref); } - return true; + return ReduceResult::Done(); } -ValueNode* MaglevGraphBuilder::ReduceCall( +ReduceResult MaglevGraphBuilder::ReduceCall( compiler::ObjectRef object, CallArguments& args, const compiler::FeedbackSource& feedback_source, SpeculationMode speculation_mode) { @@ -3457,40 +3498,33 @@ ValueNode* MaglevGraphBuilder::ReduceCall( } DCHECK(target.object()->IsCallable()); - if (ValueNode* result = - TryReduceBuiltin(target, args, feedback_source, speculation_mode)) { - return result; - } - if (ValueNode* result = TryBuildCallKnownJSFunction(target, args)) { - return result; - } + RETURN_IF_DONE( + TryReduceBuiltin(target, args, feedback_source, speculation_mode)); + RETURN_IF_DONE(TryBuildCallKnownJSFunction(target, args)); } return BuildGenericCall(GetConstant(target), GetContext(), Call::TargetType::kJSFunction, args); } -ValueNode* MaglevGraphBuilder::ReduceCallForTarget( +ReduceResult MaglevGraphBuilder::ReduceCallForTarget( ValueNode* target_node, compiler::JSFunctionRef target, CallArguments& args, const compiler::FeedbackSource& feedback_source, SpeculationMode speculation_mode) { - if (!BuildCheckValue(target_node, target)) return nullptr; + if (BuildCheckValue(target_node, target).IsDoneWithAbort()) + return ReduceResult::DoneWithAbort(); return ReduceCall(target, args, feedback_source, speculation_mode); } -ValueNode* MaglevGraphBuilder::ReduceFunctionPrototypeApplyCallWithReceiver( +ReduceResult MaglevGraphBuilder::ReduceFunctionPrototypeApplyCallWithReceiver( ValueNode* target_node, compiler::JSFunctionRef receiver, CallArguments& args, const compiler::FeedbackSource& feedback_source, SpeculationMode speculation_mode) { compiler::NativeContextRef native_context = broker()->target_native_context(); - if (!BuildCheckValue(target_node, - native_context.function_prototype_apply())) { - return nullptr; - } + RETURN_IF_ABORT( + BuildCheckValue(target_node, native_context.function_prototype_apply())); ValueNode* receiver_node = GetTaggedOrUndefined(args.receiver()); - if (!BuildCheckValue(receiver_node, receiver)) { - return nullptr; - } - ValueNode* call; + RETURN_IF_ABORT(BuildCheckValue(receiver_node, receiver)); + ReduceResult call; if (args.count() == 0) { // No need for spread. CallArguments empty_args(ConvertReceiverMode::kNullOrUndefined); @@ -3539,21 +3573,23 @@ void MaglevGraphBuilder::BuildCall(ValueNode* target_node, CallArguments& args, call_feedback.target()->IsJSFunction()) { CallFeedbackContent content = call_feedback.call_feedback_content(); compiler::JSFunctionRef function = call_feedback.target()->AsJSFunction(); - ValueNode* call; + ReduceResult result; if (content == CallFeedbackContent::kTarget) { - call = ReduceCallForTarget(target_node, function, args, feedback_source, - call_feedback.speculation_mode()); + result = ReduceCallForTarget(target_node, function, args, feedback_source, + call_feedback.speculation_mode()); } else { DCHECK_EQ(content, CallFeedbackContent::kReceiver); // We only collect receiver feedback for FunctionPrototypeApply. // See CollectCallFeedback in ic-callable.tq - call = ReduceFunctionPrototypeApplyCallWithReceiver( + result = ReduceFunctionPrototypeApplyCallWithReceiver( target_node, function, args, feedback_source, call_feedback.speculation_mode()); } - // If {call} is null, we hit an unconditional deopt. 
- if (!call) return; - SetAccumulator(call); + if (result.IsDoneWithAbort()) { + return; + } + DCHECK(result.HasValue()); + SetAccumulator(result.value()); return; } @@ -4134,7 +4170,10 @@ bool MaglevGraphBuilder::TryBuildFastInstanceOf( // Call @@hasInstance CallArguments args(ConvertReceiverMode::kNotNullOrUndefined, {callable_node, object}); - ValueNode* call = ReduceCall(*has_instance_field, args); + ReduceResult result = ReduceCall(*has_instance_field, args); + // TODO(victorgomes): Propagate the case if we need to soft deopt. + DCHECK(!result.IsDoneWithAbort()); + ValueNode* call = result.value(); // Make sure that a lazy deopt after the @@hasInstance call also performs // ToBoolean before returning to the interpreter. @@ -4662,12 +4701,27 @@ void MaglevGraphBuilder::BuildBranchIfToBooleanTrue(ValueNode* node, JumpType jump_type) { int fallthrough_offset = next_offset(); int jump_offset = iterator_.GetJumpTargetOffset(); + + if (IsConstantNode(node->opcode())) { + bool constant_is_true = FromConstantToBool(local_isolate(), node); + bool is_jump_taken = constant_is_true == (jump_type == kJumpIfTrue); + if (is_jump_taken) { + BasicBlock* block = FinishBlock({}, &jump_targets_[jump_offset]); + MergeDeadIntoFrameState(fallthrough_offset); + MergeIntoFrameState(block, jump_offset); + } else { + MergeDeadIntoFrameState(jump_offset); + } + return; + } + BasicBlockRef* true_target = jump_type == kJumpIfTrue ? &jump_targets_[jump_offset] : &jump_targets_[fallthrough_offset]; BasicBlockRef* false_target = jump_type == kJumpIfFalse ? &jump_targets_[jump_offset] : &jump_targets_[fallthrough_offset]; + BasicBlock* block = FinishBlock({node}, true_target, false_target); if (jump_type == kJumpIfTrue) { @@ -4766,6 +4820,7 @@ void MaglevGraphBuilder::VisitForInPrepare() { ForInHint hint = broker()->GetFeedbackForForIn(feedback_source); + current_for_in_state = ForInState(); switch (hint) { case ForInHint::kNone: case ForInHint::kEnumCacheKeysAndIndices: @@ -4780,6 +4835,7 @@ void MaglevGraphBuilder::VisitForInPrepare() { {descriptor_array}, DescriptorArray::kEnumCacheOffset); auto* cache_array = AddNewNode({enum_cache}, EnumCache::kKeysOffset); + current_for_in_state.enum_cache = enum_cache; auto* cache_length = AddNewNode({enumerator}); @@ -4803,6 +4859,8 @@ void MaglevGraphBuilder::VisitForInPrepare() { // cache_array, and cache_length respectively. Cache type is already set // above, so store the remaining two now. StoreRegisterPair({cache_array_reg, cache_length_reg}, result); + // Force a conversion to Int32 for the cache length value. + GetInt32(cache_length_reg); break; } } @@ -4810,10 +4868,14 @@ void MaglevGraphBuilder::VisitForInPrepare() { void MaglevGraphBuilder::VisitForInContinue() { // ForInContinue - ValueNode* index = LoadRegisterTagged(0); - ValueNode* cache_length = LoadRegisterTagged(1); - // TODO(verwaest): Fold with the next instruction. 
- SetAccumulator(AddNewNode({index, cache_length})); + ValueNode* index = LoadRegisterInt32(0); + ValueNode* cache_length = LoadRegisterInt32(1); + if (TryBuildBranchFor({index, cache_length}, + Operation::kLessThan)) { + return; + } + SetAccumulator( + AddNewNode>({index, cache_length})); } void MaglevGraphBuilder::VisitForInNext() { @@ -4838,7 +4900,26 @@ void MaglevGraphBuilder::VisitForInNext() { auto* receiver_map = AddNewNode({receiver}, HeapObject::kMapOffset); AddNewNode({receiver_map, cache_type}); - SetAccumulator(AddNewNode({cache_array, index})); + auto* key = AddNewNode({cache_array, index}); + SetAccumulator(key); + + current_for_in_state.receiver = receiver; + if (ToObject* to_object = + current_for_in_state.receiver->TryCast()) { + current_for_in_state.receiver = to_object->value_input().node(); + } + current_for_in_state.receiver_needs_map_check = false; + current_for_in_state.cache_type = cache_type; + current_for_in_state.key = key; + if (hint == ForInHint::kEnumCacheKeysAndIndices) { + current_for_in_state.index = index; + } + // We know that the enum cache entry is not undefined, so skip over the + // next JumpIfUndefined. + DCHECK_EQ(iterator_.next_bytecode(), + interpreter::Bytecode::kJumpIfUndefined); + iterator_.Advance(); + MergeDeadIntoFrameState(iterator_.GetJumpTargetOffset()); break; } case ForInHint::kAny: { @@ -4855,6 +4936,7 @@ void MaglevGraphBuilder::VisitForInNext() { void MaglevGraphBuilder::VisitForInStep() { ValueNode* index = LoadRegisterInt32(0); SetAccumulator(AddNewNode>({index})); + current_for_in_state = ForInState(); } void MaglevGraphBuilder::VisitSetPendingMessage() { diff --git a/src/maglev/maglev-graph-builder.h b/src/maglev/maglev-graph-builder.h index 0c750b874b..d5adb58202 100644 --- a/src/maglev/maglev-graph-builder.h +++ b/src/maglev/maglev-graph-builder.h @@ -61,6 +61,74 @@ inline void MarkAsLazyDeoptResult(ValueNode* value, } } +class ReduceResult { + public: + enum Kind { + kDoneWithValue = 0, // No need to mask while returning the pointer. + kDoneWithAbort, + kDoneWithoutValue, + kFail, + kNone, + }; + + ReduceResult() : payload_(kNone) {} + + // NOLINTNEXTLINE + ReduceResult(ValueNode* value) : payload_(value) { DCHECK_NOT_NULL(value); } + + ValueNode* value() const { + DCHECK(HasValue()); + return payload_.GetPointerWithKnownPayload(kDoneWithValue); + } + bool HasValue() const { return kind() == kDoneWithValue; } + + static ReduceResult Done(ValueNode* value) { return ReduceResult(value); } + static ReduceResult Done() { return ReduceResult(kDoneWithoutValue); } + static ReduceResult DoneWithAbort() { return ReduceResult(kDoneWithAbort); } + static ReduceResult Fail() { return ReduceResult(kFail); } + + ReduceResult(const ReduceResult&) V8_NOEXCEPT = default; + ReduceResult& operator=(const ReduceResult&) V8_NOEXCEPT = default; + + // No/undefined result, created by default constructor. + bool IsNone() const { return kind() == kNone; } + + // Either DoneWithValue, DoneWithoutValue or DoneWithAbort. + bool IsDone() const { return !IsFail() && !IsNone(); } + + // ReduceResult failed. + bool IsFail() const { return kind() == kFail; } + + // Done with a ValueNode. + bool IsDoneWithValue() const { return HasValue(); } + + // Done without producing a ValueNode. 
+ bool IsDoneWithoutValue() const { return kind() == kDoneWithoutValue; } + + // Done with an abort (unconditional deopt, infinite loop in an inlined + // function, etc) + bool IsDoneWithAbort() const { return kind() == kDoneWithAbort; } + + Kind kind() const { return payload_.GetPayload(); } + + private: + explicit ReduceResult(Kind kind) : payload_(kind) {} + base::PointerWithPayload payload_; +}; + +#define RETURN_IF_DONE(result) \ + do { \ + auto res = result; \ + if (res.IsDone()) { \ + return res; \ + } \ + } while (false) + +#define RETURN_IF_ABORT(result) \ + if (result.IsDoneWithAbort()) { \ + return ReduceResult::DoneWithAbort(); \ + } + class MaglevGraphBuilder { public: explicit MaglevGraphBuilder(LocalIsolate* local_isolate, @@ -86,6 +154,9 @@ class MaglevGraphBuilder { BuildBody(); } + ReduceResult BuildInlined(const CallArguments& args, BasicBlockRef* start_ref, + BasicBlockRef* end_ref); + void StartPrologue(); void SetArgument(int i, ValueNode* value); void InitializeRegister(interpreter::Register reg, @@ -299,7 +370,8 @@ class MaglevGraphBuilder { auto detail = merge_state->is_exception_handler() ? "exception handler" : merge_state->is_loop() ? "loop header" : "merge"; - std::cout << "== New block (" << detail << ") ==" << std::endl; + std::cout << "== New block (" << detail << ") at " << function() + << "==" << std::endl; } if (merge_state->is_exception_handler()) { @@ -1039,8 +1111,14 @@ class MaglevGraphBuilder { zone()->New( *compilation_unit_, GetOutLiveness(), current_interpreter_frame_), BytecodeOffset(iterator_.current_offset()), current_source_position_, - // TODO(leszeks): Support inlining for lazy deopts. - nullptr); + // TODO(leszeks): Don't always allocate for the parent state, + // maybe cache it on the graph builder? + parent_ ? zone()->New(parent_->GetLatestCheckpointedFrame()) + : nullptr); + } + + void MarkPossibleMapMigration() { + current_for_in_state.receiver_needs_map_check = true; } void MarkPossibleSideEffect() { @@ -1068,6 +1146,9 @@ class MaglevGraphBuilder { // clear those. known_node_aspects().loaded_properties.clear(); known_node_aspects().loaded_context_slots.clear(); + + // Any side effect could also be a map migration. 
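ReduceResult above stores a ValueNode* and a Kind in a single word via base::PointerWithPayload; the comment on kDoneWithValue = 0 is the key, since a zero payload means the stored pointer can be handed back without masking. A self-contained sketch of that representation, assuming nodes are at least 8-byte aligned so three low bits are free for the payload:

    #include <cassert>
    #include <cstdint>

    struct alignas(8) Node { int id; };

    enum Kind : uintptr_t { kDoneWithValue = 0, kDoneWithAbort, kDoneWithoutValue, kFail, kNone };

    class Result {
     public:
      explicit Result(Kind kind) : bits_(kind) {}         // pointer part is null
      explicit Result(Node* node)
          : bits_(reinterpret_cast<uintptr_t>(node)) {
        assert(node != nullptr && (bits_ & 0x7) == 0);     // alignment bits are free
      }
      Kind kind() const { return static_cast<Kind>(bits_ & 0x7); }
      Node* value() const {
        assert(kind() == kDoneWithValue);
        // kDoneWithValue == 0, so the payload bits are already clear and the
        // raw word *is* the pointer: no masking on the hot path.
        return reinterpret_cast<Node*>(bits_);
      }

     private:
      uintptr_t bits_;  // pointer and 3-bit payload share one word
    };

    int main() {
      Node n{1};
      assert(Result(&n).kind() == kDoneWithValue && Result(&n).value() == &n);
      assert(Result(kFail).kind() == kFail);
    }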
+ MarkPossibleMapMigration(); } int next_offset() const { @@ -1146,7 +1227,8 @@ class MaglevGraphBuilder { if (NumPredecessors(next_block_offset) == 1) { if (v8_flags.trace_maglev_graph_building) { - std::cout << "== New block (single fallthrough) ==" << std::endl; + std::cout << "== New block (single fallthrough) at " << function() + << "==" << std::endl; } StartNewBlock(next_block_offset); } else { @@ -1162,14 +1244,15 @@ class MaglevGraphBuilder { } ValueNode* GetConvertReceiver(compiler::JSFunctionRef function, - CallArguments& args); + const CallArguments& args); template - ValueNode* TryBuildLoadDataView(const CallArguments& args, - ExternalArrayType type); + ReduceResult TryBuildLoadDataView(const CallArguments& args, + ExternalArrayType type); template - ValueNode* TryBuildStoreDataView(const CallArguments& args, - ExternalArrayType type, Function&& getValue); + ReduceResult TryBuildStoreDataView(const CallArguments& args, + ExternalArrayType type, + Function&& getValue); #define MATH_UNARY_IEEE_BUILTIN(V) \ V(MathAcos) \ @@ -1192,53 +1275,54 @@ class MaglevGraphBuilder { V(MathTan) \ V(MathTanh) -#define MAGLEV_REDUCED_BUILTIN(V) \ - V(DataViewPrototypeGetInt8) \ - V(DataViewPrototypeSetInt8) \ - V(DataViewPrototypeGetInt16) \ - V(DataViewPrototypeSetInt16) \ - V(DataViewPrototypeGetInt32) \ - V(DataViewPrototypeSetInt32) \ - V(DataViewPrototypeGetFloat64) \ - V(DataViewPrototypeSetFloat64) \ - V(FunctionPrototypeCall) \ - V(MathPow) \ - V(StringFromCharCode) \ - V(StringPrototypeCharCodeAt) \ +#define MAGLEV_REDUCED_BUILTIN(V) \ + V(DataViewPrototypeGetInt8) \ + V(DataViewPrototypeSetInt8) \ + V(DataViewPrototypeGetInt16) \ + V(DataViewPrototypeSetInt16) \ + V(DataViewPrototypeGetInt32) \ + V(DataViewPrototypeSetInt32) \ + V(DataViewPrototypeGetFloat64) \ + V(DataViewPrototypeSetFloat64) \ + V(FunctionPrototypeCall) \ + V(ObjectPrototypeHasOwnProperty) \ + V(MathPow) \ + V(StringFromCharCode) \ + V(StringPrototypeCharCodeAt) \ MATH_UNARY_IEEE_BUILTIN(V) -#define DEFINE_BUILTIN_REDUCER(Name) \ - ValueNode* TryReduce##Name(compiler::JSFunctionRef builtin_target, \ - CallArguments& args); +#define DEFINE_BUILTIN_REDUCER(Name) \ + ReduceResult TryReduce##Name(compiler::JSFunctionRef builtin_target, \ + CallArguments& args); MAGLEV_REDUCED_BUILTIN(DEFINE_BUILTIN_REDUCER) #undef DEFINE_BUILTIN_REDUCER template CallNode* AddNewCallNode(const CallArguments& args, Args&&... 
extra_args); - ValueNode* TryReduceBuiltin(compiler::JSFunctionRef builtin_target, - CallArguments& args, - const compiler::FeedbackSource& feedback_source, - SpeculationMode speculation_mode); - ValueNode* TryBuildCallKnownJSFunction(compiler::JSFunctionRef function, - CallArguments& args); - ValueNode* TryBuildInlinedCall(compiler::JSFunctionRef function, - CallArguments& args); + ReduceResult TryReduceBuiltin(compiler::JSFunctionRef builtin_target, + CallArguments& args, + const compiler::FeedbackSource& feedback_source, + SpeculationMode speculation_mode); + ReduceResult TryBuildCallKnownJSFunction(compiler::JSFunctionRef function, + CallArguments& args); + ReduceResult TryBuildInlinedCall(compiler::JSFunctionRef function, + CallArguments& args); ValueNode* BuildGenericCall(ValueNode* target, ValueNode* context, Call::TargetType target_type, const CallArguments& args, const compiler::FeedbackSource& feedback_source = compiler::FeedbackSource()); - ValueNode* ReduceCall( + ReduceResult ReduceCall( compiler::ObjectRef target, CallArguments& args, const compiler::FeedbackSource& feedback_source = compiler::FeedbackSource(), SpeculationMode speculation_mode = SpeculationMode::kDisallowSpeculation); - ValueNode* ReduceCallForTarget( + ReduceResult ReduceCallForTarget( ValueNode* target_node, compiler::JSFunctionRef target, CallArguments& args, const compiler::FeedbackSource& feedback_source, SpeculationMode speculation_mode); - ValueNode* ReduceFunctionPrototypeApplyCallWithReceiver( + ReduceResult ReduceFunctionPrototypeApplyCallWithReceiver( ValueNode* target_node, compiler::JSFunctionRef receiver, CallArguments& args, const compiler::FeedbackSource& feedback_source, SpeculationMode speculation_mode); @@ -1266,7 +1350,7 @@ class MaglevGraphBuilder { base::Vector maps); // Emits an unconditional deopt and returns false if the node is a constant // that doesn't match the ref. - bool BuildCheckValue(ValueNode* node, const compiler::ObjectRef& ref); + ReduceResult BuildCheckValue(ValueNode* node, const compiler::ObjectRef& ref); ValueNode* GetInt32ElementIndex(interpreter::Register reg) { ValueNode* index_object = current_interpreter_frame_.get(reg); @@ -1469,6 +1553,9 @@ class MaglevGraphBuilder { const compiler::BytecodeArrayRef& bytecode() const { return compilation_unit_->bytecode(); } + const compiler::JSFunctionRef& function() const { + return compilation_unit_->function(); + } const compiler::BytecodeAnalysis& bytecode_analysis() const { return bytecode_analysis_; } @@ -1510,6 +1597,16 @@ class MaglevGraphBuilder { BasicBlock* current_block_ = nullptr; base::Optional latest_checkpointed_frame_; SourcePosition current_source_position_; + struct ForInState { + ValueNode* receiver = nullptr; + ValueNode* cache_type = nullptr; + ValueNode* enum_cache = nullptr; + ValueNode* key = nullptr; + ValueNode* index = nullptr; + bool receiver_needs_map_check = false; + }; + // TODO(leszeks): Allow having a stack of these. 
+ ForInState current_for_in_state = ForInState(); BasicBlockRef* jump_targets_; MergePointInterpreterFrameState** merge_states_; diff --git a/src/maglev/maglev-ir.cc b/src/maglev/maglev-ir.cc index 1b552f8e37..6309178938 100644 --- a/src/maglev/maglev-ir.cc +++ b/src/maglev/maglev-ir.cc @@ -182,9 +182,8 @@ bool RootConstant::ToBoolean(LocalIsolate* local_isolate) const { return RootToBoolean(index_); } -bool FromConstantToBool(MaglevAssembler* masm, ValueNode* node) { +bool FromConstantToBool(LocalIsolate* local_isolate, ValueNode* node) { DCHECK(IsConstantNode(node->opcode())); - LocalIsolate* local_isolate = masm->isolate()->AsLocalIsolate(); switch (node->opcode()) { #define CASE(Name) \ case Opcode::k##Name: { \ @@ -197,6 +196,14 @@ bool FromConstantToBool(MaglevAssembler* masm, ValueNode* node) { } } +bool FromConstantToBool(MaglevAssembler* masm, ValueNode* node) { + // TODO(leszeks): Getting the main thread local isolate is not what we + // actually want here, but it's all we have, and it happens to work because + // really all we're using it for is ReadOnlyRoots. We should change ToBoolean + // to be able to pass ReadOnlyRoots in directly. + return FromConstantToBool(masm->isolate()->AsLocalIsolate(), node); +} + DeoptInfo::DeoptInfo(Zone* zone, DeoptFrame top_frame, compiler::FeedbackSource feedback_to_update) : top_frame_(top_frame), @@ -953,7 +960,7 @@ void LoadDoubleField::GenerateCode(MaglevAssembler* masm, Register tmp = temps.Acquire(); Register object = ToRegister(object_input()); __ AssertNotSmi(object); - __ DecompressAnyTagged(tmp, FieldMemOperand(object, offset())); + __ DecompressTagged(tmp, FieldMemOperand(object, offset())); __ AssertNotSmi(tmp); __ LoadHeapNumberValue(ToDoubleRegister(result()), tmp); } @@ -966,8 +973,186 @@ void LoadTaggedField::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { Register object = ToRegister(object_input()); __ AssertNotSmi(object); - __ DecompressAnyTagged(ToRegister(result()), - FieldMemOperand(object, offset())); + __ DecompressTagged(ToRegister(result()), FieldMemOperand(object, offset())); +} + +void LoadTaggedFieldByFieldIndex::SetValueLocationConstraints() { + UseRegister(object_input()); + UseAndClobberRegister(index_input()); + DefineAsRegister(this); + set_temporaries_needed(1); + set_double_temporaries_needed(1); +} +void LoadTaggedFieldByFieldIndex::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + Register object = ToRegister(object_input()); + Register index = ToRegister(index_input()); + Register result_reg = ToRegister(result()); + __ AssertNotSmi(object); + __ AssertSmi(index); + + ZoneLabelRef done(masm); + + // For in-object properties, the index is encoded as: + // + // index = actual_index | is_double_bit | smi_tag_bit + // = actual_index << 2 | is_double_bit << 1 + // + // The value we want is at the field offset: + // + // (actual_index << kTaggedSizeLog2) + JSObject::kHeaderSize + // + // We could get index from actual_index by shifting away the double and smi + // bits. But, note that `kTaggedSizeLog2 == 2` and `index` encodes + // `actual_index` with a two bit shift. 
So, we can do some rearranging + // to get the offset without shifting: + // + // ((index >> 2) << kTaggedSizeLog2 + JSObject::kHeaderSize + // + // [Expand definitions of index and kTaggedSizeLog2] + // = (((actual_index << 2 | is_double_bit << 1) >> 2) << 2) + // + JSObject::kHeaderSize + // + // [Cancel out shift down and shift up, clear is_double bit by subtracting] + // = (actual_index << 2 | is_double_bit << 1) - (is_double_bit << 1) + // + JSObject::kHeaderSize + // + // [Fold together the constants, and collapse definition of index] + // = index + (JSObject::kHeaderSize - (is_double_bit << 1)) + // + // + // For out-of-object properties, the encoding is: + // + // index = (-1 - actual_index) | is_double_bit | smi_tag_bit + // = (-1 - actual_index) << 2 | is_double_bit << 1 + // = (-1 - actual_index) * 4 + (is_double_bit ? 2 : 0) + // = -(actual_index * 4) + (is_double_bit ? 2 : 0) - 4 + // = -(actual_index << 2) + (is_double_bit ? 2 : 0) - 4 + // + // The value we want is in the property array at offset: + // + // (actual_index << kTaggedSizeLog2) + FixedArray::kHeaderSize + // + // [Expand definition of kTaggedSizeLog2] + // = (actual_index << 2) + FixedArray::kHeaderSize + // + // [Substitute in index] + // = (-index + (is_double_bit ? 2 : 0) - 4) + FixedArray::kHeaderSize + // + // [Fold together the constants] + // = -index + (FixedArray::kHeaderSize + (is_double_bit ? 2 : 0) - 4)) + // + // This allows us to simply negate the index register and do a load with + // otherwise constant offset. + + // Check if field is a mutable double field. + static constexpr int32_t kIsDoubleBitMask = 1 << kSmiTagSize; + __ TestInt32AndJumpIfAnySet( + index, kIsDoubleBitMask, + __ MakeDeferredCode( + [](MaglevAssembler* masm, Register object, Register index, + Register result_reg, RegisterSnapshot register_snapshot, + ZoneLabelRef done) { + // The field is a Double field, a.k.a. a mutable HeapNumber. + static const int kIsDoubleBit = 1; + + // Check if field is in-object or out-of-object. The is_double bit + // value doesn't matter, since negative values will stay negative. + Label if_outofobject, loaded_field; + __ CompareInt32AndJumpIf(index, 0, kLessThan, &if_outofobject); + + // The field is located in the {object} itself. + { + // See giant comment above. + static_assert(kTaggedSizeLog2 == 2); + static_assert(kSmiTagSize == 1); + // We haven't untagged, so we need to sign extend. + __ SignExtend32To64Bits(index, index); + __ LoadTaggedFieldByIndex( + result_reg, object, index, 1, + JSObject::kHeaderSize - (kIsDoubleBit << kSmiTagSize)); + __ Jump(&loaded_field); + } + + __ bind(&if_outofobject); + { + MaglevAssembler::ScratchRegisterScope temps(masm); + Register property_array = temps.Acquire(); + // Load the property array. + __ LoadTaggedField( + property_array, + FieldMemOperand(object, JSObject::kPropertiesOrHashOffset)); + + // See giant comment above. + static_assert(kSmiTagSize == 1); + __ NegateInt32(index); + __ LoadTaggedFieldByIndex( + result_reg, property_array, index, 1, + FixedArray::kHeaderSize + (kIsDoubleBit << kSmiTagSize) - 4); + __ Jump(&loaded_field); + } + + __ bind(&loaded_field); + // We may have transitioned in-place away from double, so check that + // this is a HeapNumber -- otherwise the load is fine and we don't + // need to copy anything anyway. + __ JumpIfSmi(result_reg, *done); + // index is no longer needed and is clobbered by this node, so + // reuse it as a scratch reg storing the map. 
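The index-encoding comment above is easy to sanity-check with concrete numbers. A small standalone check, using illustrative header sizes (the real JSObject::kHeaderSize and FixedArray::kHeaderSize depend on the build configuration) and the same kTaggedSizeLog2 == 2 / kSmiTagSize == 1 assumptions that the static_asserts in the generated code rely on:

    #include <cassert>

    constexpr int kJSObjectHeaderSize = 24;    // illustrative, not the real constant
    constexpr int kFixedArrayHeaderSize = 16;  // illustrative, not the real constant

    constexpr int EncodeInObject(int actual_index, bool is_double) {
      return actual_index * 4 + (is_double ? 2 : 0);
    }
    constexpr int EncodeOutOfObject(int actual_index, bool is_double) {
      return (-1 - actual_index) * 4 + (is_double ? 2 : 0);
    }

    int main() {
      for (int is_double = 0; is_double <= 1; ++is_double) {
        // In-object: index + (JSObject header - (is_double_bit << 1))
        //            == actual_index * 4 + JSObject header.
        int index = EncodeInObject(3, is_double);
        assert(index + (kJSObjectHeaderSize - (is_double << 1)) ==
               3 * 4 + kJSObjectHeaderSize);

        // Out-of-object: -index + (FixedArray header + (is_double_bit << 1) - 4)
        //                == actual_index * 4 + FixedArray header.
        index = EncodeOutOfObject(3, is_double);
        assert(-index + (kFixedArrayHeaderSize + (is_double << 1) - 4) ==
               3 * 4 + kFixedArrayHeaderSize);
      }
      return 0;
    }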
+ Register map = index; + __ LoadMap(map, result_reg); + __ JumpIfNotRoot(map, RootIndex::kHeapNumberMap, *done); + MaglevAssembler::ScratchRegisterScope temps(masm); + DoubleRegister double_value = temps.AcquireDouble(); + __ LoadHeapNumberValue(double_value, result_reg); + __ AllocateHeapNumber(register_snapshot, result_reg, double_value); + __ Jump(*done); + }, + object, index, result_reg, register_snapshot(), done)); + + // The field is a proper Tagged field on {object}. The {index} is shifted + // to the left by one in the code below. + { + static const int kIsDoubleBit = 0; + + // Check if field is in-object or out-of-object. The is_double bit value + // doesn't matter, since negative values will stay negative. + Label if_outofobject; + __ CompareInt32AndJumpIf(index, 0, kLessThan, &if_outofobject); + + // The field is located in the {object} itself. + { + // See giant comment above. + static_assert(kTaggedSizeLog2 == 2); + static_assert(kSmiTagSize == 1); + // We haven't untagged, so we need to sign extend. + __ SignExtend32To64Bits(index, index); + __ LoadTaggedFieldByIndex( + result_reg, object, index, 1, + JSObject::kHeaderSize - (kIsDoubleBit << kSmiTagSize)); + __ Jump(*done); + } + + __ bind(&if_outofobject); + { + MaglevAssembler::ScratchRegisterScope temps(masm); + Register property_array = temps.Acquire(); + // Load the property array. + __ LoadTaggedField( + property_array, + FieldMemOperand(object, JSObject::kPropertiesOrHashOffset)); + + // See giant comment above. + static_assert(kSmiTagSize == 1); + __ NegateInt32(index); + __ LoadTaggedFieldByIndex( + result_reg, property_array, index, 1, + FixedArray::kHeaderSize + (kIsDoubleBit << kSmiTagSize) - 4); + // Fallthrough to `done`. + } + } + + __ bind(*done); } namespace { @@ -988,26 +1173,29 @@ void EmitPolymorphicAccesses(MaglevAssembler* masm, NodeT* node, for (const PolymorphicAccessInfo& access_info : node->access_infos()) { Label next; Label map_found; - bool has_heap_number_map = false; + auto& maps = access_info.maps(); - for (auto it = access_info.maps().begin(); it != access_info.maps().end(); - ++it) { - if (it->IsHeapNumberMap()) { - has_heap_number_map = true; - } - __ CompareTagged(object_map, it->object()); - if (it == access_info.maps().end() - 1) { - __ JumpIf(kNotEqual, &next); - // Fallthrough... to map_found. - } else { - __ JumpIf(kEqual, &map_found); - } - } - - // Bind number case here if one of the maps is HeapNumber. - if (has_heap_number_map) { + if (HasOnlyNumberMaps(base::VectorOf(maps))) { + __ CompareRoot(object_map, RootIndex::kHeapNumberMap); + __ JumpIf(kNotEqual, &next); + // Fallthrough... to map_found. DCHECK(!is_number.is_bound()); __ bind(&is_number); + } else if (HasOnlyStringMaps(base::VectorOf(maps))) { + __ CompareInstanceTypeRange(object_map, FIRST_STRING_TYPE, + LAST_STRING_TYPE); + __ JumpIf(kUnsignedGreaterThan, &next); + // Fallthrough... to map_found. + } else { + for (auto it = maps.begin(); it != maps.end(); ++it) { + __ CompareTagged(object_map, it->object()); + if (it == maps.end() - 1) { + __ JumpIf(kNotEqual, &next); + // Fallthrough... to map_found. + } else { + __ JumpIf(kEqual, &map_found); + } + } } __ bind(&map_found); @@ -1062,8 +1250,8 @@ void LoadPolymorphicTaggedField::GenerateCode(MaglevAssembler* masm, Register cell = map; // Reuse scratch. 
__ Move(cell, access_info.cell()); __ AssertNotSmi(cell); - __ DecompressAnyTagged(result, - FieldMemOperand(cell, Cell::kValueOffset)); + __ DecompressTagged(result, + FieldMemOperand(cell, Cell::kValueOffset)); break; } case PolymorphicAccessInfo::kDataLoad: { @@ -1875,7 +2063,7 @@ void GeneratorRestoreRegister::GenerateCode(MaglevAssembler* masm, Register value = (array == result_reg ? temp : result_reg); // Loads the current value in the generator register file. - __ DecompressAnyTagged( + __ DecompressTagged( value, FieldMemOperand(array, FixedArray::OffsetOfElementAt(index()))); // And trashs it with StaleRegisterConstant. @@ -2426,9 +2614,9 @@ void CallKnownJSFunction::GenerateCode(MaglevAssembler* masm, __ CallBuiltin(shared_function_info().builtin_id()); } else { __ AssertCallableFunction(kJavaScriptCallTargetRegister); - __ LoadTaggedPointerField(kJavaScriptCallCodeStartRegister, - FieldMemOperand(kJavaScriptCallTargetRegister, - JSFunction::kCodeOffset)); + __ LoadTaggedField(kJavaScriptCallCodeStartRegister, + FieldMemOperand(kJavaScriptCallTargetRegister, + JSFunction::kCodeOffset)); __ CallCodeObject(kJavaScriptCallCodeStartRegister); } masm->DefineExceptionHandlerAndLazyDeoptPoint(this); diff --git a/src/maglev/maglev-ir.h b/src/maglev/maglev-ir.h index d02b9ee4fe..e6cb3a659c 100644 --- a/src/maglev/maglev-ir.h +++ b/src/maglev/maglev-ir.h @@ -165,6 +165,7 @@ class CompactInterpreterFrameState; V(LoadPolymorphicTaggedField) \ V(LoadTaggedField) \ V(LoadDoubleField) \ + V(LoadTaggedFieldByFieldIndex) \ V(LoadFixedArrayElement) \ V(LoadFixedDoubleArrayElement) \ V(LoadSignedIntDataViewElement) \ @@ -414,6 +415,7 @@ enum class ValueRepresentation : uint8_t { constexpr Condition ConditionFor(Operation cond); +bool FromConstantToBool(LocalIsolate* local_isolate, ValueNode* node); bool FromConstantToBool(MaglevAssembler* masm, ValueNode* node); inline int ExternalArrayElementSize(const ExternalArrayType element_type) { @@ -463,6 +465,20 @@ inline std::ostream& operator<<(std::ostream& os, return os; } +inline bool HasOnlyStringMaps(base::Vector maps) { + for (compiler::MapRef map : maps) { + if (!map.IsStringMap()) return false; + } + return true; +} + +inline bool HasOnlyNumberMaps(base::Vector maps) { + for (compiler::MapRef map : maps) { + if (map.instance_type() != HEAP_NUMBER_TYPE) return false; + } + return true; +} + #define DEF_FORWARD_DECLARATION(type, ...) 
class type; NODE_BASE_LIST(DEF_FORWARD_DECLARATION) #undef DEF_FORWARD_DECLARATION @@ -4284,6 +4300,29 @@ class LoadDoubleField : public FixedInputValueNodeT<1, LoadDoubleField> { const int offset_; }; +class LoadTaggedFieldByFieldIndex + : public FixedInputValueNodeT<2, LoadTaggedFieldByFieldIndex> { + using Base = FixedInputValueNodeT<2, LoadTaggedFieldByFieldIndex>; + + public: + explicit LoadTaggedFieldByFieldIndex(uint64_t bitfield) : Base(bitfield) {} + + static constexpr OpProperties kProperties = + OpProperties::Reading() | OpProperties::DeferredCall(); + static constexpr typename Base::InputTypes kInputTypes{ + ValueRepresentation::kTagged, ValueRepresentation::kTagged}; + + static constexpr int kObjectIndex = 0; + static constexpr int kIndexIndex = 1; + Input& object_input() { return input(kObjectIndex); } + Input& index_input() { return input(kIndexIndex); } + + int MaxCallStackArgs() const { return 0; } + void SetValueLocationConstraints(); + void GenerateCode(MaglevAssembler*, const ProcessingState&); + void PrintParams(std::ostream&, MaglevGraphLabeller*) const {} +}; + class LoadFixedArrayElement : public FixedInputValueNodeT<2, LoadFixedArrayElement> { using Base = FixedInputValueNodeT<2, LoadFixedArrayElement>; diff --git a/src/maglev/x64/maglev-assembler-x64-inl.h b/src/maglev/x64/maglev-assembler-x64-inl.h index b4104040d9..427a303d06 100644 --- a/src/maglev/x64/maglev-assembler-x64-inl.h +++ b/src/maglev/x64/maglev-assembler-x64-inl.h @@ -38,6 +38,19 @@ constexpr Condition ConditionFor(Operation operation) { } } +inline ScaleFactor ScaleFactorFromInt(int n) { + switch (n) { + case 1: + return times_1; + case 2: + return times_2; + case 4: + return times_4; + default: + UNREACHABLE(); + } +} + class MaglevAssembler::ScratchRegisterScope { public: explicit ScratchRegisterScope(MaglevAssembler* masm) @@ -223,6 +236,14 @@ inline void MaglevAssembler::BuildTypedArrayDataPointer(Register data_pointer, addq(data_pointer, base); } +inline void MaglevAssembler::LoadTaggedFieldByIndex(Register result, + Register object, + Register index, int scale, + int offset) { + LoadTaggedField( + result, FieldOperand(object, index, ScaleFactorFromInt(scale), offset)); +} + inline void MaglevAssembler::LoadBoundedSizeFromObject(Register result, Register object, int offset) { @@ -361,6 +382,7 @@ inline void MaglevAssembler::Move(Register dst, Handle obj) { inline void MaglevAssembler::SignExtend32To64Bits(Register dst, Register src) { movsxlq(dst, src); } +inline void MaglevAssembler::NegateInt32(Register val) { negl(val); } template inline void MaglevAssembler::DeoptIfBufferDetached(Register array, @@ -372,10 +394,10 @@ inline void MaglevAssembler::DeoptIfBufferDetached(Register array, ->DependOnArrayBufferDetachingProtector()) { // A detached buffer leads to megamorphic feedback, so we won't have a deopt // loop if we deopt here. 
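LoadTaggedFieldByIndex above turns the whole access into a single x64 scaled-index memory operand. Roughly, the address it loads from is the one computed below; kHeapObjectTag accounts for FieldOperand's untagging of the object pointer. This sketches only the addressing arithmetic, not the assembler API:

    #include <cstdint>

    constexpr int kHeapObjectTag = 1;  // heap pointers carry a +1 tag

    // One MOV from [object + index * scale + offset - kHeapObjectTag]: base
    // register, index register, scale of 1/2/4, and a constant displacement --
    // no separate shift or add instructions are needed.
    constexpr uintptr_t EffectiveAddress(uintptr_t object, intptr_t index,
                                         int scale, int offset) {
      return object + static_cast<uintptr_t>(index * scale) + offset - kHeapObjectTag;
    }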
- LoadTaggedPointerField( - scratch, FieldOperand(array, JSArrayBufferView::kBufferOffset)); - LoadTaggedPointerField( - scratch, FieldOperand(scratch, JSArrayBuffer::kBitFieldOffset)); + LoadTaggedField(scratch, + FieldOperand(array, JSArrayBufferView::kBufferOffset)); + LoadTaggedField(scratch, + FieldOperand(scratch, JSArrayBuffer::kBitFieldOffset)); testl(scratch, Immediate(JSArrayBuffer::WasDetachedBit::kMask)); EmitEagerDeoptIf(not_zero, DeoptimizeReason::kArrayBufferWasDetached, node); } @@ -405,6 +427,17 @@ inline void MaglevAssembler::CompareObjectTypeRange(Register heap_object, higher_limit); } +inline void MaglevAssembler::CompareInstanceTypeRange( + Register map, InstanceType lower_limit, InstanceType higher_limit) { + CompareInstanceTypeRange(map, kScratchRegister, lower_limit, higher_limit); +} + +inline void MaglevAssembler::CompareInstanceTypeRange( + Register map, Register instance_type_out, InstanceType lower_limit, + InstanceType higher_limit) { + CmpInstanceTypeRange(map, instance_type_out, lower_limit, higher_limit); +} + inline void MaglevAssembler::CompareTagged(Register reg, Handle obj) { Cmp(reg, obj); @@ -457,6 +490,26 @@ void MaglevAssembler::CompareInt32AndJumpIf(Register r1, Register r2, JumpIf(cond, target, distance); } +inline void MaglevAssembler::CompareInt32AndJumpIf(Register r1, int32_t value, + Condition cond, + Label* target, + Label::Distance distance) { + CompareInt32(r1, value); + JumpIf(cond, target, distance); +} + +inline void MaglevAssembler::TestInt32AndJumpIfAnySet( + Register r1, int32_t mask, Label* target, Label::Distance distance) { + testl(r1, Immediate(mask)); + JumpIf(kNotZero, target, distance); +} + +inline void MaglevAssembler::TestInt32AndJumpIfAllClear( + Register r1, int32_t mask, Label* target, Label::Distance distance) { + testl(r1, Immediate(mask)); + JumpIf(kZero, target, distance); +} + inline void MaglevAssembler::LoadHeapNumberValue(DoubleRegister result, Register heap_number) { Movsd(result, FieldOperand(heap_number, HeapNumber::kValueOffset)); diff --git a/src/maglev/x64/maglev-assembler-x64.cc b/src/maglev/x64/maglev-assembler-x64.cc index 1e3bffad2c..47dfe68006 100644 --- a/src/maglev/x64/maglev-assembler-x64.cc +++ b/src/maglev/x64/maglev-assembler-x64.cc @@ -111,8 +111,8 @@ void MaglevAssembler::LoadSingleCharacterString(Register result, DCHECK_NE(char_code, scratch); Register table = scratch; LoadRoot(table, RootIndex::kSingleCharacterStringTable); - DecompressAnyTagged(result, FieldOperand(table, char_code, times_tagged_size, - FixedArray::kHeaderSize)); + DecompressTagged(result, FieldOperand(table, char_code, times_tagged_size, + FixedArray::kHeaderSize)); } void MaglevAssembler::StringFromCharCode(RegisterSnapshot register_snapshot, @@ -160,7 +160,7 @@ void MaglevAssembler::StringCharCodeAt(RegisterSnapshot& register_snapshot, Label cons_string; Label sliced_string; - DeferredCodeInfo* deferred_runtime_call = PushDeferredCode( + Label* deferred_runtime_call = MakeDeferredCode( [](MaglevAssembler* masm, RegisterSnapshot register_snapshot, ZoneLabelRef done, Register result, Register string, Register index) { DCHECK(!register_snapshot.live_registers.has(result)); @@ -218,14 +218,13 @@ void MaglevAssembler::StringCharCodeAt(RegisterSnapshot& register_snapshot, cmpl(representation, Immediate(kSlicedStringTag)); j(equal, &sliced_string, Label::kNear); cmpl(representation, Immediate(kThinStringTag)); - j(not_equal, &deferred_runtime_call->deferred_code_label); + j(not_equal, deferred_runtime_call); // Fallthrough to 
thin string. } // Is a thin string. { - DecompressAnyTagged(string, - FieldOperand(string, ThinString::kActualOffset)); + DecompressTagged(string, FieldOperand(string, ThinString::kActualOffset)); jmp(&loop, Label::kNear); } @@ -234,8 +233,7 @@ void MaglevAssembler::StringCharCodeAt(RegisterSnapshot& register_snapshot, Register offset = scratch; movl(offset, FieldOperand(string, SlicedString::kOffsetOffset)); SmiUntag(offset); - DecompressAnyTagged(string, - FieldOperand(string, SlicedString::kParentOffset)); + DecompressTagged(string, FieldOperand(string, SlicedString::kParentOffset)); addl(index, offset); jmp(&loop, Label::kNear); } @@ -244,8 +242,8 @@ void MaglevAssembler::StringCharCodeAt(RegisterSnapshot& register_snapshot, { CompareRoot(FieldOperand(string, ConsString::kSecondOffset), RootIndex::kempty_string); - j(not_equal, &deferred_runtime_call->deferred_code_label); - DecompressAnyTagged(string, FieldOperand(string, ConsString::kFirstOffset)); + j(not_equal, deferred_runtime_call); + DecompressTagged(string, FieldOperand(string, ConsString::kFirstOffset)); jmp(&loop, Label::kNear); // Try again with first string. } @@ -499,7 +497,7 @@ void MaglevAssembler::Prologue(Graph* graph) { Register flags = rcx; Register feedback_vector = r9; - DeferredCodeInfo* deferred_flags_need_processing = PushDeferredCode( + Label* deferred_flags_need_processing = MakeDeferredCode( [](MaglevAssembler* masm, Register flags, Register feedback_vector) { ASM_CODE_COMMENT_STRING(masm, "Optimized marker check"); // TODO(leszeks): This could definitely be a builtin that we @@ -514,7 +512,7 @@ void MaglevAssembler::Prologue(Graph* graph) { compilation_info()->toplevel_compilation_unit()->feedback().object()); LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing( flags, feedback_vector, CodeKind::MAGLEV, - &deferred_flags_need_processing->deferred_code_label); + deferred_flags_need_processing); } EnterFrame(StackFrame::MAGLEV); diff --git a/src/maglev/x64/maglev-ir-x64.cc b/src/maglev/x64/maglev-ir-x64.cc index 27a720a73f..b7f8018c1b 100644 --- a/src/maglev/x64/maglev-ir-x64.cc +++ b/src/maglev/x64/maglev-ir-x64.cc @@ -57,7 +57,7 @@ void GeneratorStore::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { Register generator = ToRegister(generator_input()); Register array = WriteBarrierDescriptor::ObjectRegister(); - __ LoadTaggedPointerField( + __ LoadTaggedField( array, FieldOperand(generator, JSGeneratorObject::kParametersAndRegistersOffset)); @@ -71,7 +71,7 @@ void GeneratorStore::GenerateCode(MaglevAssembler* masm, WriteBarrierDescriptor::SlotAddressRegister()); ZoneLabelRef done(masm); - DeferredCodeInfo* deferred_write_barrier = __ PushDeferredCode( + Label* deferred_write_barrier = __ MakeDeferredCode( [](MaglevAssembler* masm, ZoneLabelRef done, Register value, Register array, GeneratorStore* node, int32_t offset) { ASM_CODE_COMMENT_STRING(masm, "Write barrier slow path"); @@ -107,7 +107,7 @@ void GeneratorStore::GenerateCode(MaglevAssembler* masm, // with and without write barrier. 
__ CheckPageFlag(array, kScratchRegister, MemoryChunk::kPointersFromHereAreInterestingMask, not_zero, - &deferred_write_barrier->deferred_code_label); + deferred_write_barrier); __ bind(*done); } @@ -118,7 +118,7 @@ void GeneratorStore::GenerateCode(MaglevAssembler* masm, context_input(), WriteBarrierDescriptor::SlotAddressRegister()); ZoneLabelRef done(masm); - DeferredCodeInfo* deferred_context_write_barrier = __ PushDeferredCode( + Label* deferred_context_write_barrier = __ MakeDeferredCode( [](MaglevAssembler* masm, ZoneLabelRef done, Register context, Register generator, GeneratorStore* node) { ASM_CODE_COMMENT_STRING(masm, "Write barrier slow path"); @@ -156,7 +156,7 @@ void GeneratorStore::GenerateCode(MaglevAssembler* masm, __ AssertNotSmi(context); __ CheckPageFlag(generator, kScratchRegister, MemoryChunk::kPointersFromHereAreInterestingMask, not_zero, - &deferred_context_write_barrier->deferred_code_label); + deferred_context_write_barrier); __ bind(*done); __ StoreTaggedSignedField( @@ -498,8 +498,8 @@ void CheckJSObjectElementsBounds::GenerateCode(MaglevAssembler* masm, __ CmpObjectType(object, FIRST_JS_OBJECT_TYPE, kScratchRegister); __ Assert(greater_equal, AbortReason::kUnexpectedValue); } - __ LoadAnyTaggedField(kScratchRegister, - FieldOperand(object, JSObject::kElementsOffset)); + __ LoadTaggedField(kScratchRegister, + FieldOperand(object, JSObject::kElementsOffset)); if (v8_flags.debug_code) { __ AssertNotSmi(kScratchRegister); } @@ -548,8 +548,8 @@ void CheckedInternalizedString::GenerateCode(MaglevAssembler* masm, // Deopt if this isn't a thin string. __ testb(map_tmp, Immediate(kThinStringTagBit)); __ EmitEagerDeoptIf(zero, DeoptimizeReason::kWrongMap, node); - __ LoadTaggedPointerField( - object, FieldOperand(object, ThinString::kActualOffset)); + __ LoadTaggedField(object, + FieldOperand(object, ThinString::kActualOffset)); if (v8_flags.debug_code) { __ RecordComment("DCHECK IsInternalizedString"); __ LoadMap(map_tmp, object); @@ -721,9 +721,9 @@ void LoadFixedArrayElement::GenerateCode(MaglevAssembler* masm, __ cmpq(index, Immediate(0)); __ Assert(above_equal, AbortReason::kUnexpectedNegativeValue); } - __ DecompressAnyTagged(result_reg, - FieldOperand(elements, index, times_tagged_size, - FixedArray::kHeaderSize)); + __ DecompressTagged(result_reg, + FieldOperand(elements, index, times_tagged_size, + FixedArray::kHeaderSize)); } void LoadFixedDoubleArrayElement::SetValueLocationConstraints() { @@ -977,19 +977,6 @@ void StoreDoubleDataViewElement::GenerateCode(MaglevAssembler* masm, namespace { -ScaleFactor ScaleFactorFromInt(int n) { - switch (n) { - case 1: - return times_1; - case 2: - return times_2; - case 4: - return times_4; - default: - UNREACHABLE(); - } -} - template void GenerateTypedArrayLoad(MaglevAssembler* masm, NodeT* node, Register object, Register index, ResultReg result_reg, @@ -1092,7 +1079,7 @@ void StoreDoubleField::GenerateCode(MaglevAssembler* masm, DoubleRegister value = ToDoubleRegister(value_input()); __ AssertNotSmi(object); - __ DecompressAnyTagged(tmp, FieldOperand(object, offset())); + __ DecompressTagged(tmp, FieldOperand(object, offset())); __ AssertNotSmi(tmp); __ Movsd(FieldOperand(tmp, HeapNumber::kValueOffset), value); } @@ -1117,7 +1104,7 @@ void StoreMap::GenerateCode(MaglevAssembler* masm, kScratchRegister); ZoneLabelRef done(masm); - DeferredCodeInfo* deferred_write_barrier = __ PushDeferredCode( + Label* deferred_write_barrier = __ MakeDeferredCode( [](MaglevAssembler* masm, ZoneLabelRef done, Register value, Register 
object, StoreMap* node) { ASM_CODE_COMMENT_STRING(masm, "Write barrier slow path"); @@ -1150,7 +1137,7 @@ void StoreMap::GenerateCode(MaglevAssembler* masm, __ JumpIfSmi(value, *done); __ CheckPageFlag(object, kScratchRegister, MemoryChunk::kPointersFromHereAreInterestingMask, not_zero, - &deferred_write_barrier->deferred_code_label); + deferred_write_barrier); __ bind(*done); } @@ -1174,7 +1161,7 @@ void StoreTaggedFieldWithWriteBarrier::GenerateCode( __ StoreTaggedField(FieldOperand(object, offset()), value); ZoneLabelRef done(masm); - DeferredCodeInfo* deferred_write_barrier = __ PushDeferredCode( + Label* deferred_write_barrier = __ MakeDeferredCode( [](MaglevAssembler* masm, ZoneLabelRef done, Register value, Register object, StoreTaggedFieldWithWriteBarrier* node) { ASM_CODE_COMMENT_STRING(masm, "Write barrier slow path"); @@ -1207,7 +1194,7 @@ void StoreTaggedFieldWithWriteBarrier::GenerateCode( __ JumpIfSmi(value, *done); __ CheckPageFlag(object, kScratchRegister, MemoryChunk::kPointersFromHereAreInterestingMask, not_zero, - &deferred_write_barrier->deferred_code_label); + deferred_write_barrier); __ bind(*done); } @@ -2162,8 +2149,8 @@ void IncreaseInterruptBudget::GenerateCode(MaglevAssembler* masm, MaglevAssembler::ScratchRegisterScope temps(masm); Register scratch = temps.Acquire(); __ movq(scratch, MemOperand(rbp, StandardFrameConstants::kFunctionOffset)); - __ LoadTaggedPointerField( - scratch, FieldOperand(scratch, JSFunction::kFeedbackCellOffset)); + __ LoadTaggedField(scratch, + FieldOperand(scratch, JSFunction::kFeedbackCellOffset)); __ addl(FieldOperand(scratch, FeedbackCell::kInterruptBudgetOffset), Immediate(amount())); } @@ -2253,8 +2240,8 @@ void HandleInterruptsAndTiering(MaglevAssembler* masm, ZoneLabelRef done, __ incl(FieldOperand(scratch0, FeedbackVector::kProfilerTicksOffset)); // JSFunction::SetInterruptBudget. __ movq(scratch0, MemOperand(rbp, StandardFrameConstants::kFunctionOffset)); - __ LoadTaggedPointerField( - scratch0, FieldOperand(scratch0, JSFunction::kFeedbackCellOffset)); + __ LoadTaggedField(scratch0, + FieldOperand(scratch0, JSFunction::kFeedbackCellOffset)); __ movl(FieldOperand(scratch0, FeedbackCell::kInterruptBudgetOffset), Immediate(v8_flags.interrupt_budget)); __ jmp(*done); @@ -2272,8 +2259,8 @@ void ReduceInterruptBudget::GenerateCode(MaglevAssembler* masm, MaglevAssembler::ScratchRegisterScope temps(masm); Register scratch = temps.Acquire(); __ movq(scratch, MemOperand(rbp, StandardFrameConstants::kFunctionOffset)); - __ LoadTaggedPointerField( - scratch, FieldOperand(scratch, JSFunction::kFeedbackCellOffset)); + __ LoadTaggedField(scratch, + FieldOperand(scratch, JSFunction::kFeedbackCellOffset)); __ subl(FieldOperand(scratch, FeedbackCell::kInterruptBudgetOffset), Immediate(amount())); ZoneLabelRef done(masm); @@ -2342,8 +2329,8 @@ void Return::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { __ bind(&drop_dynamic_arg_size); // Drop receiver + arguments according to dynamic arguments size. 
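The GeneratorStore, StoreMap and StoreTaggedFieldWithWriteBarrier hunks above all wrap their deferred code in the same fast path: perform the store, then fall through past the barrier when the stored value is a Smi or when the host object's page has no interesting-pointers flag set. A rough standalone sketch of that decision (the flag constant and helper name are hypothetical):

    #include <cstdint>

    // On 64-bit V8, Smis have the low tag bit clear, so a value with that bit
    // clear cannot be a heap pointer and never needs a barrier. The page flag
    // is an illustrative stand-in for
    // MemoryChunk::kPointersFromHereAreInterestingMask.
    constexpr uint64_t kHeapObjectTagMask = 1;
    constexpr uint64_t kPointersFromHereAreInterestingFlag = 1 << 2;  // illustrative

    bool StoreNeedsWriteBarrier(uint64_t stored_value, uint64_t host_page_flags) {
      if ((stored_value & kHeapObjectTagMask) == 0) return false;  // Smi: skip barrier
      return (host_page_flags & kPointersFromHereAreInterestingFlag) != 0;
    }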
- __ DropArguments(actual_params_size, r9, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); + __ DropArguments(actual_params_size, r9, MacroAssembler::kCountIsInteger, + MacroAssembler::kCountIncludesReceiver); __ Ret(); } diff --git a/src/objects/code-inl.h b/src/objects/code-inl.h index c571c0d035..ef20ec9529 100644 --- a/src/objects/code-inl.h +++ b/src/objects/code-inl.h @@ -1103,6 +1103,24 @@ bool InstructionStream::IsWeakObjectInDeoptimizationLiteralArray( HeapObject::cast(object)); } +void InstructionStream::IterateDeoptimizationLiterals(RootVisitor* v) { + if (kind() == CodeKind::BASELINE) return; + + auto deopt_data = DeoptimizationData::cast(deoptimization_data()); + if (deopt_data.length() == 0) return; + + DeoptimizationLiteralArray literals = deopt_data.LiteralArray(); + const int literals_length = literals.length(); + for (int i = 0; i < literals_length; ++i) { + MaybeObject maybe_literal = literals.Get(i); + HeapObject heap_literal; + if (maybe_literal.GetHeapObject(&heap_literal)) { + v->VisitRootPointer(Root::kStackRoots, "deoptimization literal", + FullObjectSlot(&heap_literal)); + } + } +} + // This field has to have relaxed atomic accessors because it is accessed in the // concurrent marker. static_assert(FIELD_SIZE(Code::kKindSpecificFlagsOffset) == kInt32Size); @@ -1398,15 +1416,50 @@ DEF_GETTER(BytecodeArray, SourcePositionTable, ByteArray) { return roots.empty_byte_array(); } +DEF_GETTER(BytecodeArray, raw_constant_pool, Object) { + Object value = + TaggedField::load(cage_base, *this, kConstantPoolOffset); + // This field might be 0 during deserialization. + DCHECK(value == Smi::zero() || value.IsFixedArray()); + return value; +} + +DEF_GETTER(BytecodeArray, raw_handler_table, Object) { + Object value = + TaggedField::load(cage_base, *this, kHandlerTableOffset); + // This field might be 0 during deserialization. + DCHECK(value == Smi::zero() || value.IsByteArray()); + return value; +} + +DEF_GETTER(BytecodeArray, raw_source_position_table, Object) { + Object value = + TaggedField::load(cage_base, *this, kSourcePositionTableOffset); + // This field might be 0 during deserialization. 
+ DCHECK(value == Smi::zero() || value.IsByteArray() || value.IsUndefined() || + value.IsException()); + return value; +} + int BytecodeArray::BytecodeArraySize() const { return SizeFor(this->length()); } DEF_GETTER(BytecodeArray, SizeIncludingMetadata, int) { int size = BytecodeArraySize(); - size += constant_pool(cage_base).Size(cage_base); - size += handler_table(cage_base).Size(); - ByteArray table = SourcePositionTable(cage_base); - if (table.length() != 0) { - size += table.Size(); + Object maybe_constant_pool = raw_constant_pool(cage_base); + if (maybe_constant_pool.IsFixedArray()) { + size += FixedArray::cast(maybe_constant_pool).Size(cage_base); + } else { + DCHECK_EQ(maybe_constant_pool, Smi::zero()); + } + Object maybe_handler_table = raw_handler_table(cage_base); + if (maybe_handler_table.IsByteArray()) { + size += ByteArray::cast(maybe_handler_table).Size(); + } else { + DCHECK_EQ(maybe_handler_table, Smi::zero()); + } + Object maybe_table = raw_source_position_table(cage_base); + if (maybe_table.IsByteArray()) { + size += ByteArray::cast(maybe_table).Size(); } return size; } diff --git a/src/objects/code.h b/src/objects/code.h index 671f2ecf84..487f9baa35 100644 --- a/src/objects/code.h +++ b/src/objects/code.h @@ -715,6 +715,8 @@ class InstructionStream : public HeapObject { static inline bool IsWeakObjectInDeoptimizationLiteralArray(Object object); + inline void IterateDeoptimizationLiterals(RootVisitor* v); + // Returns true if the function is inlined in the code. bool Inlines(SharedFunctionInfo sfi); @@ -1066,6 +1068,11 @@ class BytecodeArray // this will return empty_byte_array. DECL_GETTER(SourcePositionTable, ByteArray) + // Raw accessors to access these fields during code cache deserialization. + DECL_GETTER(raw_constant_pool, Object) + DECL_GETTER(raw_handler_table, Object) + DECL_GETTER(raw_source_position_table, Object) + // Indicates that an attempt was made to collect source positions, but that it // failed most likely due to stack exhaustion. 
When in this state // |SourcePositionTable| will return an empty byte array rather than crashing diff --git a/src/objects/compressed-slots-inl.h b/src/objects/compressed-slots-inl.h index 1f1f0530be..670c86cc97 100644 --- a/src/objects/compressed-slots-inl.h +++ b/src/objects/compressed-slots-inl.h @@ -35,12 +35,12 @@ bool CompressedObjectSlot::contains_map_value(Address raw_value) const { Object CompressedObjectSlot::operator*() const { Tagged_t value = *location(); - return Object(TCompressionScheme::DecompressTaggedAny(address(), value)); + return Object(TCompressionScheme::DecompressTagged(address(), value)); } Object CompressedObjectSlot::load(PtrComprCageBase cage_base) const { Tagged_t value = *location(); - return Object(TCompressionScheme::DecompressTaggedAny(cage_base, value)); + return Object(TCompressionScheme::DecompressTagged(cage_base, value)); } void CompressedObjectSlot::store(Object value) const { @@ -63,17 +63,17 @@ Map CompressedObjectSlot::load_map() const { Object CompressedObjectSlot::Acquire_Load() const { AtomicTagged_t value = AsAtomicTagged::Acquire_Load(location()); - return Object(TCompressionScheme::DecompressTaggedAny(address(), value)); + return Object(TCompressionScheme::DecompressTagged(address(), value)); } Object CompressedObjectSlot::Relaxed_Load() const { AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location()); - return Object(TCompressionScheme::DecompressTaggedAny(address(), value)); + return Object(TCompressionScheme::DecompressTagged(address(), value)); } Object CompressedObjectSlot::Relaxed_Load(PtrComprCageBase cage_base) const { AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location()); - return Object(TCompressionScheme::DecompressTaggedAny(cage_base, value)); + return Object(TCompressionScheme::DecompressTagged(cage_base, value)); } void CompressedObjectSlot::Relaxed_Store(Object value) const { @@ -92,7 +92,7 @@ Object CompressedObjectSlot::Release_CompareAndSwap(Object old, Tagged_t target_ptr = TCompressionScheme::CompressTagged(target.ptr()); Tagged_t result = AsAtomicTagged::Release_CompareAndSwap(location(), old_ptr, target_ptr); - return Object(TCompressionScheme::DecompressTaggedAny(address(), result)); + return Object(TCompressionScheme::DecompressTagged(address(), result)); } // @@ -101,12 +101,12 @@ Object CompressedObjectSlot::Release_CompareAndSwap(Object old, MaybeObject CompressedMaybeObjectSlot::operator*() const { Tagged_t value = *location(); - return MaybeObject(TCompressionScheme::DecompressTaggedAny(address(), value)); + return MaybeObject(TCompressionScheme::DecompressTagged(address(), value)); } MaybeObject CompressedMaybeObjectSlot::load(PtrComprCageBase cage_base) const { Tagged_t value = *location(); - return MaybeObject(TCompressionScheme::DecompressTaggedAny(cage_base, value)); + return MaybeObject(TCompressionScheme::DecompressTagged(cage_base, value)); } void CompressedMaybeObjectSlot::store(MaybeObject value) const { @@ -115,13 +115,13 @@ void CompressedMaybeObjectSlot::store(MaybeObject value) const { MaybeObject CompressedMaybeObjectSlot::Relaxed_Load() const { AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location()); - return MaybeObject(TCompressionScheme::DecompressTaggedAny(address(), value)); + return MaybeObject(TCompressionScheme::DecompressTagged(address(), value)); } MaybeObject CompressedMaybeObjectSlot::Relaxed_Load( PtrComprCageBase cage_base) const { AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(location()); - return 
MaybeObject(TCompressionScheme::DecompressTaggedAny(cage_base, value)); + return MaybeObject(TCompressionScheme::DecompressTagged(cage_base, value)); } void CompressedMaybeObjectSlot::Relaxed_Store(MaybeObject value) const { @@ -143,14 +143,14 @@ void CompressedMaybeObjectSlot::Release_CompareAndSwap( HeapObjectReference CompressedHeapObjectSlot::operator*() const { Tagged_t value = *location(); return HeapObjectReference( - TCompressionScheme::DecompressTaggedPointer(address(), value)); + TCompressionScheme::DecompressTagged(address(), value)); } HeapObjectReference CompressedHeapObjectSlot::load( PtrComprCageBase cage_base) const { Tagged_t value = *location(); return HeapObjectReference( - TCompressionScheme::DecompressTaggedPointer(cage_base, value)); + TCompressionScheme::DecompressTagged(cage_base, value)); } void CompressedHeapObjectSlot::store(HeapObjectReference value) const { @@ -161,7 +161,7 @@ HeapObject CompressedHeapObjectSlot::ToHeapObject() const { Tagged_t value = *location(); DCHECK(HAS_STRONG_HEAP_OBJECT_TAG(value)); return HeapObject::cast( - Object(TCompressionScheme::DecompressTaggedPointer(address(), value))); + Object(TCompressionScheme::DecompressTagged(address(), value))); } void CompressedHeapObjectSlot::StoreHeapObject(HeapObject value) const { @@ -176,7 +176,7 @@ template Object OffHeapCompressedObjectSlot::load( PtrComprCageBase cage_base) const { Tagged_t value = *TSlotBase::location(); - return Object(CompressionScheme::DecompressTaggedAny(cage_base, value)); + return Object(CompressionScheme::DecompressTagged(cage_base, value)); } template @@ -188,14 +188,14 @@ template Object OffHeapCompressedObjectSlot::Relaxed_Load( PtrComprCageBase cage_base) const { AtomicTagged_t value = AsAtomicTagged::Relaxed_Load(TSlotBase::location()); - return Object(CompressionScheme::DecompressTaggedAny(cage_base, value)); + return Object(CompressionScheme::DecompressTagged(cage_base, value)); } template Object OffHeapCompressedObjectSlot::Acquire_Load( PtrComprCageBase cage_base) const { AtomicTagged_t value = AsAtomicTagged::Acquire_Load(TSlotBase::location()); - return Object(CompressionScheme::DecompressTaggedAny(cage_base, value)); + return Object(CompressionScheme::DecompressTagged(cage_base, value)); } template diff --git a/src/objects/elements.cc b/src/objects/elements.cc index a403c748d8..9d9d93215a 100644 --- a/src/objects/elements.cc +++ b/src/objects/elements.cc @@ -479,8 +479,8 @@ void SortIndices(Isolate* isolate, Handle indices, AtomicSlot end(start + sort_size); std::sort(start, end, [isolate](Tagged_t elementA, Tagged_t elementB) { #ifdef V8_COMPRESS_POINTERS - Object a(V8HeapCompressionScheme::DecompressTaggedAny(isolate, elementA)); - Object b(V8HeapCompressionScheme::DecompressTaggedAny(isolate, elementB)); + Object a(V8HeapCompressionScheme::DecompressTagged(isolate, elementA)); + Object b(V8HeapCompressionScheme::DecompressTagged(isolate, elementB)); #else Object a(elementA); Object b(elementB); diff --git a/src/objects/fixed-array-inl.cpp b/src/objects/fixed-array-inl.cpp index cfaee4747c..cd8bf734f0 100644 --- a/src/objects/fixed-array-inl.cpp +++ b/src/objects/fixed-array-inl.cpp @@ -6,8 +6,10 @@ namespace v8 { namespace internal { - - +Object FixedArray::get(int index) const { + PtrComprCageBase cage_base = GetPtrComprCageBase(*this); + return get(cage_base, index); +} void FixedArray::set(int index, Smi value) { DCHECK_NE(map(), EarlyGetReadOnlyRoots().unchecked_fixed_cow_array_map()); diff --git a/src/objects/fixed-array-inl.h 
b/src/objects/fixed-array-inl.h index 4f84c5a567..8e93935866 100644 --- a/src/objects/fixed-array-inl.h +++ b/src/objects/fixed-array-inl.h @@ -64,11 +64,6 @@ bool FixedArray::ContainsOnlySmisOrHoles() { return true; } -Object FixedArray::get(int index) const { - PtrComprCageBase cage_base = GetPtrComprCageBase(*this); - return get(cage_base, index); -} - Object FixedArray::get(PtrComprCageBase cage_base, int index) const { DCHECK_LT(static_cast(index), static_cast(length())); return TaggedField::Relaxed_Load(cage_base, *this, diff --git a/src/objects/fixed-array.h b/src/objects/fixed-array.h index f3bc650456..91a79fec40 100644 --- a/src/objects/fixed-array.h +++ b/src/objects/fixed-array.h @@ -102,7 +102,7 @@ class FixedArray : public TorqueGeneratedFixedArray { public: // Setter and getter for elements. - inline Object get(int index) const; + Object get(int index) const; inline Object get(PtrComprCageBase cage_base, int index) const; static inline Handle get(FixedArray array, int index, diff --git a/src/objects/instance-type-inl.h b/src/objects/instance-type-inl.h index 6c848b8ebb..2a15175eb5 100644 --- a/src/objects/instance-type-inl.h +++ b/src/objects/instance-type-inl.h @@ -9,6 +9,7 @@ #include "src/execution/isolate-utils-inl.h" #include "src/objects/instance-type.h" #include "src/objects/map-inl.h" +#include "src/roots/static-roots.h" // Has to be the last include (doesn't have include guards): #include "src/objects/object-macros.h" @@ -36,47 +37,98 @@ HEAP_OBJECT_TYPE_LIST(DECL_TYPE) } // namespace InstanceTypeTraits template -inline constexpr Tagged_t StaticSingleMapOfInstanceType() { - return kNullAddress; -} - -#if V8_STATIC_ROOTS_BOOL - -inline bool CheckInstanceMap(Tagged_t expected, Map map) { - return V8HeapCompressionScheme::CompressTagged(map.ptr()) == expected; +inline constexpr base::Optional UniqueMapOfInstanceType() { + return {}; } #define INSTANCE_TYPE_MAP(V, rootIndexName, rootAccessorName, class_name) \ template <> \ - inline constexpr Tagged_t \ - StaticSingleMapOfInstanceType() { \ - return StaticReadOnlyRoot::k##rootIndexName; \ + inline constexpr base::Optional \ + UniqueMapOfInstanceType() { \ + return {RootIndex::k##rootIndexName}; \ } UNIQUE_INSTANCE_TYPE_MAP_LIST_GENERATOR(INSTANCE_TYPE_MAP, _) #undef INSTANCE_TYPE_MAP -#else +inline constexpr base::Optional UniqueMapOfInstanceType( + InstanceType type) { + switch (type) { +#define INSTANCE_TYPE_CHECK(it, forinstancetype) \ + case forinstancetype: \ + return InstanceTypeChecker::UniqueMapOfInstanceType< \ + InstanceTypeChecker::InstanceTypeTraits::it>(); \ + INSTANCE_TYPE_CHECKERS_SINGLE(INSTANCE_TYPE_CHECK); +#undef INSTANCE_TYPE_CHECK + default: { + } + } + return {}; +} -inline bool CheckInstanceMap(Tagged_t expected, Map map) { UNREACHABLE(); } +#if V8_STATIC_ROOTS_BOOL + +inline bool CheckInstanceMap(RootIndex expected, Map map) { + return V8HeapCompressionScheme::CompressTagged(map.ptr()) == + StaticReadOnlyRootsPointerTable[static_cast(expected)]; +} + +inline bool CheckInstanceMapRange(std::pair expected, + Map map) { + Tagged_t ptr = V8HeapCompressionScheme::CompressTagged(map.ptr()); + Tagged_t first = + StaticReadOnlyRootsPointerTable[static_cast(expected.first)]; + Tagged_t last = + StaticReadOnlyRootsPointerTable[static_cast(expected.second)]; + return ptr >= first && ptr <= last; +} #endif // V8_STATIC_ROOTS_BOOL // Define type checkers for classes with single instance type. 
-#define INSTANCE_TYPE_CHECKER(type, forinstancetype) \ - V8_INLINE constexpr bool Is##type(InstanceType instance_type) { \ - return instance_type == forinstancetype; \ - } \ - V8_INLINE bool Is##type(Map map_object) { \ - if (Tagged_t expected = \ - StaticSingleMapOfInstanceType()) { \ - bool res = CheckInstanceMap(expected, map_object); \ - SLOW_DCHECK(Is##type(map_object.instance_type()) == res); \ - return res; \ - } \ - return Is##type(map_object.instance_type()); \ +// INSTANCE_TYPE_CHECKER1 is to be used if the instance type is already loaded. +// INSTANCE_TYPE_CHECKER2 is preferred since it can sometimes avoid loading the +// instance type from the map, if the checked instance type corresponds to a +// known map or range of maps. + +#define INSTANCE_TYPE_CHECKER1(type, forinstancetype) \ + V8_INLINE constexpr bool Is##type(InstanceType instance_type) { \ + return instance_type == forinstancetype; \ } -INSTANCE_TYPE_CHECKERS_SINGLE(INSTANCE_TYPE_CHECKER) -#undef INSTANCE_TYPE_CHECKER + +#if V8_STATIC_ROOTS_BOOL + +#define INSTANCE_TYPE_CHECKER2(type, forinstancetype_) \ + V8_INLINE bool Is##type(Map map_object) { \ + InstanceType forinstancetype = \ + static_cast(forinstancetype_); \ + if (base::Optional expected = \ + UniqueMapOfInstanceType()) { \ + bool res = CheckInstanceMap(*expected, map_object); \ + SLOW_DCHECK(Is##type(map_object.instance_type()) == res); \ + return res; \ + } \ + if (base::Optional> range = \ + StaticReadOnlyRootMapRange(forinstancetype)) { \ + bool res = CheckInstanceMapRange(*range, map_object); \ + SLOW_DCHECK(Is##type(map_object.instance_type()) == res); \ + return res; \ + } \ + return Is##type(map_object.instance_type()); \ + } + +#else + +#define INSTANCE_TYPE_CHECKER2(type, forinstancetype) \ + V8_INLINE bool Is##type(Map map_object) { \ + return Is##type(map_object.instance_type()); \ + } + +#endif // V8_STATIC_ROOTS_BOOL + +INSTANCE_TYPE_CHECKERS_SINGLE(INSTANCE_TYPE_CHECKER1) +INSTANCE_TYPE_CHECKERS_SINGLE(INSTANCE_TYPE_CHECKER2) +#undef INSTANCE_TYPE_CHECKER1 +#undef INSTANCE_TYPE_CHECKER2 // Checks if value is in range [lower_limit, higher_limit] using a single // branch. Assumes that the input instance type is valid. @@ -102,17 +154,45 @@ struct InstanceRangeChecker { }; // Define type checkers for classes with ranges of instance types. -#define INSTANCE_TYPE_CHECKER_RANGE(type, first_instance_type, \ - last_instance_type) \ +// INSTANCE_TYPE_CHECKER_RANGE1 is to be used if the instance type is already +// loaded. INSTANCE_TYPE_CHECKER_RANGE2 is preferred since it can sometimes +// avoid loading the instance type from the map, if the checked instance type +// range corresponds to a known range of maps. 
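When V8_STATIC_ROOTS_BOOL is set, the new INSTANCE_TYPE_CHECKER2 / INSTANCE_TYPE_CHECKER_RANGE2 expansions above avoid loading the instance type at all: a type whose map is a unique read-only root is recognised by a single compressed-pointer comparison, and a type range whose read-only maps are laid out contiguously is recognised by two unsigned comparisons. A rough standalone sketch of those two checks (the Tagged_t alias is illustrative):

    #include <cstdint>

    using Tagged_t = uint32_t;  // compressed pointer, illustrative alias

    // Unique-map case: e.g. every HeapNumber shares the HeapNumber map root.
    bool CheckInstanceMap(Tagged_t expected_root, Tagged_t compressed_map) {
      return compressed_map == expected_root;
    }

    // Range case: the read-only maps for the range sit contiguously in the
    // static roots table, so the check is two unsigned comparisons on the
    // compressed map word.
    bool CheckInstanceMapRange(Tagged_t first_root, Tagged_t last_root,
                               Tagged_t compressed_map) {
      return compressed_map >= first_root && compressed_map <= last_root;
    }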
+ +#define INSTANCE_TYPE_CHECKER_RANGE1(type, first_instance_type, \ + last_instance_type) \ V8_INLINE constexpr bool Is##type(InstanceType instance_type) { \ return InstanceRangeChecker::Check(instance_type); \ - } \ - V8_INLINE bool Is##type(Map map_object) { \ - return Is##type(map_object.instance_type()); \ } -INSTANCE_TYPE_CHECKERS_RANGE(INSTANCE_TYPE_CHECKER_RANGE) -#undef INSTANCE_TYPE_CHECKER_RANGE + +#if V8_STATIC_ROOTS_BOOL + +#define INSTANCE_TYPE_CHECKER_RANGE2(type, first_instance_type, \ + last_instance_type) \ + V8_INLINE bool Is##type(Map map_object) { \ + if (base::Optional> range = \ + StaticReadOnlyRootMapRange(first_instance_type, \ + last_instance_type)) { \ + return CheckInstanceMapRange(*range, map_object); \ + } \ + return Is##type(map_object.instance_type()); \ + } + +#else + +#define INSTANCE_TYPE_CHECKER_RANGE2(type, first_instance_type, \ + last_instance_type) \ + V8_INLINE bool Is##type(Map map_object) { \ + return Is##type(map_object.instance_type()); \ + } + +#endif // V8_STATIC_ROOTS_BOOL + +INSTANCE_TYPE_CHECKERS_RANGE(INSTANCE_TYPE_CHECKER_RANGE1) +INSTANCE_TYPE_CHECKERS_RANGE(INSTANCE_TYPE_CHECKER_RANGE2) +#undef INSTANCE_TYPE_CHECKER_RANGE1 +#undef INSTANCE_TYPE_CHECKER_RANGE2 V8_INLINE constexpr bool IsHeapObject(InstanceType instance_type) { return true; @@ -125,7 +205,12 @@ V8_INLINE constexpr bool IsInternalizedString(InstanceType instance_type) { } V8_INLINE bool IsInternalizedString(Map map_object) { +#if V8_STATIC_ROOTS_BOOL + return CheckInstanceMapRange( + *StaticReadOnlyRootMapRange(INTERNALIZED_STRING_TYPE), map_object); +#else return IsInternalizedString(map_object.instance_type()); +#endif } V8_INLINE constexpr bool IsExternalString(InstanceType instance_type) { diff --git a/src/objects/instance-type.h b/src/objects/instance-type.h index e49ed9b352..1e162f2e67 100644 --- a/src/objects/instance-type.h +++ b/src/objects/instance-type.h @@ -340,10 +340,16 @@ INSTANCE_TYPE_CHECKERS(IS_TYPE_FUNCTION_DECL) // This list must contain only maps that are shared by all objects of their // instance type. -#define UNIQUE_INSTANCE_TYPE_MAP_LIST_GENERATOR(V, _) \ - UNIQUE_LEAF_INSTANCE_TYPE_MAP_LIST_GENERATOR(V, _) \ - V(_, HeapNumberMap, heap_number_map, HeapNumber) \ - V(_, WeakFixedArrayMap, weak_fixed_array_map, WeakFixedArray) \ +#define UNIQUE_INSTANCE_TYPE_MAP_LIST_GENERATOR(V, _) \ + UNIQUE_LEAF_INSTANCE_TYPE_MAP_LIST_GENERATOR(V, _) \ + V(_, ByteArrayMap, byte_array_map, ByteArray) \ + V(_, NameDictionaryMap, name_dictionary_map, NameDictionary) \ + V(_, OrderedNameDictionaryMap, ordered_name_dictionary_map, \ + OrderedNameDictionary) \ + V(_, GlobalDictionaryMap, global_dictionary_map, GlobalDictionary) \ + V(_, GlobalPropertyCellMap, global_property_cell_map, PropertyCell) \ + V(_, HeapNumberMap, heap_number_map, HeapNumber) \ + V(_, WeakFixedArrayMap, weak_fixed_array_map, WeakFixedArray) \ TORQUE_DEFINED_MAP_CSA_LIST_GENERATOR(V, _) } // namespace internal diff --git a/src/objects/maybe-object-inl.h b/src/objects/maybe-object-inl.h index f5dd961487..2e3616a6eb 100644 --- a/src/objects/maybe-object-inl.h +++ b/src/objects/maybe-object-inl.h @@ -84,7 +84,7 @@ HeapObjectReference HeapObjectReference::ClearedValue( #ifdef V8_COMPRESS_POINTERS // This is necessary to make pointer decompression computation also // suitable for cleared weak references. 
- Address raw_value = V8HeapCompressionScheme::DecompressTaggedPointer( + Address raw_value = V8HeapCompressionScheme::DecompressTagged( cage_base, kClearedWeakHeapObjectLower32); #else Address raw_value = kClearedWeakHeapObjectLower32; diff --git a/src/objects/string.cc b/src/objects/string.cc index 9c33403383..3b6e19eb63 100644 --- a/src/objects/string.cc +++ b/src/objects/string.cc @@ -388,13 +388,14 @@ void String::MakeExternalDuringGC(Isolate* isolate, T* resource) { // Byte size of the external String object. int new_size = this->SizeFromMap(new_map); - // Shared strings are never indirect or large. - DCHECK(!isolate->heap()->IsLargeObject(*this)); + // Shared strings are never indirect. DCHECK(!StringShape(*this).IsIndirect()); - isolate->heap()->NotifyObjectSizeChange(*this, size, new_size, - ClearRecordedSlots::kNo, - UpdateInvalidatedObjectSize::kNo); + if (!isolate->heap()->IsLargeObject(*this)) { + isolate->heap()->NotifyObjectSizeChange(*this, size, new_size, + ClearRecordedSlots::kNo, + UpdateInvalidatedObjectSize::kNo); + } // The external pointer slots must be initialized before the new map is // installed. Otherwise, a GC marking thread may see the new map before the diff --git a/src/objects/tagged-field-inl.h b/src/objects/tagged-field-inl.h index 1c8e9a8f0c..0d2de548e5 100644 --- a/src/objects/tagged-field-inl.h +++ b/src/objects/tagged-field-inl.h @@ -35,10 +35,9 @@ Address TaggedField::tagged_to_full( if (kIsSmi) { return CompressionScheme::DecompressTaggedSigned(tagged_value); } else if (kIsHeapObject) { - return CompressionScheme::DecompressTaggedPointer(on_heap_addr, - tagged_value); + return CompressionScheme::DecompressTagged(on_heap_addr, tagged_value); } else { - return CompressionScheme::DecompressTaggedAny(on_heap_addr, tagged_value); + return CompressionScheme::DecompressTagged(on_heap_addr, tagged_value); } #else return tagged_value; diff --git a/src/objects/tagged-impl-inl.h b/src/objects/tagged-impl-inl.h index 4ce915730d..9a44749949 100644 --- a/src/objects/tagged-impl-inl.h +++ b/src/objects/tagged-impl-inl.h @@ -112,9 +112,8 @@ bool TaggedImpl::GetHeapObjectIfStrong( if (kIsFull) return GetHeapObjectIfStrong(result); // Implementation for compressed pointers. if (IsStrong()) { - *result = - HeapObject::cast(Object(CompressionScheme::DecompressTaggedPointer( - isolate, static_cast(ptr_)))); + *result = HeapObject::cast(Object(CompressionScheme::DecompressTagged( + isolate, static_cast(ptr_)))); return true; } return false; @@ -138,7 +137,7 @@ HeapObject TaggedImpl::GetHeapObjectAssumeStrong( if (kIsFull) return GetHeapObjectAssumeStrong(); // Implementation for compressed pointers. 
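These hunks collapse the DecompressTaggedPointer / DecompressTaggedAny split into a single DecompressTagged. A sketch of the assumed semantics (upper half from the cage base, lower 32 bits from the compressed value), including the weak-bit clearing visible in tagged-impl-inl.h; the cage-base derivation and the tag constant are simplified assumptions, not the real helpers.

    #include <cstdint>

    using Address = uint64_t;
    using Tagged_t = uint32_t;

    constexpr Tagged_t kWeakHeapObjectMask = 0x2;  // assumed low tag bit for weak refs

    // Any address inside the 4GB pointer cage can serve as the base hint.
    inline Address DecompressTagged(Address on_heap_addr, Tagged_t raw_value) {
      Address cage_base = on_heap_addr & ~Address{0xffffffff};
      return cage_base + Address{raw_value};
    }

    // Weak references clear their tag bit first, then decompress like any other
    // tagged value, which is why one DecompressTagged now suffices for both.
    inline Address DecompressWeak(Address on_heap_addr, Tagged_t raw_value) {
      return DecompressTagged(on_heap_addr, raw_value & ~kWeakHeapObjectMask);
    }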
DCHECK(IsStrong()); - return HeapObject::cast(Object(CompressionScheme::DecompressTaggedPointer( + return HeapObject::cast(Object(CompressionScheme::DecompressTagged( isolate, static_cast(ptr_)))); } @@ -224,11 +223,11 @@ HeapObject TaggedImpl::GetHeapObject( DCHECK(!IsSmi()); if (kCanBeWeak) { DCHECK(!IsCleared()); - return HeapObject::cast(Object(CompressionScheme::DecompressTaggedPointer( + return HeapObject::cast(Object(CompressionScheme::DecompressTagged( isolate, static_cast(ptr_) & ~kWeakHeapObjectMask))); } else { DCHECK(!HAS_WEAK_HEAP_OBJECT_TAG(ptr_)); - return HeapObject::cast(Object(CompressionScheme::DecompressTaggedPointer( + return HeapObject::cast(Object(CompressionScheme::DecompressTagged( isolate, static_cast(ptr_)))); } } diff --git a/src/objects/tagged-value-inl.h b/src/objects/tagged-value-inl.h index 4ca8739367..84f4c93ec2 100644 --- a/src/objects/tagged-value-inl.h +++ b/src/objects/tagged-value-inl.h @@ -30,7 +30,7 @@ inline StrongTaggedValue::StrongTaggedValue(Object o) Object StrongTaggedValue::ToObject(Isolate* isolate, StrongTaggedValue object) { #ifdef V8_COMPRESS_POINTERS - return Object(CompressionScheme::DecompressTaggedAny(isolate, object.ptr())); + return Object(CompressionScheme::DecompressTagged(isolate, object.ptr())); #else return Object(object.ptr()); #endif @@ -49,7 +49,7 @@ inline TaggedValue::TaggedValue(MaybeObject o) MaybeObject TaggedValue::ToMaybeObject(Isolate* isolate, TaggedValue object) { #ifdef V8_COMPRESS_POINTERS return MaybeObject( - CompressionScheme::DecompressTaggedAny(isolate, object.ptr())); + CompressionScheme::DecompressTagged(isolate, object.ptr())); #else return MaybeObject(object.ptr()); #endif diff --git a/src/objects/visitors.h b/src/objects/visitors.h index d28e9d6e81..4ae9ad2b26 100644 --- a/src/objects/visitors.h +++ b/src/objects/visitors.h @@ -91,12 +91,14 @@ class RootVisitor { UNREACHABLE(); } - // Visits a single pointer which is InstructionStream from the execution - // stack. - virtual void VisitRunningCode(FullObjectSlot p) { - // For most visitors, currently running InstructionStream is no different - // than any other on-stack pointer. - VisitRootPointer(Root::kStackRoots, nullptr, p); + // Visits a running Code object and potentially its associated + // InstructionStream from the execution stack. + virtual void VisitRunningCode(FullObjectSlot code_slot, + FullObjectSlot istream_or_smi_zero_slot) { + // For most visitors, currently running code is no different than any other + // on-stack pointer. + VisitRootPointer(Root::kStackRoots, nullptr, istream_or_smi_zero_slot); + VisitRootPointer(Root::kStackRoots, nullptr, code_slot); } // Intended for serialization/deserialization checking: insert, or diff --git a/src/profiler/heap-snapshot-generator.cc b/src/profiler/heap-snapshot-generator.cc index 5bac4fff08..e82e8b4170 100644 --- a/src/profiler/heap-snapshot-generator.cc +++ b/src/profiler/heap-snapshot-generator.cc @@ -2064,37 +2064,17 @@ class RootsReferencesExtractor : public RootVisitor { } } - void VisitRunningCode(FullObjectSlot p) override { - // Must match behavior in - // MarkCompactCollector::RootMarkingVisitor::VisitRunningCode, which treats - // deoptimization literals in running code as stack roots. - HeapObject value = HeapObject::cast(*p); - if (!IsCodeSpaceObject(value)) { - // When external code space is enabled, the slot might contain a - // Code object representing an embedded builtin, which - // doesn't require additional processing. 
- DCHECK(!Code::cast(value).has_instruction_stream()); - } else { - InstructionStream code = InstructionStream::cast(value); - if (code.kind() != CodeKind::BASELINE) { - DeoptimizationData deopt_data = - DeoptimizationData::cast(code.deoptimization_data()); - if (deopt_data.length() > 0) { - DeoptimizationLiteralArray literals = deopt_data.LiteralArray(); - int literals_length = literals.length(); - for (int i = 0; i < literals_length; ++i) { - MaybeObject maybe_literal = literals.Get(i); - HeapObject heap_literal; - if (maybe_literal.GetHeapObject(&heap_literal)) { - VisitRootPointer(Root::kStackRoots, nullptr, - FullObjectSlot(&heap_literal)); - } - } - } - } + // Keep this synced with + // MarkCompactCollector::RootMarkingVisitor::VisitRunningCode. + void VisitRunningCode(FullObjectSlot code_slot, + FullObjectSlot istream_or_smi_zero_slot) final { + Object istream_or_smi_zero = *istream_or_smi_zero_slot; + if (istream_or_smi_zero != Smi::zero()) { + InstructionStream istream = InstructionStream::cast(istream_or_smi_zero); + istream.IterateDeoptimizationLiterals(this); + VisitRootPointer(Root::kStackRoots, nullptr, istream_or_smi_zero_slot); } - // Finally visit the InstructionStream itself. - VisitRootPointer(Root::kStackRoots, nullptr, p); + VisitRootPointer(Root::kStackRoots, nullptr, code_slot); } private: diff --git a/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/src/regexp/arm64/regexp-macro-assembler-arm64.cc index a265797e24..1739586736 100644 --- a/src/regexp/arm64/regexp-macro-assembler-arm64.cc +++ b/src/regexp/arm64/regexp-macro-assembler-arm64.cc @@ -814,7 +814,7 @@ Handle RegExpMacroAssemblerARM64::GetCode(Handle source) { DCHECK_EQ(registers_to_retain.Count(), kNumCalleeSavedRegisters); __ PushCPURegList(registers_to_retain); - __ Push(lr, fp); + __ Push(lr, fp); __ PushCPURegList(argument_registers); // Set frame pointer in place. @@ -1125,7 +1125,7 @@ Handle RegExpMacroAssemblerARM64::GetCode(Handle source) { // Set stack pointer back to first register to retain. __ Mov(sp, fp); - __ Pop(fp, lr); + __ Pop(fp, lr); // Restore registers. __ PopCPURegList(registers_to_retain); @@ -1656,14 +1656,14 @@ void RegExpMacroAssemblerARM64::CallIf(Label* to, Condition condition) { void RegExpMacroAssemblerARM64::RestoreLinkRegister() { // TODO(v8:10026): Remove when we stop compacting for code objects that are // active on the call stack. - __ Pop(padreg, lr); + __ Pop(padreg, lr); __ Add(lr, lr, Operand(masm_->CodeObject())); } void RegExpMacroAssemblerARM64::SaveLinkRegister() { __ Sub(lr, lr, Operand(masm_->CodeObject())); - __ Push(lr, padreg); + __ Push(lr, padreg); } diff --git a/src/regexp/regexp-ast.cc b/src/regexp/regexp-ast.cc index ba9c8f0d1f..d68d7fec26 100644 --- a/src/regexp/regexp-ast.cc +++ b/src/regexp/regexp-ast.cc @@ -3,7 +3,9 @@ // found in the LICENSE file. 
#include "src/regexp/regexp-ast.h" + #include "src/utils/ostreams.h" +#include "src/zone/zone-list-inl.h" namespace v8 { namespace internal { @@ -200,10 +202,12 @@ void* RegExpUnparser::VisitClassSetOperand(RegExpClassSetOperand* that, if (i > 0) os_ << " "; VisitCharacterRange(that->ranges()->at(i)); } - for (auto iter : *that->strings()) { - os_ << " '"; - os_ << std::string(iter.first.begin(), iter.first.end()); - os_ << "'"; + if (that->has_strings()) { + for (auto iter : *that->strings()) { + os_ << " '"; + os_ << std::string(iter.first.begin(), iter.first.end()); + os_ << "'"; + } } os_ << "]"; return nullptr; @@ -382,16 +386,17 @@ RegExpClassSetOperand::RegExpClassSetOperand(ZoneList* ranges, CharacterClassStrings* strings) : ranges_(ranges), strings_(strings) { DCHECK_NOT_NULL(ranges); - DCHECK_NOT_NULL(strings); min_match_ = 0; max_match_ = 0; if (!ranges->is_empty()) { min_match_ = 1; max_match_ = 2; } - for (auto string : *strings) { - min_match_ = std::min(min_match_, string.second->min_match()); - max_match_ = std::max(max_match_, string.second->max_match()); + if (has_strings()) { + for (auto string : *strings) { + min_match_ = std::min(min_match_, string.second->min_match()); + max_match_ = std::max(max_match_, string.second->max_match()); + } } } @@ -410,5 +415,20 @@ RegExpClassSetExpression::RegExpClassSetExpression( } } +// static +RegExpClassSetExpression* RegExpClassSetExpression::Empty(Zone* zone, + bool is_negated) { + ZoneList* ranges = + zone->template New>(0, zone); + RegExpClassSetOperand* op = + zone->template New(ranges, nullptr); + ZoneList* operands = + zone->template New>(1, zone); + operands->Add(op, zone); + return zone->template New( + RegExpClassSetExpression::OperationType::kUnion, is_negated, false, + operands); +} + } // namespace internal } // namespace v8 diff --git a/src/regexp/regexp-ast.h b/src/regexp/regexp-ast.h index 6939fde07f..34f59f6c31 100644 --- a/src/regexp/regexp-ast.h +++ b/src/regexp/regexp-ast.h @@ -413,9 +413,12 @@ class RegExpClassSetOperand final : public RegExpTree { void Subtract(RegExpClassSetOperand* other, ZoneList* temp_ranges, Zone* zone); - bool has_strings() const { return !strings_->empty(); } + bool has_strings() const { return strings_ != nullptr && !strings_->empty(); } ZoneList* ranges() { return ranges_; } - CharacterClassStrings* strings() { return strings_; } + CharacterClassStrings* strings() { + DCHECK_NOT_NULL(strings_); + return strings_; + } private: ZoneList* ranges_; @@ -434,6 +437,10 @@ class RegExpClassSetExpression final : public RegExpTree { DECL_BOILERPLATE(ClassSetExpression); + // Create an empty class set expression (matches everything if |is_negated|, + // nothing otherwise). 
+ static RegExpClassSetExpression* Empty(Zone* zone, bool is_negated); + bool IsTextElement() override { return true; } int min_match() override { return 0; } int max_match() override { return max_match_; } diff --git a/src/regexp/regexp-compiler-tonode.cc b/src/regexp/regexp-compiler-tonode.cc index 54e57298da..3258bb5149 100644 --- a/src/regexp/regexp-compiler-tonode.cc +++ b/src/regexp/regexp-compiler-tonode.cc @@ -593,7 +593,12 @@ RegExpNode* RegExpClassSetExpression::ToNode(RegExpCompiler* compiler, void RegExpClassSetOperand::Union(RegExpClassSetOperand* other, Zone* zone) { ranges()->AddAll(*other->ranges(), zone); - strings()->insert(other->strings()->begin(), other->strings()->end()); + if (other->has_strings()) { + if (strings_ == nullptr) { + strings_ = zone->template New(zone); + } + strings()->insert(other->strings()->begin(), other->strings()->end()); + } } void RegExpClassSetOperand::Intersect(RegExpClassSetOperand* other, @@ -602,11 +607,17 @@ void RegExpClassSetOperand::Intersect(RegExpClassSetOperand* other, CharacterRange::Intersect(ranges(), other->ranges(), temp_ranges, zone); std::swap(*ranges(), *temp_ranges); temp_ranges->Rewind(0); - for (auto iter = strings()->begin(); iter != strings()->end();) { - if (other->strings()->find(iter->first) == other->strings()->end()) { - iter = strings()->erase(iter); + if (has_strings()) { + if (!other->has_strings()) { + strings()->clear(); } else { - iter++; + for (auto iter = strings()->begin(); iter != strings()->end();) { + if (other->strings()->find(iter->first) == other->strings()->end()) { + iter = strings()->erase(iter); + } else { + iter++; + } + } } } } @@ -617,11 +628,13 @@ void RegExpClassSetOperand::Subtract(RegExpClassSetOperand* other, CharacterRange::Subtract(ranges(), other->ranges(), temp_ranges, zone); std::swap(*ranges(), *temp_ranges); temp_ranges->Rewind(0); - for (auto iter = strings()->begin(); iter != strings()->end();) { - if (other->strings()->find(iter->first) != other->strings()->end()) { - iter = strings()->erase(iter); - } else { - iter++; + if (has_strings() && other->has_strings()) { + for (auto iter = strings()->begin(); iter != strings()->end();) { + if (other->strings()->find(iter->first) != other->strings()->end()) { + iter = strings()->erase(iter); + } else { + iter++; + } } } } diff --git a/src/regexp/regexp-parser.cc b/src/regexp/regexp-parser.cc index fff26b4a58..f7707e80a1 100644 --- a/src/regexp/regexp-parser.cc +++ b/src/regexp/regexp-parser.cc @@ -429,8 +429,8 @@ class RegExpParserState : public ZoneObject { template class RegExpParserImpl final { private: - RegExpParserImpl(const CharT* input, int input_length, RegExpFlags flags, - uintptr_t stack_limit, Zone* zone, + RegExpParserImpl(Isolate* isolate, const CharT* input, int input_length, + RegExpFlags flags, uintptr_t stack_limit, Zone* zone, const DisallowGarbageCollection& no_gc); bool Parse(RegExpCompileData* result); @@ -563,6 +563,7 @@ class RegExpParserImpl final { bool HasNamedCaptures(InClassEscapeState in_class_escape_state); Zone* zone() const { return zone_; } + Isolate* isolate() const { return isolate_; } base::uc32 current() const { return current_; } bool has_more() const { return has_more_; } @@ -603,6 +604,10 @@ class RegExpParserImpl final { const DisallowGarbageCollection no_gc_; Zone* const zone_; + // TODO(pthier, v8:11935): Isolate is only used to increment the UseCounter + // for unicode set incompabilities in unicode mode. Remove when the counter + // is removed. 
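The regexp changes below make strings_ on RegExpClassSetOperand nullable and allocate it lazily, so every set operation has to tolerate a missing set on either side. A small self-contained model of that pattern; std::set stands in for CharacterClassStrings and the names are illustrative only.

    #include <iterator>
    #include <memory>
    #include <set>
    #include <string>

    struct Operand {
      std::unique_ptr<std::set<std::string>> strings;  // may be null, like strings_

      bool has_strings() const { return strings && !strings->empty(); }

      // Union: only allocate the set once the other side actually has strings.
      void Union(const Operand& other) {
        if (!other.has_strings()) return;
        if (!strings) strings = std::make_unique<std::set<std::string>>();
        strings->insert(other.strings->begin(), other.strings->end());
      }

      // Intersection: an absent set on the other side empties ours.
      void Intersect(const Operand& other) {
        if (!has_strings()) return;
        if (!other.has_strings()) {
          strings->clear();
          return;
        }
        for (auto it = strings->begin(); it != strings->end();) {
          it = other.strings->count(*it) ? std::next(it) : strings->erase(it);
        }
      }
    };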
+ Isolate* const isolate_; RegExpError error_ = RegExpError::kNone; int error_pos_ = 0; ZoneList* captures_; @@ -629,9 +634,10 @@ class RegExpParserImpl final { template RegExpParserImpl::RegExpParserImpl( - const CharT* input, int input_length, RegExpFlags flags, + Isolate* isolate, const CharT* input, int input_length, RegExpFlags flags, uintptr_t stack_limit, Zone* zone, const DisallowGarbageCollection& no_gc) : zone_(zone), + isolate_(isolate), captures_(nullptr), named_captures_(nullptr), named_back_references_(nullptr), @@ -2417,6 +2423,21 @@ void RegExpParserImpl::ParseClassEscape( if (current() != '\\') { // Not a ClassEscape. *char_out = current(); + // Count usages of patterns that would break when replacing /u with /v. + // This is only temporarily enabled and should give us an idea if it is + // feasible to enable unicode sets for usage in the pattern attribute. + // TODO(pthier, v8:11935): Remove for M113. + // IsUnicodeMode() is true for both /u and /v, but this method is only + // called for /u. + if (IsUnicodeMode() && isolate() != nullptr) { + const bool unicode_sets_invalid = + IsClassSetSyntaxCharacter(*char_out) || + IsClassSetReservedDoublePunctuator(*char_out); + if (unicode_sets_invalid) { + isolate()->CountUsage( + v8::Isolate::kRegExpUnicodeSetIncompatibilitiesWithUnicodeMode); + } + } Advance(); return; } @@ -2892,10 +2913,14 @@ RegExpTree* RegExpParserImpl::ParseCharacterClass( zone()->template New>(2, zone()); if (current() == ']') { Advance(); - RegExpClassRanges::ClassRangesFlags class_ranges_flags; - if (is_negated) class_ranges_flags = RegExpClassRanges::NEGATED; - return zone()->template New(zone(), ranges, - class_ranges_flags); + if (unicode_sets()) { + return RegExpClassSetExpression::Empty(zone(), is_negated); + } else { + RegExpClassRanges::ClassRangesFlags class_ranges_flags; + if (is_negated) class_ranges_flags = RegExpClassRanges::NEGATED; + return zone()->template New(zone(), ranges, + class_ranges_flags); + } } if (!unicode_sets()) { @@ -3113,13 +3138,13 @@ bool RegExpParser::ParseRegExpFromHeapString(Isolate* isolate, Zone* zone, String::FlatContent content = input->GetFlatContent(no_gc); if (content.IsOneByte()) { base::Vector v = content.ToOneByteVector(); - return RegExpParserImpl{v.begin(), v.length(), flags, - stack_limit, zone, no_gc} + return RegExpParserImpl{isolate, v.begin(), v.length(), flags, + stack_limit, zone, no_gc} .Parse(result); } else { base::Vector v = content.ToUC16Vector(); - return RegExpParserImpl{v.begin(), v.length(), flags, - stack_limit, zone, no_gc} + return RegExpParserImpl{ + isolate, v.begin(), v.length(), flags, stack_limit, zone, no_gc} .Parse(result); } } @@ -3131,8 +3156,14 @@ bool RegExpParser::VerifyRegExpSyntax(Zone* zone, uintptr_t stack_limit, RegExpFlags flags, RegExpCompileData* result, const DisallowGarbageCollection& no_gc) { - return RegExpParserImpl{input, input_length, flags, - stack_limit, zone, no_gc} + // TODO(pthier, v8:11935): Isolate is only temporarily used to increment the + // UseCounter for unicode set incompabilities in unicode mode. + // This method is only used in the parser for early-errors. To avoid passing + // the isolate through we simply pass a nullptr. This also has the positive + // side-effect of not incrementing the UseCounter multiple times. 
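The use counter added in ParseClassEscape fires when an unescaped class character under /u would change meaning under /v. A sketch of that classification; the two character sets below follow my reading of the unicode-sets proposal grammar and should be treated as an assumption, not a copy of V8's tables.

    #include <string_view>

    // Plain literals under /u, but syntax characters or reserved double
    // punctuators under /v (assumed sets).
    inline bool IsClassSetSyntaxChar(char c) {
      return std::string_view("()[]{}/-\\|").find(c) != std::string_view::npos;
    }

    inline bool IsClassSetReservedDoublePunctuator(char c) {
      return std::string_view("&!#$%*+,.:;<=>?@^`~").find(c) != std::string_view::npos;
    }

    // The parser bumps the counter when upgrading /u to /v would break the pattern.
    inline bool WouldBreakUnderUnicodeSets(char c) {
      return IsClassSetSyntaxChar(c) || IsClassSetReservedDoublePunctuator(c);
    }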
+ Isolate* isolate = nullptr; + return RegExpParserImpl{isolate, input, input_length, flags, + stack_limit, zone, no_gc} .Parse(result); } diff --git a/src/roots/roots-inl.h b/src/roots/roots-inl.h index 232566f246..e20affe608 100644 --- a/src/roots/roots-inl.h +++ b/src/roots/roots-inl.h @@ -125,7 +125,7 @@ void ReadOnlyRoots::VerifyNameForProtectorsPages() const { Address ReadOnlyRoots::at(RootIndex root_index) const { #if V8_STATIC_ROOTS_BOOL - return V8HeapCompressionScheme::DecompressTaggedPointer( + return V8HeapCompressionScheme::DecompressTagged( V8HeapCompressionScheme::base(), StaticReadOnlyRootsPointerTable[static_cast(root_index)]); #else diff --git a/src/roots/roots.cc b/src/roots/roots.cc index a73b0e96e4..434f1749f0 100644 --- a/src/roots/roots.cc +++ b/src/roots/roots.cc @@ -75,8 +75,7 @@ void ReadOnlyRoots::InitFromStaticRootsTable(Address cage_base) { #if V8_STATIC_ROOTS_BOOL RootIndex pos = RootIndex::kFirstReadOnlyRoot; for (auto element : StaticReadOnlyRootsPointerTable) { - auto ptr = - V8HeapCompressionScheme::DecompressTaggedPointer(cage_base, element); + auto ptr = V8HeapCompressionScheme::DecompressTagged(cage_base, element); DCHECK(!is_initialized(pos)); read_only_roots_[static_cast(pos)] = ptr; ++pos; diff --git a/src/roots/static-roots.h b/src/roots/static-roots.h index 875dce578b..4e8188c891 100644 --- a/src/roots/static-roots.h +++ b/src/roots/static-roots.h @@ -12,6 +12,9 @@ #if V8_STATIC_ROOTS_BOOL +#include "src/objects/instance-type.h" +#include "src/roots/roots.h" + // Disabling Wasm or Intl invalidates the contents of static-roots.h. // TODO(olivf): To support static roots for multiple build configurations we // will need to generate target specific versions of this file. @@ -92,19 +95,19 @@ struct StaticReadOnlyRoot { static constexpr Tagged_t kClosureFeedbackCellArrayMap = 0x2b05; static constexpr Tagged_t kFeedbackVectorMap = 0x2b2d; static constexpr Tagged_t kHeapNumberMap = 0x2b55; - static constexpr Tagged_t kSymbolMap = 0x2b7d; - static constexpr Tagged_t kForeignMap = 0x2ba5; - static constexpr Tagged_t kMegaDomHandlerMap = 0x2bcd; - static constexpr Tagged_t kBooleanMap = 0x2bf5; - static constexpr Tagged_t kUninitializedMap = 0x2c1d; - static constexpr Tagged_t kArgumentsMarkerMap = 0x2c45; - static constexpr Tagged_t kExceptionMap = 0x2c6d; - static constexpr Tagged_t kTerminationExceptionMap = 0x2c95; - static constexpr Tagged_t kOptimizedOutMap = 0x2cbd; - static constexpr Tagged_t kStaleRegisterMap = 0x2ce5; - static constexpr Tagged_t kSelfReferenceMarkerMap = 0x2d0d; - static constexpr Tagged_t kBasicBlockCountersMarkerMap = 0x2d35; - static constexpr Tagged_t kBigIntMap = 0x2d5d; + static constexpr Tagged_t kForeignMap = 0x2b7d; + static constexpr Tagged_t kMegaDomHandlerMap = 0x2ba5; + static constexpr Tagged_t kBooleanMap = 0x2bcd; + static constexpr Tagged_t kUninitializedMap = 0x2bf5; + static constexpr Tagged_t kArgumentsMarkerMap = 0x2c1d; + static constexpr Tagged_t kExceptionMap = 0x2c45; + static constexpr Tagged_t kTerminationExceptionMap = 0x2c6d; + static constexpr Tagged_t kOptimizedOutMap = 0x2c95; + static constexpr Tagged_t kStaleRegisterMap = 0x2cbd; + static constexpr Tagged_t kSelfReferenceMarkerMap = 0x2ce5; + static constexpr Tagged_t kBasicBlockCountersMarkerMap = 0x2d0d; + static constexpr Tagged_t kBigIntMap = 0x2d35; + static constexpr Tagged_t kSymbolMap = 0x2d5d; static constexpr Tagged_t kStringMap = 0x2d85; static constexpr Tagged_t kOneByteStringMap = 0x2dad; static constexpr Tagged_t kConsStringMap = 
0x2dd5; @@ -1504,6 +1507,34 @@ static constexpr std::array StaticReadOnlyRootsPointerTable = { StaticReadOnlyRoot::kStoreHandler3Map, }; +inline constexpr base::Optional> +StaticReadOnlyRootMapRange(InstanceType type) { + switch (type) { + case INTERNALIZED_STRING_TYPE: + return {{RootIndex::kInternalizedStringMap, + RootIndex::kUncachedExternalOneByteInternalizedStringMap}}; + case ALLOCATION_SITE_TYPE: + return {{RootIndex::kAllocationSiteWithWeakNextMap, + RootIndex::kAllocationSiteWithoutWeakNextMap}}; + default: { + } + } + return {}; +} + +inline constexpr base::Optional> +StaticReadOnlyRootMapRange(InstanceType first, InstanceType last) { + if (first == FIRST_STRING_TYPE && last == LAST_STRING_TYPE) { + return {{RootIndex::kStringMap, RootIndex::kSharedThinOneByteStringMap}}; + } + if (first == FIRST_NAME_TYPE && last == LAST_NAME_TYPE) { + return {{RootIndex::kSymbolMap, RootIndex::kSharedThinOneByteStringMap}}; + } + return {}; +} + +static constexpr size_t kStaticReadOnlyRootRangesHash = 4014968950881612012UL; + } // namespace internal } // namespace v8 #endif // V8_STATIC_ROOTS_BOOL diff --git a/src/runtime/runtime-wasm.cc b/src/runtime/runtime-wasm.cc index 7be644a327..95078dd1d2 100644 --- a/src/runtime/runtime-wasm.cc +++ b/src/runtime/runtime-wasm.cc @@ -1400,5 +1400,13 @@ RUNTIME_FUNCTION(Runtime_WasmStringFromCodePoint) { return *result; } +RUNTIME_FUNCTION(Runtime_WasmStringHash) { + ClearThreadInWasmScope flag_scope(isolate); + DCHECK_EQ(1, args.length()); + String string(String::cast(args[0])); + uint32_t hash = string.EnsureHash(); + return Smi::FromInt(static_cast(hash)); +} + } // namespace internal } // namespace v8 diff --git a/src/runtime/runtime.h b/src/runtime/runtime.h index eaf0a34f19..6ef5af2dfc 100644 --- a/src/runtime/runtime.h +++ b/src/runtime/runtime.h @@ -654,7 +654,8 @@ namespace internal { F(WasmStringViewWtf8Encode, 6, 1) \ F(WasmStringViewWtf8Slice, 3, 1) \ F(WasmStringCompare, 2, 1) \ - F(WasmStringFromCodePoint, 1, 1) + F(WasmStringFromCodePoint, 1, 1) \ + F(WasmStringHash, 1, 1) #define FOR_EACH_INTRINSIC_WASM_TEST(F, I) \ F(DeserializeWasmModule, 2, 1) \ diff --git a/src/snapshot/mksnapshot.cc b/src/snapshot/mksnapshot.cc index bc731c0d78..c3fbdeb114 100644 --- a/src/snapshot/mksnapshot.cc +++ b/src/snapshot/mksnapshot.cc @@ -15,6 +15,7 @@ #include "src/base/platform/wrappers.h" #include "src/base/vector.h" #include "src/codegen/cpu-features.h" +#include "src/common/globals.h" #include "src/flags/flags.h" #include "src/snapshot/embedded/embedded-file-writer.h" #include "src/snapshot/snapshot.h" @@ -293,6 +294,8 @@ int main(int argc, char** argv) { if (i::v8_flags.static_roots_src) { i::StaticRootsTableGen::write(i_isolate, i::v8_flags.static_roots_src); + } else if (V8_STATIC_ROOTS_BOOL) { + i::StaticRootsTableGen::VerifyRanges(i_isolate); } } diff --git a/src/snapshot/serializer.h b/src/snapshot/serializer.h index 8063da7726..cfaf9300b5 100644 --- a/src/snapshot/serializer.h +++ b/src/snapshot/serializer.h @@ -118,10 +118,10 @@ class CodeAddressMap : public CodeEventLogger { base::HashMap impl_; }; - void LogRecordedBuffer(Handle code, - MaybeHandle, const char* name, - int length) override { - address_to_name_map_.Insert(code->address(), name, length); + void LogRecordedBuffer(AbstractCode code, MaybeHandle, + const char* name, int length) override { + DisallowGarbageCollection no_gc; + address_to_name_map_.Insert(code.address(), name, length); } #if V8_ENABLE_WEBASSEMBLY diff --git a/src/snapshot/static-roots-gen.cc 
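VerifyRanges compares a hash of the freshly computed ranges against the kStaticReadOnlyRootRangesHash constant baked into static-roots.h, so a stale header fails loudly at mksnapshot time instead of producing wrong type checks. A generic sketch of that idea; the combiner below is a stand-in, not base::hash_combine.

    #include <cstddef>
    #include <cstdlib>
    #include <utility>
    #include <vector>

    inline size_t HashCombine(size_t seed, size_t v) {
      return seed ^ (v + 0x9e3779b97f4a7c15ULL + (seed << 6) + (seed >> 2));
    }

    inline size_t RangesHash(const std::vector<std::pair<int, int>>& ranges) {
      size_t hash = 0;
      for (const auto& [first, last] : ranges) {
        hash = HashCombine(hash, static_cast<size_t>(first));
        hash = HashCombine(hash, static_cast<size_t>(last));
      }
      return hash;
    }

    // mksnapshot-style check: abort if the baked-in constant no longer matches.
    // In V8 this is a CHECK_WITH_MSG telling you to re-run gen-static-roots.py.
    inline void VerifyRanges(const std::vector<std::pair<int, int>>& ranges,
                             size_t baked_in_hash) {
      if (RangesHash(ranges) != baked_in_hash) std::abort();
    }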
b/src/snapshot/static-roots-gen.cc index 5241c3982b..7d7da1f206 100644 --- a/src/snapshot/static-roots-gen.cc +++ b/src/snapshot/static-roots-gen.cc @@ -6,14 +6,199 @@ #include +#include "src/common/globals.h" #include "src/common/ptr-compr-inl.h" #include "src/execution/isolate.h" +#include "src/objects/instance-type-inl.h" +#include "src/objects/instance-type.h" +#include "src/objects/objects-definitions.h" +#include "src/objects/visitors.h" #include "src/roots/roots-inl.h" #include "src/roots/roots.h" +#include "src/roots/static-roots.h" namespace v8 { namespace internal { +class StaticRootsTableGenImpl { + public: + explicit StaticRootsTableGenImpl(Isolate* isolate) { + // Define some object type ranges of interest + // + // These are manually curated lists of objects that are explicitly placed + // next to each other on the read only heap and also correspond to important + // instance type ranges. + + std::list string, internalized_string; +#define ELEMENT(type, size, name, CamelName) \ + string.push_back(RootIndex::k##CamelName##Map); \ + if (InstanceTypeChecker::IsInternalizedString(type)) { \ + internalized_string.push_back(RootIndex::k##CamelName##Map); \ + } + STRING_TYPE_LIST(ELEMENT) +#undef ELEMENT + + root_ranges_.emplace_back("FIRST_STRING_TYPE", "LAST_STRING_TYPE", string); + root_ranges_.emplace_back("INTERNALIZED_STRING_TYPE", internalized_string); + + CHECK_EQ(LAST_NAME_TYPE, SYMBOL_TYPE); + CHECK_EQ(LAST_STRING_TYPE + 1, SYMBOL_TYPE); + string.push_back(RootIndex::kSymbolMap); + root_ranges_.emplace_back("FIRST_NAME_TYPE", "LAST_NAME_TYPE", string); + + std::list allocation_site; +#define ELEMENT(_1, _2, CamelName) \ + allocation_site.push_back(RootIndex::k##CamelName); + ALLOCATION_SITE_MAPS_LIST(ELEMENT); +#undef ELEMENT + root_ranges_.emplace_back("ALLOCATION_SITE_TYPE", allocation_site); + + // Collect all roots + ReadOnlyRoots ro_roots(isolate); + { + RootIndex pos = RootIndex::kFirstReadOnlyRoot; +#define ADD_ROOT(_, value, CamelName) \ + { \ + Tagged_t ptr = V8HeapCompressionScheme::CompressTagged( \ + ro_roots.unchecked_##value().ptr()); \ + sorted_roots_[ptr].push_back(pos); \ + camel_names_[RootIndex::k##CamelName] = #CamelName; \ + ++pos; \ + } + READ_ONLY_ROOT_LIST(ADD_ROOT) +#undef ADD_ROOT + } + + // Compute start and end of ranges + for (auto& entry : sorted_roots_) { + Tagged_t ptr = entry.first; + std::list& roots = entry.second; + + for (RootIndex pos : roots) { + std::string& name = camel_names_.at(pos); + for (ObjectRange& range : root_ranges_) { + range.VisitNextRoot(name, pos, ptr); + } + } + } + } + + // Used to compute ranges of objects next to each other on the r/o heap. A + // range contains a set of RootIndex's and computes the one with the lowest + // and highest address, aborting if they are not continuous (i.e. there is + // some other object in between). 
+ class ObjectRange { + public: + ObjectRange(const std::string& instance_type, + const std::list objects) + : ObjectRange(instance_type, instance_type, objects) {} + ObjectRange(const std::string& first, const std::string& last, + const std::list objects) + : first_instance_type_(first), + last_instance_type_(last), + objects_(objects) {} + ~ObjectRange() { + CHECK(!open_); + CHECK(first_ != RootIndex::kRootListLength); + CHECK(last_ != RootIndex::kRootListLength); + } + + ObjectRange(ObjectRange& range) = delete; + ObjectRange& operator=(ObjectRange& range) = delete; + ObjectRange(ObjectRange&& range) V8_NOEXCEPT = default; + ObjectRange& operator=(ObjectRange&& range) V8_NOEXCEPT = default; + + // Needs to be called in order of addresses. + void VisitNextRoot(const std::string& root_name, RootIndex idx, + Tagged_t ptr) { + auto test = [&](RootIndex obj) { + return std::find(objects_.begin(), objects_.end(), obj) != + objects_.end(); + }; + if (open_) { + if (test(idx)) { + last_ = idx; + } else { + open_ = false; + } + return; + } + + if (first_ == RootIndex::kRootListLength) { + if (test(idx)) { + first_ = idx; + open_ = true; + } + } else { + // If this check fails then the read only space was rearranged and what + // used to be a set of objects with continuous addresses is not anymore. + CHECK_WITH_MSG(!test(idx), + (first_instance_type_ + "-" + last_instance_type_ + + " does not specify a continuous range of " + "objects. There is a gap before " + + root_name) + .c_str()); + } + } + + const std::string& first_instance_type() const { + return first_instance_type_; + } + const std::string& last_instance_type() const { + return last_instance_type_; + } + RootIndex first() const { return first_; } + RootIndex last() const { return last_; } + bool singleton() const { + return first_instance_type_ == last_instance_type_; + } + + private: + RootIndex first_ = RootIndex::kRootListLength; + RootIndex last_ = RootIndex::kRootListLength; + std::string first_instance_type_; + std::string last_instance_type_; + + std::list objects_; + bool open_ = false; + + friend class StaticRootsTableGenImpl; + }; + + size_t RangesHash() const { + size_t hash = 0; + for (auto& range : root_ranges_) { + hash = base::hash_combine(hash, range.first_, + base::hash_combine(hash, range.last_)); + } + return hash; + } + + const std::map>& sorted_roots() { + return sorted_roots_; + } + + const std::list& root_ranges() { return root_ranges_; } + + const std::string& camel_name(RootIndex idx) { return camel_names_.at(idx); } + + private: + std::map> sorted_roots_; + std::list root_ranges_; + std::unordered_map camel_names_; +}; + +// Check if the computed ranges are still valid, ie. all their members lie +// between known boundaries. +void StaticRootsTableGen::VerifyRanges(Isolate* isolate) { +#if V8_STATIC_ROOTS_BOOL + StaticRootsTableGenImpl gen(isolate); + CHECK_WITH_MSG(kStaticReadOnlyRootRangesHash == gen.RangesHash(), + "StaticReadOnlyRanges changed. Run " + "tools/dev/gen-static-roots.py` to update static-roots.h."); +#endif // V8_STATIC_ROOTS_BOOL +} + void StaticRootsTableGen::write(Isolate* isolate, const char* file) { CHECK_WITH_MSG(!V8_STATIC_ROOTS_BOOL, "Re-generating the table of roots is only supported in builds " @@ -22,7 +207,6 @@ void StaticRootsTableGen::write(Isolate* isolate, const char* file) { static_assert(static_cast(RootIndex::kFirstReadOnlyRoot) == 0); std::ofstream out(file); - const auto ro_roots = ReadOnlyRoots(isolate); out << "// Copyright 2022 the V8 project authors. 
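ObjectRange::VisitNextRoot only works because the generator walks the read-only roots in ascending address order; a range is valid exactly when its members form one uninterrupted run. A simplified standalone version of that check, with invented types in place of RootIndex and CHECK_WITH_MSG:

    #include <algorithm>
    #include <stdexcept>
    #include <string>
    #include <utility>
    #include <vector>

    class ContiguousRunCheck {
     public:
      explicit ContiguousRunCheck(std::vector<std::string> members)
          : members_(std::move(members)) {}

      // Call once per root, in ascending address order.
      void Visit(const std::string& name) {
        bool in_set =
            std::find(members_.begin(), members_.end(), name) != members_.end();
        if (open_) {
          if (!in_set) open_ = false;  // the run just ended
          return;
        }
        if (!started_) {
          if (in_set) started_ = open_ = true;
        } else if (in_set) {
          // A member reappears after the run ended: the read-only layout is no
          // longer contiguous and static-roots.h must be regenerated.
          throw std::runtime_error("gap in range before " + name);
        }
      }

     private:
      std::vector<std::string> members_;
      bool started_ = false;
      bool open_ = false;
    };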
All rights reserved.\n" << "// Use of this source code is governed by a BSD-style license " @@ -39,13 +223,15 @@ void StaticRootsTableGen::write(Isolate* isolate, const char* file) { << "\n" << "#if V8_STATIC_ROOTS_BOOL\n" << "\n" + << "#include \"src/objects/instance-type.h\"\n" + << "#include \"src/roots/roots.h\"\n" + << "\n" << "// Disabling Wasm or Intl invalidates the contents of " "static-roots.h.\n" << "// TODO(olivf): To support static roots for multiple build " "configurations we\n" << "// will need to generate target specific versions of " - "this " - "file.\n" + "this file.\n" << "static_assert(V8_ENABLE_WEBASSEMBLY);\n" << "static_assert(V8_INTL_SUPPORT);\n" << "\n" @@ -57,29 +243,23 @@ void StaticRootsTableGen::write(Isolate* isolate, const char* file) { // Output a symbol for every root. Ordered by ptr to make it easier to see the // memory layout of the read only page. const auto size = static_cast(RootIndex::kReadOnlyRootsCount); - { - std::map> sorted_roots; -#define ADD_ROOT(_, value, CamelName) \ - { \ - Tagged_t ptr = V8HeapCompressionScheme::CompressTagged( \ - ro_roots.unchecked_##value().ptr()); \ - sorted_roots[ptr].push_back(#CamelName); \ - } - READ_ONLY_ROOT_LIST(ADD_ROOT) -#undef ADD_ROOT + StaticRootsTableGenImpl gen(isolate); - for (auto& entry : sorted_roots) { - Tagged_t ptr = entry.first; - std::list& names = entry.second; + for (auto& entry : gen.sorted_roots()) { + Tagged_t ptr = entry.first; + const std::list& roots = entry.second; - for (std::string& name : names) { - out << " static constexpr Tagged_t k" << name << " ="; - if (name.length() + 39 > 80) out << "\n "; - out << " " << reinterpret_cast(ptr) << ";\n"; - } + for (RootIndex root : roots) { + static const char* kPreString = " static constexpr Tagged_t k"; + const std::string& name = gen.camel_name(root); + size_t ptr_len = ceil(log2(ptr) / 4.0); + // Full line is: "kPreString|name = 0x.....;" + size_t len = strlen(kPreString) + name.length() + 5 + ptr_len + 1; + out << kPreString << name << " ="; + if (len > 80) out << "\n "; + out << " " << reinterpret_cast(ptr) << ";\n"; } } - out << "};\n"; // Output in order of roots table @@ -93,6 +273,49 @@ void StaticRootsTableGen::write(Isolate* isolate, const char* file) { #undef ENTRY out << "};\n"; } + out << "\n"; + + // Output interesting ranges of consecutive roots + out << "inline constexpr base::Optional>\n" + "StaticReadOnlyRootMapRange(InstanceType type) {\n" + " switch (type) {\n"; + static const char* kPreString = " return {{RootIndex::k"; + static const char* kMidString = " RootIndex::k"; + for (const auto& rng : gen.root_ranges()) { + if (!rng.singleton()) continue; + out << " case " << rng.first_instance_type() << ":\n"; + const std::string& first_name = gen.camel_name(rng.first()); + const std::string& last_name = gen.camel_name(rng.last()); + // Full line is: " kPreString|first_name,kMidString|last_name}};" + size_t len = 2 + strlen(kPreString) + first_name.length() + 1 + + strlen(kMidString) + last_name.length() + 3; + out << " " << kPreString << first_name << ","; + if (len > 80) out << "\n "; + out << kMidString << last_name << "}};\n"; + } + out << " default: {\n }\n" + " }\n" + " return {};\n}\n\n"; + out << "inline constexpr base::Optional>\n" + "StaticReadOnlyRootMapRange(InstanceType first, InstanceType last) " + "{\n"; + for (const auto& rng : gen.root_ranges()) { + if (rng.singleton()) continue; + out << " if (first == " << rng.first_instance_type() + << " && last == " << rng.last_instance_type() << ") {\n"; + const 
std::string& first_name = gen.camel_name(rng.first()); + const std::string& last_name = gen.camel_name(rng.last()); + // Full line is: "kPreString|first_name,kMidString|last_name}};" + size_t len = strlen(kPreString) + first_name.length() + 1 + + strlen(kMidString) + last_name.length() + 3; + out << " return {{RootIndex::k" << first_name << ","; + if (len > 80) out << "\n "; + out << " RootIndex::k" << last_name << "}};\n" + << " }\n"; + } + out << " return {};\n}\n\n"; + out << "static constexpr size_t kStaticReadOnlyRootRangesHash = " + << gen.RangesHash() << "UL;\n"; out << "\n} // namespace internal\n" << "} // namespace v8\n" diff --git a/src/snapshot/static-roots-gen.h b/src/snapshot/static-roots-gen.h index 2422df75d0..b99e290d87 100644 --- a/src/snapshot/static-roots-gen.h +++ b/src/snapshot/static-roots-gen.h @@ -13,6 +13,7 @@ class Isolate; class StaticRootsTableGen { public: static void write(Isolate* isolate, const char* file); + static void VerifyRanges(Isolate* isolate); }; } // namespace internal diff --git a/src/trap-handler/handler-inside-posix.cc b/src/trap-handler/handler-inside-posix.cc index 17af3d75dc..39d6e0d3ae 100644 --- a/src/trap-handler/handler-inside-posix.cc +++ b/src/trap-handler/handler-inside-posix.cc @@ -91,7 +91,12 @@ class UnmaskOobSignalScope { #ifdef V8_TRAP_HANDLER_VIA_SIMULATOR // This is the address where we continue on a failed "ProbeMemory". It's defined // in "handler-outside-simulator.cc". -extern "C" char v8_probe_memory_continuation[]; +extern char probe_memory_continuation[] +#if V8_OS_DARWIN + asm("_v8_simulator_probe_memory_continuation"); +#else + asm("v8_simulator_probe_memory_continuation"); +#endif #endif // V8_TRAP_HANDLER_VIA_SIMULATOR bool TryHandleSignal(int signum, siginfo_t* info, void* context) { @@ -149,7 +154,7 @@ bool TryHandleSignal(int signum, siginfo_t* info, void* context) { auto* return_reg = CONTEXT_REG(rax, RAX); *return_reg = landing_pad; // Continue at the memory probing continuation. - *context_ip = reinterpret_cast(&v8_probe_memory_continuation); + *context_ip = reinterpret_cast(&probe_memory_continuation); #else if (!TryFindLandingPad(fault_addr, &landing_pad)) return false; diff --git a/src/trap-handler/handler-inside-win.cc b/src/trap-handler/handler-inside-win.cc index ffadcd8f13..f63875f4ae 100644 --- a/src/trap-handler/handler-inside-win.cc +++ b/src/trap-handler/handler-inside-win.cc @@ -56,7 +56,8 @@ struct TEB { #ifdef V8_TRAP_HANDLER_VIA_SIMULATOR // This is the address where we continue on a failed "ProbeMemory". It's defined // in "handler-outside-simulator.cc". -extern "C" char v8_probe_memory_continuation[]; +extern char probe_memory_continuation[] asm( + "v8_simulator_probe_memory_continuation"); #endif // V8_TRAP_HANDLER_VIA_SIMULATOR bool TryHandleWasmTrap(EXCEPTION_POINTERS* exception) { @@ -110,7 +111,7 @@ bool TryHandleWasmTrap(EXCEPTION_POINTERS* exception) { exception->ContextRecord->Rax = landing_pad; // Continue at the memory probing continuation. 
exception->ContextRecord->Rip = - reinterpret_cast(&v8_probe_memory_continuation); + reinterpret_cast(&probe_memory_continuation); #else if (!TryFindLandingPad(fault_addr, &landing_pad)) return false; diff --git a/src/trap-handler/handler-outside-simulator.cc b/src/trap-handler/handler-outside-simulator.cc index 5e58719e7f..955b3c0b7c 100644 --- a/src/trap-handler/handler-outside-simulator.cc +++ b/src/trap-handler/handler-outside-simulator.cc @@ -14,29 +14,29 @@ #define SYMBOL(name) #name #endif // !V8_OS_DARWIN -// Define the ProbeMemory function declared in trap-handler-simulators.h. -asm( - ".globl " SYMBOL(ProbeMemory) " \n" - SYMBOL(ProbeMemory) ": \n" +// Define the v8::internal::trap_handler::ProbeMemory function declared in +// trap-handler-simulators.h. +asm(".globl " SYMBOL(v8_internal_simulator_ProbeMemory) " \n" + SYMBOL(v8_internal_simulator_ProbeMemory) ": \n" // First parameter (address) passed in %rdi on Linux/Mac, and %rcx on Windows. // The second parameter (pc) is unused here. It is read by the trap handler // instead. #if V8_OS_WIN - " movb (%rcx), %al \n" + " movb (%rcx), %al \n" #else - " movb (%rdi), %al \n" + " movb (%rdi), %al \n" #endif // V8_OS_WIN // Return 0 on success. - " xorl %eax, %eax \n" + " xorl %eax, %eax \n" // Place an additional "ret" here instead of falling through to the one // below, because (some) toolchain(s) on Mac set ".subsections_via_symbols", // which can cause the "ret" below to be placed elsewhere. An alternative // prevention would be to add ".alt_entry" (see // https://reviews.llvm.org/D79926), but just adding a "ret" is simpler. - " ret \n" - ".globl " SYMBOL(v8_probe_memory_continuation) "\n" - SYMBOL(v8_probe_memory_continuation) ": \n" + " ret \n" + ".globl " SYMBOL(v8_simulator_probe_memory_continuation) " \n" + SYMBOL(v8_simulator_probe_memory_continuation) ": \n" // If the trap handler continues here, it wrote the landing pad in %rax. - " ret \n"); + " ret \n"); #endif diff --git a/src/trap-handler/trap-handler-simulator.h b/src/trap-handler/trap-handler-simulator.h index bfceb49697..0ab80d202e 100644 --- a/src/trap-handler/trap-handler-simulator.h +++ b/src/trap-handler/trap-handler-simulator.h @@ -7,6 +7,8 @@ #include +#include "include/v8config.h" + // This header defines the ProbeMemory function to be used by simulators to // trigger a signal at a defined location, before doing an actual memory access. @@ -16,9 +18,7 @@ #error "Do only include this file on simulator builds on x64." #endif -namespace v8 { -namespace internal { -namespace trap_handler { +namespace v8::internal::trap_handler { // Probe a memory address by doing a 1-byte read from the given address. If the // address is not readable, this will cause a trap as usual, but the trap @@ -28,10 +28,16 @@ namespace trap_handler { // is not registered as a protected instruction, the signal will be propagated // as usual. // If the read at {address} succeeds, this function returns {0} instead. -extern "C" uintptr_t ProbeMemory(uintptr_t address, uintptr_t pc); +uintptr_t ProbeMemory(uintptr_t address, uintptr_t pc) +// Specify an explicit symbol name (defined in +// handler-outside-simulator.cc). Just {extern "C"} would produce +// "ProbeMemory", but we want something more expressive on stack traces. 
+#if V8_OS_DARWIN + asm("_v8_internal_simulator_ProbeMemory"); +#else + asm("v8_internal_simulator_ProbeMemory"); +#endif -} // namespace trap_handler -} // namespace internal -} // namespace v8 +} // namespace v8::internal::trap_handler #endif // V8_TRAP_HANDLER_TRAP_HANDLER_SIMULATOR_H_ diff --git a/src/wasm/baseline/arm/liftoff-assembler-arm.h b/src/wasm/baseline/arm/liftoff-assembler-arm.h index c4727f8134..adf797dcaa 100644 --- a/src/wasm/baseline/arm/liftoff-assembler-arm.h +++ b/src/wasm/baseline/arm/liftoff-assembler-arm.h @@ -162,7 +162,7 @@ inline void I64BinopI(LiftoffAssembler* assm, LiftoffRegister dst, LeaveCC, al); } -template inline void I64Shiftop(LiftoffAssembler* assm, LiftoffRegister dst, @@ -184,7 +184,7 @@ inline void I64Shiftop(LiftoffAssembler* assm, LiftoffRegister dst, Register* later_src_reg = is_left_shift ? &src_low : &src_high; if (*later_src_reg == clobbered_dst_reg) { *later_src_reg = assm->GetUnusedRegister(kGpReg, pinned).gp(); - assm->TurboAssembler::Move(*later_src_reg, clobbered_dst_reg); + assm->MacroAssembler::Move(*later_src_reg, clobbered_dst_reg); } (assm->*op)(dst_low, dst_high, src_low, src_high, amount_capped); @@ -210,14 +210,14 @@ inline void EmitFloatMinOrMax(LiftoffAssembler* assm, RegisterType dst, MinOrMax min_or_max) { DCHECK(RegisterType::kSizeInBytes == 4 || RegisterType::kSizeInBytes == 8); if (lhs == rhs) { - assm->TurboAssembler::Move(dst, lhs); + assm->MacroAssembler::Move(dst, lhs); return; } Label done, is_nan; if (min_or_max == MinOrMax::kMin) { - assm->TurboAssembler::FloatMin(dst, lhs, rhs, &is_nan); + assm->MacroAssembler::FloatMin(dst, lhs, rhs, &is_nan); } else { - assm->TurboAssembler::FloatMax(dst, lhs, rhs, &is_nan); + assm->MacroAssembler::FloatMax(dst, lhs, rhs, &is_nan); } assm->b(&done); assm->bind(&is_nan); @@ -547,7 +547,7 @@ void LiftoffAssembler::PatchPrepareStackFrame( bind(&continuation); // Now allocate the stack space. Note that this might do more than just - // decrementing the SP; consult {TurboAssembler::AllocateStackSpace}. + // decrementing the SP; consult {MacroAssembler::AllocateStackSpace}. 
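The trap-handler declarations above pin an explicit assembly-level symbol with an asm() label (a GCC/Clang extension), adding the leading underscore Mach-O expects on Darwin. A compilable sketch of the mechanism with invented symbol names; in V8 the actual body lives in hand-written inline assembly in handler-outside-simulator.cc.

    #include <cstdint>

    uintptr_t ProbeByte(uintptr_t address, uintptr_t pc)
    #if defined(__APPLE__)
        asm("_demo_simulator_ProbeByte")
    #else
        asm("demo_simulator_ProbeByte")
    #endif
        ;

    // For the sketch we give it a C++ body; the asm label on the declaration
    // makes this definition come out under the prefixed symbol, which is what
    // shows up in stack traces and what the signal handler redirects around.
    uintptr_t ProbeByte(uintptr_t address, uintptr_t /*pc*/) {
      volatile uint8_t byte = *reinterpret_cast<volatile uint8_t*>(address);
      (void)byte;
      return 0;  // 0 means the 1-byte read succeeded
    }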
AllocateStackSpace(frame_size); // Jump back to the start of the function, from {pc_offset()} to @@ -584,14 +584,14 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value, RelocInfo::Mode rmode) { switch (value.type().kind()) { case kI32: - TurboAssembler::Move(reg.gp(), Operand(value.to_i32(), rmode)); + MacroAssembler::Move(reg.gp(), Operand(value.to_i32(), rmode)); break; case kI64: { DCHECK(RelocInfo::IsNoInfo(rmode)); int32_t low_word = value.to_i64(); int32_t high_word = value.to_i64() >> 32; - TurboAssembler::Move(reg.low_gp(), Operand(low_word)); - TurboAssembler::Move(reg.high_gp(), Operand(high_word)); + MacroAssembler::Move(reg.low_gp(), Operand(low_word)); + MacroAssembler::Move(reg.high_gp(), Operand(high_word)); break; } case kF32: @@ -1450,7 +1450,7 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset, void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) { DCHECK_NE(dst, src); DCHECK(kind == kI32 || is_reference(kind)); - TurboAssembler::Move(dst, src); + MacroAssembler::Move(dst, src); } void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, @@ -1828,7 +1828,7 @@ bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs, void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src, Register amount) { - liftoff::I64Shiftop<&TurboAssembler::LslPair, true>(this, dst, src, amount); + liftoff::I64Shiftop<&MacroAssembler::LslPair, true>(this, dst, src, amount); } void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src, @@ -1843,7 +1843,7 @@ void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src, void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src, Register amount) { - liftoff::I64Shiftop<&TurboAssembler::AsrPair, false>(this, dst, src, amount); + liftoff::I64Shiftop<&MacroAssembler::AsrPair, false>(this, dst, src, amount); } void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src, @@ -1858,7 +1858,7 @@ void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src, void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src, Register amount) { - liftoff::I64Shiftop<&TurboAssembler::LsrPair, false>(this, dst, src, amount); + liftoff::I64Shiftop<&MacroAssembler::LsrPair, false>(this, dst, src, amount); } void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, LiftoffRegister src, @@ -2085,7 +2085,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, LiftoffRegister src, Label* trap) { switch (opcode) { case kExprI32ConvertI64: - TurboAssembler::Move(dst.gp(), src.low_gp()); + MacroAssembler::Move(dst.gp(), src.low_gp()); return true; case kExprI32SConvertF32: { UseScratchRegisterScope temps(this); @@ -2272,7 +2272,7 @@ void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst, void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst, LiftoffRegister src) { - TurboAssembler::Move(dst.low_gp(), src.low_gp()); + MacroAssembler::Move(dst.low_gp(), src.low_gp()); mov(dst.high_gp(), Operand(src.low_gp(), ASR, 31)); } @@ -2472,7 +2472,7 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr, } else if (memtype == MachineType::Int64()) { vld1(Neon32, NeonListOperand(dst.low_fp()), NeonMemOperand(actual_src_addr)); - TurboAssembler::Move(dst.high_fp(), dst.low_fp()); + MacroAssembler::Move(dst.high_fp(), dst.low_fp()); } } } @@ -2484,13 +2484,13 @@ void LiftoffAssembler::LoadLane(LiftoffRegister 
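The I64Shiftop helper in this file routes through the MacroAssembler pair-shift primitives because on 32-bit Arm a wasm i64 occupies two GP registers and shifted bits have to cross the register boundary. A plain C++ model of the left-shift case (what LslPair computes, not the emitted code):

    #include <cstdint>

    inline void LslPair(uint32_t& low, uint32_t& high, uint32_t amount) {
      amount &= 63;                  // wasm shift counts are taken modulo 64
      if (amount == 0) return;
      if (amount >= 32) {
        high = low << (amount - 32); // everything moves into the high word
        low = 0;
      } else {
        high = (high << amount) | (low >> (32 - amount));
        low <<= amount;
      }
    }

    // Example: the pair (low=0, high=1), i.e. 1 << 32, shifted left by 4 more
    // bits yields (low=0, high=0x10), i.e. 1 << 36.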
dst, LiftoffRegister src, UseScratchRegisterScope temps(this); Register actual_src_addr = liftoff::CalculateActualAddress( this, &temps, addr, offset_reg, offset_imm); - TurboAssembler::Move(liftoff::GetSimd128Register(dst), + MacroAssembler::Move(liftoff::GetSimd128Register(dst), liftoff::GetSimd128Register(src)); *protected_load_pc = pc_offset(); LoadStoreLaneParams load_params(type.mem_type().representation(), laneidx); NeonListOperand dst_op = NeonListOperand(load_params.low_op ? dst.low_fp() : dst.high_fp()); - TurboAssembler::LoadLane(load_params.sz, dst_op, load_params.laneidx, + MacroAssembler::LoadLane(load_params.sz, dst_op, load_params.laneidx, NeonMemOperand(actual_src_addr)); } @@ -2506,7 +2506,7 @@ void LiftoffAssembler::StoreLane(Register dst, Register offset, LoadStoreLaneParams store_params(type.mem_rep(), laneidx); NeonListOperand src_op = NeonListOperand(store_params.low_op ? src.low_fp() : src.high_fp()); - TurboAssembler::StoreLane(store_params.sz, src_op, store_params.laneidx, + MacroAssembler::StoreLane(store_params.sz, src_op, store_params.laneidx, NeonMemOperand(actual_dst_addr)); } @@ -2519,7 +2519,7 @@ void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst, if (dst == lhs) { // dst will be overwritten, so keep the table somewhere else. QwNeonRegister tbl = temps.AcquireQ(); - TurboAssembler::Move(tbl, liftoff::GetSimd128Register(lhs)); + MacroAssembler::Move(tbl, liftoff::GetSimd128Register(lhs)); table = NeonListOperand(tbl); } @@ -2564,8 +2564,8 @@ void LiftoffAssembler::emit_s128_relaxed_laneselect(LiftoffRegister dst, void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst, LiftoffRegister src) { - TurboAssembler::Move(dst.low_fp(), src.fp()); - TurboAssembler::Move(dst.high_fp(), src.fp()); + MacroAssembler::Move(dst.low_fp(), src.fp()); + MacroAssembler::Move(dst.high_fp(), src.fp()); } void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst, @@ -4243,7 +4243,7 @@ void LiftoffAssembler::CallTrapCallbackForTesting() { void LiftoffAssembler::AssertUnreachable(AbortReason reason) { // Asserts unreachable within the wasm code. - TurboAssembler::AssertUnreachable(reason); + MacroAssembler::AssertUnreachable(reason); } void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { diff --git a/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/src/wasm/baseline/arm64/liftoff-assembler-arm64.h index 1bc42ed72a..74f64a340d 100644 --- a/src/wasm/baseline/arm64/liftoff-assembler-arm64.h +++ b/src/wasm/baseline/arm64/liftoff-assembler-arm64.h @@ -357,7 +357,7 @@ void LiftoffAssembler::PatchPrepareStackFrame( bind(&continuation); // Now allocate the stack space. Note that this might do more than just - // decrementing the SP; consult {TurboAssembler::Claim}. + // decrementing the SP; consult {MacroAssembler::Claim}. Claim(frame_size, 1); // Jump back to the start of the function, from {pc_offset()} to @@ -438,7 +438,7 @@ void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, Register instance, int offset) { DCHECK_LE(0, offset); - LoadTaggedPointerField(dst, MemOperand{instance, offset}); + LoadTaggedField(dst, MemOperand{instance, offset}); } void LiftoffAssembler::LoadExternalPointer(Register dst, Register instance, @@ -461,7 +461,7 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr, unsigned shift_amount = !needs_shift ? 0 : COMPRESS_POINTERS_BOOL ? 
2 : 3; MemOperand src_op = liftoff::GetMemOp(this, &temps, src_addr, offset_reg, offset_imm, false, shift_amount); - LoadTaggedPointerField(dst, src_op); + LoadTaggedField(dst, src_op); } void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr, @@ -502,7 +502,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr, bind(&write_barrier); JumpIfSmi(src.gp(), &exit); if (COMPRESS_POINTERS_BOOL) { - DecompressTaggedPointer(src.gp(), src.gp()); + DecompressTagged(src.gp(), src.gp()); } CheckPageFlag(src.gp(), MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask, @@ -3252,7 +3252,7 @@ void LiftoffAssembler::CallTrapCallbackForTesting() { } void LiftoffAssembler::AssertUnreachable(AbortReason reason) { - TurboAssembler::AssertUnreachable(reason); + MacroAssembler::AssertUnreachable(reason); } void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { diff --git a/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/src/wasm/baseline/ia32/liftoff-assembler-ia32.h index 0f28d7157a..1db3255a96 100644 --- a/src/wasm/baseline/ia32/liftoff-assembler-ia32.h +++ b/src/wasm/baseline/ia32/liftoff-assembler-ia32.h @@ -288,7 +288,7 @@ void LiftoffAssembler::PatchPrepareStackFrame( bind(&continuation); // Now allocate the stack space. Note that this might do more than just - // decrementing the SP; consult {TurboAssembler::AllocateStackSpace}. + // decrementing the SP; consult {MacroAssembler::AllocateStackSpace}. AllocateStackSpace(frame_size); // Jump back to the start of the function, from {pc_offset()} to @@ -319,21 +319,21 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value, RelocInfo::Mode rmode) { switch (value.type().kind()) { case kI32: - TurboAssembler::Move(reg.gp(), Immediate(value.to_i32(), rmode)); + MacroAssembler::Move(reg.gp(), Immediate(value.to_i32(), rmode)); break; case kI64: { DCHECK(RelocInfo::IsNoInfo(rmode)); int32_t low_word = value.to_i64(); int32_t high_word = value.to_i64() >> 32; - TurboAssembler::Move(reg.low_gp(), Immediate(low_word)); - TurboAssembler::Move(reg.high_gp(), Immediate(high_word)); + MacroAssembler::Move(reg.low_gp(), Immediate(low_word)); + MacroAssembler::Move(reg.high_gp(), Immediate(high_word)); break; } case kF32: - TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits()); + MacroAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits()); break; case kF64: - TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits()); + MacroAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits()); break; default: UNREACHABLE(); @@ -1704,7 +1704,7 @@ inline LiftoffRegister ReplaceInPair(LiftoffRegister pair, Register old_reg, inline void Emit64BitShiftOperation( LiftoffAssembler* assm, LiftoffRegister dst, LiftoffRegister src, - Register amount, void (TurboAssembler::*emit_shift)(Register, Register)) { + Register amount, void (MacroAssembler::*emit_shift)(Register, Register)) { // Temporary registers cannot overlap with {dst}. 
LiftoffRegList pinned{dst}; @@ -1743,7 +1743,7 @@ inline void Emit64BitShiftOperation( void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src, Register amount) { liftoff::Emit64BitShiftOperation(this, dst, src, amount, - &TurboAssembler::ShlPair_cl); + &MacroAssembler::ShlPair_cl); } void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src, @@ -1762,7 +1762,7 @@ void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src, void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src, Register amount) { liftoff::Emit64BitShiftOperation(this, dst, src, amount, - &TurboAssembler::SarPair_cl); + &MacroAssembler::SarPair_cl); } void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src, @@ -1781,7 +1781,7 @@ void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src, void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src, Register amount) { liftoff::Emit64BitShiftOperation(this, dst, src, amount, - &TurboAssembler::ShrPair_cl); + &MacroAssembler::ShrPair_cl); } void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, LiftoffRegister src, @@ -2025,10 +2025,10 @@ void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs, void LiftoffAssembler::emit_f32_abs(DoubleRegister dst, DoubleRegister src) { static constexpr uint32_t kSignBit = uint32_t{1} << 31; if (dst == src) { - TurboAssembler::Move(liftoff::kScratchDoubleReg, kSignBit - 1); + MacroAssembler::Move(liftoff::kScratchDoubleReg, kSignBit - 1); Andps(dst, liftoff::kScratchDoubleReg); } else { - TurboAssembler::Move(dst, kSignBit - 1); + MacroAssembler::Move(dst, kSignBit - 1); Andps(dst, src); } } @@ -2036,10 +2036,10 @@ void LiftoffAssembler::emit_f32_abs(DoubleRegister dst, DoubleRegister src) { void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) { static constexpr uint32_t kSignBit = uint32_t{1} << 31; if (dst == src) { - TurboAssembler::Move(liftoff::kScratchDoubleReg, kSignBit); + MacroAssembler::Move(liftoff::kScratchDoubleReg, kSignBit); Xorps(dst, liftoff::kScratchDoubleReg); } else { - TurboAssembler::Move(dst, kSignBit); + MacroAssembler::Move(dst, kSignBit); Xorps(dst, src); } } @@ -2162,10 +2162,10 @@ void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs, void LiftoffAssembler::emit_f64_abs(DoubleRegister dst, DoubleRegister src) { static constexpr uint64_t kSignBit = uint64_t{1} << 63; if (dst == src) { - TurboAssembler::Move(liftoff::kScratchDoubleReg, kSignBit - 1); + MacroAssembler::Move(liftoff::kScratchDoubleReg, kSignBit - 1); Andpd(dst, liftoff::kScratchDoubleReg); } else { - TurboAssembler::Move(dst, kSignBit - 1); + MacroAssembler::Move(dst, kSignBit - 1); Andpd(dst, src); } } @@ -2173,10 +2173,10 @@ void LiftoffAssembler::emit_f64_abs(DoubleRegister dst, DoubleRegister src) { void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) { static constexpr uint64_t kSignBit = uint64_t{1} << 63; if (dst == src) { - TurboAssembler::Move(liftoff::kScratchDoubleReg, kSignBit); + MacroAssembler::Move(liftoff::kScratchDoubleReg, kSignBit); Xorpd(dst, liftoff::kScratchDoubleReg); } else { - TurboAssembler::Move(dst, kSignBit); + MacroAssembler::Move(dst, kSignBit); Xorpd(dst, src); } } @@ -2739,7 +2739,7 @@ inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst, assm->cmov(zero, dst.gp(), tmp); } -template +template inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst, LiftoffRegister 
src, base::Optional<CpuFeature> feature = base::nullopt) { @@ -3279,14 +3279,14 @@ void LiftoffAssembler::emit_s128_const(LiftoffRegister dst, const uint8_t imms[16]) { uint64_t vals[2]; memcpy(vals, imms, sizeof(vals)); - TurboAssembler::Move(dst.fp(), vals[0]); + MacroAssembler::Move(dst.fp(), vals[0]); uint64_t high = vals[1]; Register tmp = GetUnusedRegister(RegClass::kGpReg, {}).gp(); - TurboAssembler::Move(tmp, Immediate(high & 0xffff'ffff)); + MacroAssembler::Move(tmp, Immediate(high & 0xffff'ffff)); Pinsrd(dst.fp(), tmp, 2); - TurboAssembler::Move(tmp, Immediate(high >> 32)); + MacroAssembler::Move(tmp, Immediate(high >> 32)); Pinsrd(dst.fp(), tmp, 3); } @@ -3347,7 +3347,7 @@ void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst, void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst, LiftoffRegister src) { - liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqb>(this, dst, src); + liftoff::EmitAllTrue<&MacroAssembler::Pcmpeqb>(this, dst, src); } void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst, @@ -3483,7 +3483,7 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst, void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst, LiftoffRegister src) { - liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqw>(this, dst, src); + liftoff::EmitAllTrue<&MacroAssembler::Pcmpeqw>(this, dst, src); } void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst, @@ -3694,7 +3694,7 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst, void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst, LiftoffRegister src) { - liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqd>(this, dst, src); + liftoff::EmitAllTrue<&MacroAssembler::Pcmpeqd>(this, dst, src); } void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst, @@ -3866,7 +3866,7 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst, void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst, LiftoffRegister src) { - liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqq>(this, dst, src, SSE4_1); + liftoff::EmitAllTrue<&MacroAssembler::Pcmpeqq>(this, dst, src, SSE4_1); } void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs, @@ -4591,7 +4591,7 @@ void LiftoffAssembler::CallTrapCallbackForTesting() { } void LiftoffAssembler::AssertUnreachable(AbortReason reason) { - TurboAssembler::AssertUnreachable(reason); + MacroAssembler::AssertUnreachable(reason); } void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { diff --git a/src/wasm/baseline/liftoff-assembler.cc b/src/wasm/baseline/liftoff-assembler.cc index 1e3a861df2..8383b43c40 100644 --- a/src/wasm/baseline/liftoff-assembler.cc +++ b/src/wasm/baseline/liftoff-assembler.cc @@ -521,7 +521,7 @@ void LiftoffAssembler::CacheState::InitMerge(const CacheState& source, kRegistersAllowed, kReuseRegisters, used_regs); } -void LiftoffAssembler::CacheState::Steal(const CacheState& source) { +void LiftoffAssembler::CacheState::Steal(CacheState& source) { // Just use the move assignment operator. *this = std::move(source); } @@ -610,7 +610,7 @@ AssemblerOptions DefaultLiftoffOptions() { return AssemblerOptions{}; } } // namespace LiftoffAssembler::LiftoffAssembler(std::unique_ptr<AssemblerBuffer> buffer) - : TurboAssembler(nullptr, DefaultLiftoffOptions(), CodeObjectRequired::kNo, + : MacroAssembler(nullptr, DefaultLiftoffOptions(), CodeObjectRequired::kNo, std::move(buffer)) { set_abort_hard(true); // Avoid calls to Abort.
} @@ -621,22 +621,11 @@ LiftoffAssembler::~LiftoffAssembler() { } } -LiftoffRegister LiftoffAssembler::LoadToRegister(VarState slot, - LiftoffRegList pinned) { - if (slot.is_reg()) return slot.reg(); - LiftoffRegister reg = GetUnusedRegister(reg_class_for(slot.kind()), pinned); - return LoadToRegister(slot, reg); -} - -LiftoffRegister LiftoffAssembler::LoadToRegister(VarState slot, - LiftoffRegister reg) { - if (slot.is_const()) { - LoadConstant(reg, slot.constant()); - } else { - DCHECK(slot.is_stack()); - Fill(reg, slot.offset(), slot.kind()); - } - return reg; +void LiftoffAssembler::LoadToRegister(VarState slot, LiftoffRegList pinned, + LiftoffRegister* out_reg) { + DCHECK(!slot.is_reg()); + *out_reg = GetUnusedRegister(reg_class_for(slot.kind()), pinned); + LoadToFixedRegister(slot, *out_reg); } LiftoffRegister LiftoffAssembler::LoadI64HalfIntoRegister(VarState slot, @@ -661,9 +650,7 @@ LiftoffRegister LiftoffAssembler::PeekToRegister(int index, LiftoffRegList pinned) { DCHECK_LT(index, cache_state_.stack_state.size()); VarState& slot = cache_state_.stack_state.end()[-1 - index]; - if (slot.is_reg()) { - return slot.reg(); - } + if (V8_LIKELY(slot.is_reg())) return slot.reg(); LiftoffRegister reg = LoadToRegister(slot, pinned); cache_state_.inc_used(reg); slot.MakeRegister(reg); @@ -1317,11 +1304,10 @@ bool LiftoffAssembler::ValidateCacheState() const { } #endif -LiftoffRegister LiftoffAssembler::SpillOneRegister(LiftoffRegList candidates) { - // Spill one cached value to free a register. - LiftoffRegister spill_reg = cache_state_.GetNextSpillReg(candidates); - SpillRegister(spill_reg); - return spill_reg; +void LiftoffAssembler::SpillOneRegister(LiftoffRegList candidates, + LiftoffRegister* spilled_reg) { + *spilled_reg = cache_state_.GetNextSpillReg(candidates); + SpillRegister(*spilled_reg); } LiftoffRegister LiftoffAssembler::SpillAdjacentFpRegisters( diff --git a/src/wasm/baseline/liftoff-assembler.h b/src/wasm/baseline/liftoff-assembler.h index e0e9501bcd..a2996cc315 100644 --- a/src/wasm/baseline/liftoff-assembler.h +++ b/src/wasm/baseline/liftoff-assembler.h @@ -98,7 +98,7 @@ class FreezeCacheState { #endif }; -class LiftoffAssembler : public TurboAssembler { +class LiftoffAssembler : public MacroAssembler { public: // Each slot in our stack frame currently has exactly 8 bytes. static constexpr int kStackSlotSize = 8; @@ -457,7 +457,7 @@ class LiftoffAssembler : public TurboAssembler { void InitMerge(const CacheState& source, uint32_t num_locals, uint32_t arity, uint32_t stack_depth); - void Steal(const CacheState& source); + void Steal(CacheState& source); void Split(const CacheState& source); @@ -473,15 +473,41 @@ class LiftoffAssembler : public TurboAssembler { explicit LiftoffAssembler(std::unique_ptr<AssemblerBuffer>); ~LiftoffAssembler() override; - LiftoffRegister LoadToRegister(VarState slot, LiftoffRegList pinned); + // Load a cache slot to a free register. + V8_INLINE LiftoffRegister LoadToRegister(VarState slot, + LiftoffRegList pinned) { + if (V8_LIKELY(slot.is_reg())) return slot.reg(); + // TODO(clemensb): Remove this hack once https://reviews.llvm.org/D141020 is + // available. + std::aligned_storage_t<sizeof(LiftoffRegister), alignof(LiftoffRegister)> + reg_storage; + LiftoffRegister* out_reg = reinterpret_cast<LiftoffRegister*>(&reg_storage); + LoadToRegister(slot, pinned, out_reg); + return *out_reg; + } - LiftoffRegister LoadToRegister(VarState slot, LiftoffRegister dst); + // Slow path called for the method above. + // TODO(clemensb): Use a return value instead of output parameter once + // https://reviews.llvm.org/D141020 is available.
+ V8_NOINLINE V8_PRESERVE_MOST void LoadToRegister(VarState slot, + LiftoffRegList pinned, + LiftoffRegister* dst); - LiftoffRegister PopToRegister(LiftoffRegList pinned = {}) { + // Load a non-register cache slot to a given (fixed) register. + void LoadToFixedRegister(VarState slot, LiftoffRegister reg) { + DCHECK(slot.is_const() || slot.is_stack()); + if (slot.is_const()) { + LoadConstant(reg, slot.constant()); + } else { + Fill(reg, slot.offset(), slot.kind()); + } + } + + V8_INLINE LiftoffRegister PopToRegister(LiftoffRegList pinned = {}) { DCHECK(!cache_state_.stack_state.empty()); VarState slot = cache_state_.stack_state.back(); cache_state_.stack_state.pop_back(); - if (slot.is_reg()) { + if (V8_LIKELY(slot.is_reg())) { cache_state_.dec_used(slot.reg()); return slot.reg(); } @@ -492,7 +518,7 @@ class LiftoffAssembler : public TurboAssembler { DCHECK(!cache_state_.stack_state.empty()); VarState slot = cache_state_.stack_state.back(); cache_state_.stack_state.pop_back(); - if (slot.is_reg()) { + if (V8_LIKELY(slot.is_reg())) { cache_state_.dec_used(slot.reg()); if (slot.reg() == reg) return; if (cache_state_.is_used(reg)) SpillRegister(reg); @@ -500,7 +526,7 @@ class LiftoffAssembler : public TurboAssembler { return; } if (cache_state_.is_used(reg)) SpillRegister(reg); - LoadToRegister(slot, reg); + LoadToFixedRegister(slot, reg); } // Use this to pop a value into a register that has no other uses, so it @@ -580,7 +606,7 @@ class LiftoffAssembler : public TurboAssembler { cache_state_.stack_state.emplace_back(kind, NextSpillOffset(kind)); } - void SpillRegister(LiftoffRegister); + V8_NOINLINE V8_PRESERVE_MOST void SpillRegister(LiftoffRegister); uint32_t GetNumUses(LiftoffRegister reg) const { return cache_state_.get_use_count(reg); @@ -625,13 +651,19 @@ class LiftoffAssembler : public TurboAssembler { LiftoffRegister GetUnusedRegister(LiftoffRegList candidates) { DCHECK(!cache_state_.frozen); DCHECK(!candidates.is_empty()); - if (cache_state_.has_unused_register(candidates)) { + if (V8_LIKELY(cache_state_.has_unused_register(candidates))) { return cache_state_.unused_register(candidates); } if (cache_state_.has_volatile_register(candidates)) { return cache_state_.take_volatile_register(candidates); } - return SpillOneRegister(candidates); + // TODO(clemensb): Remove this hack once https://reviews.llvm.org/D141020 is + // available. + std::aligned_storage_t<sizeof(LiftoffRegister), alignof(LiftoffRegister)> + reg_storage; + LiftoffRegister* out_reg = reinterpret_cast<LiftoffRegister*>(&reg_storage); + SpillOneRegister(candidates, out_reg); + return *out_reg; } // Performs operations on locals and the top {arity} value stack entries @@ -1652,7 +1684,11 @@ class LiftoffAssembler : public TurboAssembler { private: LiftoffRegister LoadI64HalfIntoRegister(VarState slot, RegPairHalf half); - V8_NOINLINE LiftoffRegister SpillOneRegister(LiftoffRegList candidates); + // Spill one of the candidate registers. + // TODO(clemensb): Use return value instead of output parameter once + // https://reviews.llvm.org/D141020 is available. + V8_NOINLINE V8_PRESERVE_MOST void SpillOneRegister( + LiftoffRegList candidates, LiftoffRegister* spilled_reg); // Spill one or two fp registers to get a pair of adjacent fp registers.
LiftoffRegister SpillAdjacentFpRegisters(LiftoffRegList pinned); diff --git a/src/wasm/baseline/liftoff-compiler.cc b/src/wasm/baseline/liftoff-compiler.cc index e916ce1ca5..1c47032861 100644 --- a/src/wasm/baseline/liftoff-compiler.cc +++ b/src/wasm/baseline/liftoff-compiler.cc @@ -1405,7 +1405,7 @@ class LiftoffCompiler { } __ bind(&block->try_info->catch_label); - __ cache_state()->Steal(block->try_info->catch_state); + __ cache_state()->Split(block->try_info->catch_state); if (!block->try_info->in_handler) { block->try_info->in_handler = true; num_exceptions_++; @@ -7045,7 +7045,22 @@ class LiftoffCompiler { } void StringAsWtf16(FullDecoder* decoder, const Value& str, Value* result) { - RefAsNonNull(decoder, str, result); + LiftoffRegList pinned; + + LiftoffRegister str_reg = pinned.set(__ PopToRegister(pinned)); + MaybeEmitNullCheck(decoder, str_reg.gp(), pinned, str.type); + LiftoffAssembler::VarState str_var(kRef, str_reg, 0); + + CallRuntimeStub(WasmCode::kWasmStringAsWtf16, + MakeSig::Returns(kRef).Params(kRef), + { + str_var, + }, + decoder->position()); + RegisterDebugSideTableEntry(decoder, DebugSideTableBuilder::kDidSpill); + + LiftoffRegister result_reg(kReturnRegister0); + __ PushRegister(kRef, result_reg); } void StringViewWtf16GetCodeUnit(FullDecoder* decoder, const Value& view, @@ -7293,6 +7308,22 @@ class LiftoffCompiler { __ PushRegister(kRef, result_reg); } + void StringHash(FullDecoder* decoder, const Value& string, Value* result) { + LiftoffRegList pinned; + LiftoffRegister string_reg = pinned.set( + __ LoadToRegister(__ cache_state()->stack_state.end()[-1], pinned)); + MaybeEmitNullCheck(decoder, string_reg.gp(), pinned, string.type); + LiftoffAssembler::VarState string_var(kRef, string_reg, 0); + + CallRuntimeStub(WasmCode::kWasmStringHash, + MakeSig::Returns(kI32).Params(kRef), {string_var}, + decoder->position()); + + LiftoffRegister result_reg(kReturnRegister0); + __ DropValues(1); + __ PushRegister(kI32, result_reg); + } + void Forward(FullDecoder* decoder, const Value& from, Value* to) { // Nothing to do here. } diff --git a/src/wasm/baseline/loong64/liftoff-assembler-loong64.h b/src/wasm/baseline/loong64/liftoff-assembler-loong64.h index c439cb5b8a..3ff5c2bace 100644 --- a/src/wasm/baseline/loong64/liftoff-assembler-loong64.h +++ b/src/wasm/baseline/loong64/liftoff-assembler-loong64.h @@ -222,7 +222,7 @@ void LiftoffAssembler::PatchPrepareStackFrame( // We can't run out of space, just pass anything big enough to not cause the // assembler to try to grow the buffer. 
constexpr int kAvailableSpace = 256; - TurboAssembler patching_assembler( + MacroAssembler patching_assembler( nullptr, AssemblerOptions{}, CodeObjectRequired::kNo, ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace)); @@ -313,16 +313,16 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value, RelocInfo::Mode rmode) { switch (value.type().kind()) { case kI32: - TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode)); + MacroAssembler::li(reg.gp(), Operand(value.to_i32(), rmode)); break; case kI64: - TurboAssembler::li(reg.gp(), Operand(value.to_i64(), rmode)); + MacroAssembler::li(reg.gp(), Operand(value.to_i64(), rmode)); break; case kF32: - TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits()); + MacroAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits()); break; case kF64: - TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits()); + MacroAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits()); break; default: UNREACHABLE(); @@ -441,27 +441,27 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr, break; case LoadType::kI32Load16U: case LoadType::kI64Load16U: - TurboAssembler::Ld_hu(dst.gp(), src_op); + MacroAssembler::Ld_hu(dst.gp(), src_op); break; case LoadType::kI32Load16S: case LoadType::kI64Load16S: - TurboAssembler::Ld_h(dst.gp(), src_op); + MacroAssembler::Ld_h(dst.gp(), src_op); break; case LoadType::kI64Load32U: - TurboAssembler::Ld_wu(dst.gp(), src_op); + MacroAssembler::Ld_wu(dst.gp(), src_op); break; case LoadType::kI32Load: case LoadType::kI64Load32S: - TurboAssembler::Ld_w(dst.gp(), src_op); + MacroAssembler::Ld_w(dst.gp(), src_op); break; case LoadType::kI64Load: - TurboAssembler::Ld_d(dst.gp(), src_op); + MacroAssembler::Ld_d(dst.gp(), src_op); break; case LoadType::kF32Load: - TurboAssembler::Fld_s(dst.fp(), src_op); + MacroAssembler::Fld_s(dst.fp(), src_op); break; case LoadType::kF64Load: - TurboAssembler::Fld_d(dst.fp(), src_op); + MacroAssembler::Fld_d(dst.fp(), src_op); break; case LoadType::kS128Load: UNREACHABLE(); @@ -487,20 +487,20 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg, break; case StoreType::kI32Store16: case StoreType::kI64Store16: - TurboAssembler::St_h(src.gp(), dst_op); + MacroAssembler::St_h(src.gp(), dst_op); break; case StoreType::kI32Store: case StoreType::kI64Store32: - TurboAssembler::St_w(src.gp(), dst_op); + MacroAssembler::St_w(src.gp(), dst_op); break; case StoreType::kI64Store: - TurboAssembler::St_d(src.gp(), dst_op); + MacroAssembler::St_d(src.gp(), dst_op); break; case StoreType::kF32Store: - TurboAssembler::Fst_s(src.fp(), dst_op); + MacroAssembler::Fst_s(src.fp(), dst_op); break; case StoreType::kF64Store: - TurboAssembler::Fst_d(src.fp(), dst_op); + MacroAssembler::Fst_d(src.fp(), dst_op); break; case StoreType::kS128Store: UNREACHABLE(); @@ -887,14 +887,14 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset, void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) { DCHECK_NE(dst, src); // TODO(ksreten): Handle different sizes here. 
- TurboAssembler::Move(dst, src); + MacroAssembler::Move(dst, src); } void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, ValueKind kind) { DCHECK_NE(dst, src); if (kind != kS128) { - TurboAssembler::Move(dst, src); + MacroAssembler::Move(dst, src); } else { UNREACHABLE(); } @@ -917,7 +917,7 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) { Fst_s(reg.fp(), dst); break; case kF64: - TurboAssembler::Fst_d(reg.fp(), dst); + MacroAssembler::Fst_d(reg.fp(), dst); break; case kS128: UNREACHABLE(); @@ -934,7 +934,7 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) { case kI32: { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); - TurboAssembler::li(scratch, Operand(value.to_i32())); + MacroAssembler::li(scratch, Operand(value.to_i32())); St_w(scratch, dst); break; } @@ -943,7 +943,7 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) { case kRefNull: { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); - TurboAssembler::li(scratch, value.to_i64()); + MacroAssembler::li(scratch, value.to_i64()); St_d(scratch, dst); break; } @@ -971,7 +971,7 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) { Fld_s(reg.fp(), src); break; case kF64: - TurboAssembler::Fld_d(reg.fp(), src); + MacroAssembler::Fld_d(reg.fp(), src); break; case kS128: UNREACHABLE(); @@ -1023,16 +1023,16 @@ void LiftoffAssembler::LoadSpillAddress(Register dst, int offset, } void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) { - TurboAssembler::Clz_d(dst.gp(), src.gp()); + MacroAssembler::Clz_d(dst.gp(), src.gp()); } void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) { - TurboAssembler::Ctz_d(dst.gp(), src.gp()); + MacroAssembler::Ctz_d(dst.gp(), src.gp()); } bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst, LiftoffRegister src) { - TurboAssembler::Popcnt_d(dst.gp(), src.gp()); + MacroAssembler::Popcnt_d(dst.gp(), src.gp()); return true; } @@ -1046,42 +1046,42 @@ void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) { } void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) { - TurboAssembler::Mul_w(dst, lhs, rhs); + MacroAssembler::Mul_w(dst, lhs, rhs); } void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs, Label* trap_div_by_zero, Label* trap_div_unrepresentable) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); // Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable. 
- TurboAssembler::li(kScratchReg, 1); - TurboAssembler::li(kScratchReg2, 1); - TurboAssembler::LoadZeroOnCondition(kScratchReg, lhs, Operand(kMinInt), eq); - TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs, Operand(-1), eq); + MacroAssembler::li(kScratchReg, 1); + MacroAssembler::li(kScratchReg2, 1); + MacroAssembler::LoadZeroOnCondition(kScratchReg, lhs, Operand(kMinInt), eq); + MacroAssembler::LoadZeroOnCondition(kScratchReg2, rhs, Operand(-1), eq); add_d(kScratchReg, kScratchReg, kScratchReg2); - TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, + MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, Operand(zero_reg)); - TurboAssembler::Div_w(dst, lhs, rhs); + MacroAssembler::Div_w(dst, lhs, rhs); } void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); - TurboAssembler::Div_wu(dst, lhs, rhs); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); + MacroAssembler::Div_wu(dst, lhs, rhs); } void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); - TurboAssembler::Mod_w(dst, lhs, rhs); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); + MacroAssembler::Mod_w(dst, lhs, rhs); } void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); - TurboAssembler::Mod_wu(dst, lhs, rhs); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); + MacroAssembler::Mod_wu(dst, lhs, rhs); } #define I32_BINOP(name, instruction) \ @@ -1117,15 +1117,15 @@ I32_BINOP_I(xor, Xor) #undef I32_BINOP_I void LiftoffAssembler::emit_i32_clz(Register dst, Register src) { - TurboAssembler::Clz_w(dst, src); + MacroAssembler::Clz_w(dst, src); } void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) { - TurboAssembler::Ctz_w(dst, src); + MacroAssembler::Ctz_w(dst, src); } bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) { - TurboAssembler::Popcnt_w(dst, src); + MacroAssembler::Popcnt_w(dst, src); return true; } @@ -1150,55 +1150,55 @@ I32_SHIFTOP_I(shr, srl_w, srli_w) void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs, int64_t imm) { - TurboAssembler::Add_d(dst.gp(), lhs.gp(), Operand(imm)); + MacroAssembler::Add_d(dst.gp(), lhs.gp(), Operand(imm)); } void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { - TurboAssembler::Mul_d(dst.gp(), lhs.gp(), rhs.gp()); + MacroAssembler::Mul_d(dst.gp(), lhs.gp(), rhs.gp()); } bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label* trap_div_by_zero, Label* trap_div_unrepresentable) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); // Check if lhs == MinInt64 and rhs == -1, since this case is unrepresentable. 
- TurboAssembler::li(kScratchReg, 1); - TurboAssembler::li(kScratchReg2, 1); - TurboAssembler::LoadZeroOnCondition( + MacroAssembler::li(kScratchReg, 1); + MacroAssembler::li(kScratchReg2, 1); + MacroAssembler::LoadZeroOnCondition( kScratchReg, lhs.gp(), Operand(std::numeric_limits::min()), eq); - TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs.gp(), Operand(-1), eq); + MacroAssembler::LoadZeroOnCondition(kScratchReg2, rhs.gp(), Operand(-1), eq); add_d(kScratchReg, kScratchReg, kScratchReg2); - TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, + MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, Operand(zero_reg)); - TurboAssembler::Div_d(dst.gp(), lhs.gp(), rhs.gp()); + MacroAssembler::Div_d(dst.gp(), lhs.gp(), rhs.gp()); return true; } bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); - TurboAssembler::Div_du(dst.gp(), lhs.gp(), rhs.gp()); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); + MacroAssembler::Div_du(dst.gp(), lhs.gp(), rhs.gp()); return true; } bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); - TurboAssembler::Mod_d(dst.gp(), lhs.gp(), rhs.gp()); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); + MacroAssembler::Mod_d(dst.gp(), lhs.gp(), rhs.gp()); return true; } bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); - TurboAssembler::Mod_du(dst.gp(), lhs.gp(), rhs.gp()); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); + MacroAssembler::Mod_du(dst.gp(), lhs.gp(), rhs.gp()); return true; } @@ -1256,32 +1256,32 @@ void LiftoffAssembler::emit_u32_to_uintptr(Register dst, Register src) { } void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) { - TurboAssembler::Neg_s(dst, src); + MacroAssembler::Neg_s(dst, src); } void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) { - TurboAssembler::Neg_d(dst, src); + MacroAssembler::Neg_d(dst, src); } void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { Label ool, done; - TurboAssembler::Float32Min(dst, lhs, rhs, &ool); + MacroAssembler::Float32Min(dst, lhs, rhs, &ool); Branch(&done); bind(&ool); - TurboAssembler::Float32MinOutOfLine(dst, lhs, rhs); + MacroAssembler::Float32MinOutOfLine(dst, lhs, rhs); bind(&done); } void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { Label ool, done; - TurboAssembler::Float32Max(dst, lhs, rhs, &ool); + MacroAssembler::Float32Max(dst, lhs, rhs, &ool); Branch(&done); bind(&ool); - TurboAssembler::Float32MaxOutOfLine(dst, lhs, rhs); + MacroAssembler::Float32MaxOutOfLine(dst, lhs, rhs); bind(&done); } @@ -1293,22 +1293,22 @@ void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs, void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { Label ool, done; - TurboAssembler::Float64Min(dst, lhs, rhs, &ool); + MacroAssembler::Float64Min(dst, lhs, rhs, &ool); Branch(&done); bind(&ool); - TurboAssembler::Float64MinOutOfLine(dst, lhs, rhs); + 
MacroAssembler::Float64MinOutOfLine(dst, lhs, rhs); bind(&done); } void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { Label ool, done; - TurboAssembler::Float64Max(dst, lhs, rhs, &ool); + MacroAssembler::Float64Max(dst, lhs, rhs, &ool); Branch(&done); bind(&ool); - TurboAssembler::Float64MaxOutOfLine(dst, lhs, rhs); + MacroAssembler::Float64MaxOutOfLine(dst, lhs, rhs); bind(&done); } @@ -1362,7 +1362,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, LiftoffRegister src, Label* trap) { switch (opcode) { case kExprI32ConvertI64: - TurboAssembler::bstrpick_w(dst.gp(), src.gp(), 31, 0); + MacroAssembler::bstrpick_w(dst.gp(), src.gp(), 31, 0); return true; case kExprI32SConvertF32: { LiftoffRegister rounded = GetUnusedRegister(kFpReg, LiftoffRegList{src}); @@ -1370,20 +1370,20 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded}); // Real conversion. - TurboAssembler::Trunc_s(rounded.fp(), src.fp()); + MacroAssembler::Trunc_s(rounded.fp(), src.fp()); ftintrz_w_s(kScratchDoubleReg, rounded.fp()); movfr2gr_s(dst.gp(), kScratchDoubleReg); // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead, // because INT32_MIN allows easier out-of-bounds detection. - TurboAssembler::Add_w(kScratchReg, dst.gp(), 1); - TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); - TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); + MacroAssembler::Add_w(kScratchReg, dst.gp(), 1); + MacroAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); + MacroAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); // Checking if trap. movgr2fr_w(kScratchDoubleReg, dst.gp()); ffint_s_w(converted_back.fp(), kScratchDoubleReg); - TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ); - TurboAssembler::BranchFalseF(trap); + MacroAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ); + MacroAssembler::BranchFalseF(trap); return true; } case kExprI32UConvertF32: { @@ -1392,18 +1392,18 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded}); // Real conversion. - TurboAssembler::Trunc_s(rounded.fp(), src.fp()); - TurboAssembler::Ftintrz_uw_s(dst.gp(), rounded.fp(), kScratchDoubleReg); + MacroAssembler::Trunc_s(rounded.fp(), src.fp()); + MacroAssembler::Ftintrz_uw_s(dst.gp(), rounded.fp(), kScratchDoubleReg); // Avoid UINT32_MAX as an overflow indicator and use 0 instead, // because 0 allows easier out-of-bounds detection. - TurboAssembler::Add_w(kScratchReg, dst.gp(), 1); - TurboAssembler::Movz(dst.gp(), zero_reg, kScratchReg); + MacroAssembler::Add_w(kScratchReg, dst.gp(), 1); + MacroAssembler::Movz(dst.gp(), zero_reg, kScratchReg); // Checking if trap. - TurboAssembler::Ffint_d_uw(converted_back.fp(), dst.gp()); + MacroAssembler::Ffint_d_uw(converted_back.fp(), dst.gp()); fcvt_s_d(converted_back.fp(), converted_back.fp()); - TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ); - TurboAssembler::BranchFalseF(trap); + MacroAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ); + MacroAssembler::BranchFalseF(trap); return true; } case kExprI32SConvertF64: { @@ -1412,14 +1412,14 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded}); // Real conversion. 
- TurboAssembler::Trunc_d(rounded.fp(), src.fp()); + MacroAssembler::Trunc_d(rounded.fp(), src.fp()); ftintrz_w_d(kScratchDoubleReg, rounded.fp()); movfr2gr_s(dst.gp(), kScratchDoubleReg); // Checking if trap. ffint_d_w(converted_back.fp(), kScratchDoubleReg); - TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ); - TurboAssembler::BranchFalseF(trap); + MacroAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ); + MacroAssembler::BranchFalseF(trap); return true; } case kExprI32UConvertF64: { @@ -1428,23 +1428,23 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded}); // Real conversion. - TurboAssembler::Trunc_d(rounded.fp(), src.fp()); - TurboAssembler::Ftintrz_uw_d(dst.gp(), rounded.fp(), kScratchDoubleReg); + MacroAssembler::Trunc_d(rounded.fp(), src.fp()); + MacroAssembler::Ftintrz_uw_d(dst.gp(), rounded.fp(), kScratchDoubleReg); // Checking if trap. - TurboAssembler::Ffint_d_uw(converted_back.fp(), dst.gp()); - TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ); - TurboAssembler::BranchFalseF(trap); + MacroAssembler::Ffint_d_uw(converted_back.fp(), dst.gp()); + MacroAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ); + MacroAssembler::BranchFalseF(trap); return true; } case kExprI32ReinterpretF32: - TurboAssembler::FmoveLow(dst.gp(), src.fp()); + MacroAssembler::FmoveLow(dst.gp(), src.fp()); return true; case kExprI64SConvertI32: slli_w(dst.gp(), src.gp(), 0); return true; case kExprI64UConvertI32: - TurboAssembler::bstrpick_d(dst.gp(), src.gp(), 31, 0); + MacroAssembler::bstrpick_d(dst.gp(), src.gp(), 31, 0); return true; case kExprI64SConvertF32: { LiftoffRegister rounded = GetUnusedRegister(kFpReg, LiftoffRegList{src}); @@ -1452,29 +1452,29 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded}); // Real conversion. - TurboAssembler::Trunc_s(rounded.fp(), src.fp()); + MacroAssembler::Trunc_s(rounded.fp(), src.fp()); ftintrz_l_s(kScratchDoubleReg, rounded.fp()); movfr2gr_d(dst.gp(), kScratchDoubleReg); // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead, // because INT64_MIN allows easier out-of-bounds detection. - TurboAssembler::Add_d(kScratchReg, dst.gp(), 1); - TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); - TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); + MacroAssembler::Add_d(kScratchReg, dst.gp(), 1); + MacroAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); + MacroAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); // Checking if trap. movgr2fr_d(kScratchDoubleReg, dst.gp()); ffint_s_l(converted_back.fp(), kScratchDoubleReg); - TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ); - TurboAssembler::BranchFalseF(trap); + MacroAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ); + MacroAssembler::BranchFalseF(trap); return true; } case kExprI64UConvertF32: { // Real conversion. - TurboAssembler::Ftintrz_ul_s(dst.gp(), src.fp(), kScratchDoubleReg, + MacroAssembler::Ftintrz_ul_s(dst.gp(), src.fp(), kScratchDoubleReg, kScratchReg); // Checking if trap. - TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); + MacroAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); return true; } case kExprI64SConvertF64: { @@ -1483,29 +1483,29 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded}); // Real conversion. 
- TurboAssembler::Trunc_d(rounded.fp(), src.fp()); + MacroAssembler::Trunc_d(rounded.fp(), src.fp()); ftintrz_l_d(kScratchDoubleReg, rounded.fp()); movfr2gr_d(dst.gp(), kScratchDoubleReg); // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead, // because INT64_MIN allows easier out-of-bounds detection. - TurboAssembler::Add_d(kScratchReg, dst.gp(), 1); - TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); - TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); + MacroAssembler::Add_d(kScratchReg, dst.gp(), 1); + MacroAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); + MacroAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); // Checking if trap. movgr2fr_d(kScratchDoubleReg, dst.gp()); ffint_d_l(converted_back.fp(), kScratchDoubleReg); - TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ); - TurboAssembler::BranchFalseF(trap); + MacroAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ); + MacroAssembler::BranchFalseF(trap); return true; } case kExprI64UConvertF64: { // Real conversion. - TurboAssembler::Ftintrz_ul_d(dst.gp(), src.fp(), kScratchDoubleReg, + MacroAssembler::Ftintrz_ul_d(dst.gp(), src.fp(), kScratchDoubleReg, kScratchReg); // Checking if trap. - TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); + MacroAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); return true; } case kExprI64ReinterpretF64: @@ -1518,13 +1518,13 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, return true; } case kExprF32UConvertI32: - TurboAssembler::Ffint_s_uw(dst.fp(), src.gp()); + MacroAssembler::Ffint_s_uw(dst.fp(), src.gp()); return true; case kExprF32ConvertF64: fcvt_s_d(dst.fp(), src.fp()); return true; case kExprF32ReinterpretI32: - TurboAssembler::FmoveLow(dst.fp(), src.gp()); + MacroAssembler::FmoveLow(dst.fp(), src.gp()); return true; case kExprF64SConvertI32: { LiftoffRegister scratch = GetUnusedRegister(kFpReg, LiftoffRegList{dst}); @@ -1533,7 +1533,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, return true; } case kExprF64UConvertI32: - TurboAssembler::Ffint_d_uw(dst.fp(), src.gp()); + MacroAssembler::Ffint_d_uw(dst.fp(), src.gp()); return true; case kExprF64ConvertF32: fcvt_d_s(dst.fp(), src.fp()); @@ -1548,7 +1548,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, case kExprI32UConvertSatF32: { Label isnan_or_lessthan_or_equal_zero; mov(dst.gp(), zero_reg); - TurboAssembler::Move(kScratchDoubleReg, static_cast(0.0)); + MacroAssembler::Move(kScratchDoubleReg, static_cast(0.0)); CompareF32(src.fp(), kScratchDoubleReg, CULE); BranchTrueShortF(&isnan_or_lessthan_or_equal_zero); Ftintrz_uw_s(dst.gp(), src.fp(), kScratchDoubleReg); @@ -1562,7 +1562,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, case kExprI32UConvertSatF64: { Label isnan_or_lessthan_or_equal_zero; mov(dst.gp(), zero_reg); - TurboAssembler::Move(kScratchDoubleReg, static_cast(0.0)); + MacroAssembler::Move(kScratchDoubleReg, static_cast(0.0)); CompareF64(src.fp(), kScratchDoubleReg, CULE); BranchTrueShortF(&isnan_or_lessthan_or_equal_zero); Ftintrz_uw_d(dst.gp(), src.fp(), kScratchDoubleReg); @@ -1576,7 +1576,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, case kExprI64UConvertSatF32: { Label isnan_or_lessthan_or_equal_zero; mov(dst.gp(), zero_reg); - TurboAssembler::Move(kScratchDoubleReg, static_cast(0.0)); + MacroAssembler::Move(kScratchDoubleReg, static_cast(0.0)); CompareF32(src.fp(), kScratchDoubleReg, CULE); 
BranchTrueShortF(&isnan_or_lessthan_or_equal_zero); Ftintrz_ul_s(dst.gp(), src.fp(), kScratchDoubleReg); @@ -1590,7 +1590,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, case kExprI64UConvertSatF64: { Label isnan_or_lessthan_or_equal_zero; mov(dst.gp(), zero_reg); - TurboAssembler::Move(kScratchDoubleReg, static_cast(0.0)); + MacroAssembler::Move(kScratchDoubleReg, static_cast(0.0)); CompareF64(src.fp(), kScratchDoubleReg, CULE); BranchTrueShortF(&isnan_or_lessthan_or_equal_zero); Ftintrz_ul_d(dst.gp(), src.fp(), kScratchDoubleReg); @@ -1626,11 +1626,11 @@ void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst, } void LiftoffAssembler::emit_jump(Label* label) { - TurboAssembler::Branch(label); + MacroAssembler::Branch(label); } void LiftoffAssembler::emit_jump(Register target) { - TurboAssembler::Jump(target); + MacroAssembler::Jump(target); } void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label, @@ -1639,25 +1639,25 @@ void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label, const FreezeCacheState& frozen) { if (rhs == no_reg) { DCHECK(kind == kI32 || kind == kI64); - TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg)); + MacroAssembler::Branch(label, cond, lhs, Operand(zero_reg)); } else { DCHECK((kind == kI32 || kind == kI64) || (is_reference(kind) && (cond == kEqual || cond == kNotEqual))); - TurboAssembler::Branch(label, cond, lhs, Operand(rhs)); + MacroAssembler::Branch(label, cond, lhs, Operand(rhs)); } } void LiftoffAssembler::emit_i32_cond_jumpi(Condition cond, Label* label, Register lhs, int32_t imm, const FreezeCacheState& frozen) { - TurboAssembler::Branch(label, cond, lhs, Operand(imm)); + MacroAssembler::Branch(label, cond, lhs, Operand(imm)); } void LiftoffAssembler::emit_i32_subi_jump_negative( Register value, int subtrahend, Label* result_negative, const FreezeCacheState& frozen) { - TurboAssembler::Sub_d(value, value, Operand(subtrahend)); - TurboAssembler::Branch(result_negative, less, value, Operand(zero_reg)); + MacroAssembler::Sub_d(value, value, Operand(subtrahend)); + MacroAssembler::Branch(result_negative, less, value, Operand(zero_reg)); } void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) { @@ -1671,14 +1671,14 @@ void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst, tmp = GetUnusedRegister(kGpReg, LiftoffRegList{lhs, rhs}).gp(); } // Write 1 as result. - TurboAssembler::li(tmp, 1); + MacroAssembler::li(tmp, 1); // If negative condition is true, write 0 as result. Condition neg_cond = NegateCondition(cond); - TurboAssembler::LoadZeroOnCondition(tmp, lhs, Operand(rhs), neg_cond); + MacroAssembler::LoadZeroOnCondition(tmp, lhs, Operand(rhs), neg_cond); // If tmp != dst, result will be moved. - TurboAssembler::Move(dst, tmp); + MacroAssembler::Move(dst, tmp); } void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) { @@ -1693,15 +1693,15 @@ void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst, tmp = GetUnusedRegister(kGpReg, LiftoffRegList{lhs, rhs}).gp(); } // Write 1 as result. - TurboAssembler::li(tmp, 1); + MacroAssembler::li(tmp, 1); // If negative condition is true, write 0 as result. Condition neg_cond = NegateCondition(cond); - TurboAssembler::LoadZeroOnCondition(tmp, lhs.gp(), Operand(rhs.gp()), + MacroAssembler::LoadZeroOnCondition(tmp, lhs.gp(), Operand(rhs.gp()), neg_cond); // If tmp != dst, result will be moved. 
- TurboAssembler::Move(dst, tmp); + MacroAssembler::Move(dst, tmp); } namespace liftoff { @@ -1740,26 +1740,26 @@ void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst, DoubleRegister lhs, DoubleRegister rhs) { Label not_nan, cont; - TurboAssembler::CompareIsNanF32(lhs, rhs); - TurboAssembler::BranchFalseF(&not_nan); + MacroAssembler::CompareIsNanF32(lhs, rhs); + MacroAssembler::BranchFalseF(&not_nan); // If one of the operands is NaN, return 1 for f32.ne, else 0. if (cond == ne) { - TurboAssembler::li(dst, 1); + MacroAssembler::li(dst, 1); } else { - TurboAssembler::Move(dst, zero_reg); + MacroAssembler::Move(dst, zero_reg); } - TurboAssembler::Branch(&cont); + MacroAssembler::Branch(&cont); bind(&not_nan); - TurboAssembler::li(dst, 1); + MacroAssembler::li(dst, 1); bool predicate; FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate); - TurboAssembler::CompareF32(lhs, rhs, fcond); + MacroAssembler::CompareF32(lhs, rhs, fcond); if (predicate) { - TurboAssembler::LoadZeroIfNotFPUCondition(dst); + MacroAssembler::LoadZeroIfNotFPUCondition(dst); } else { - TurboAssembler::LoadZeroIfFPUCondition(dst); + MacroAssembler::LoadZeroIfFPUCondition(dst); } bind(&cont); @@ -1769,26 +1769,26 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst, DoubleRegister lhs, DoubleRegister rhs) { Label not_nan, cont; - TurboAssembler::CompareIsNanF64(lhs, rhs); - TurboAssembler::BranchFalseF(&not_nan); + MacroAssembler::CompareIsNanF64(lhs, rhs); + MacroAssembler::BranchFalseF(&not_nan); // If one of the operands is NaN, return 1 for f64.ne, else 0. if (cond == ne) { - TurboAssembler::li(dst, 1); + MacroAssembler::li(dst, 1); } else { - TurboAssembler::Move(dst, zero_reg); + MacroAssembler::Move(dst, zero_reg); } - TurboAssembler::Branch(&cont); + MacroAssembler::Branch(&cont); bind(&not_nan); - TurboAssembler::li(dst, 1); + MacroAssembler::li(dst, 1); bool predicate; FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate); - TurboAssembler::CompareF64(lhs, rhs, fcond); + MacroAssembler::CompareF64(lhs, rhs, fcond); if (predicate) { - TurboAssembler::LoadZeroIfNotFPUCondition(dst); + MacroAssembler::LoadZeroIfNotFPUCondition(dst); } else { - TurboAssembler::LoadZeroIfFPUCondition(dst); + MacroAssembler::LoadZeroIfFPUCondition(dst); } bind(&cont); @@ -3001,8 +3001,8 @@ void LiftoffAssembler::emit_f64x2_qfms(LiftoffRegister dst, } void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) { - TurboAssembler::Ld_d(limit_address, MemOperand(limit_address, 0)); - TurboAssembler::Branch(ool_code, ule, sp, Operand(limit_address)); + MacroAssembler::Ld_d(limit_address, MemOperand(limit_address, 0)); + MacroAssembler::Branch(ool_code, ule, sp, Operand(limit_address)); } void LiftoffAssembler::CallTrapCallbackForTesting() { @@ -3036,7 +3036,7 @@ void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { unsigned offset = 0; while (!fp_regs.is_empty()) { LiftoffRegister reg = fp_regs.GetFirstRegSet(); - TurboAssembler::Fst_d(reg.fp(), MemOperand(sp, offset)); + MacroAssembler::Fst_d(reg.fp(), MemOperand(sp, offset)); fp_regs.clear(reg); offset += slot_size; } @@ -3049,7 +3049,7 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { unsigned fp_offset = 0; while (!fp_regs.is_empty()) { LiftoffRegister reg = fp_regs.GetFirstRegSet(); - TurboAssembler::Fld_d(reg.fp(), MemOperand(sp, fp_offset)); + MacroAssembler::Fld_d(reg.fp(), MemOperand(sp, fp_offset)); fp_regs.clear(reg); fp_offset += 8; } @@ -3168,7 +3168,7 @@ void
LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) { void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) { addi_d(sp, sp, -size); - TurboAssembler::Move(addr, sp); + MacroAssembler::Move(addr, sp); } void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { diff --git a/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/src/wasm/baseline/mips64/liftoff-assembler-mips64.h index df772ab554..22eda69815 100644 --- a/src/wasm/baseline/mips64/liftoff-assembler-mips64.h +++ b/src/wasm/baseline/mips64/liftoff-assembler-mips64.h @@ -176,19 +176,19 @@ inline void ChangeEndiannessLoad(LiftoffAssembler* assm, LiftoffRegister dst, assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, dst); V8_FALLTHROUGH; case LoadType::kI64Load32U: - assm->TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 4); + assm->MacroAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 4); break; case LoadType::kI32Load: case LoadType::kI64Load32S: - assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4); + assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4); break; case LoadType::kI32Load16S: case LoadType::kI64Load16S: - assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2); + assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2); break; case LoadType::kI32Load16U: case LoadType::kI64Load16U: - assm->TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 2); + assm->MacroAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 2); break; case LoadType::kF64Load: is_float = true; @@ -196,7 +196,7 @@ inline void ChangeEndiannessLoad(LiftoffAssembler* assm, LiftoffRegister dst, assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, dst); V8_FALLTHROUGH; case LoadType::kI64Load: - assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8); + assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8); break; default: UNREACHABLE(); @@ -231,10 +231,10 @@ inline void ChangeEndiannessStore(LiftoffAssembler* assm, LiftoffRegister src, assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, src); V8_FALLTHROUGH; case StoreType::kI32Store: - assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4); + assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4); break; case StoreType::kI32Store16: - assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2); + assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2); break; case StoreType::kF64Store: is_float = true; @@ -242,13 +242,13 @@ inline void ChangeEndiannessStore(LiftoffAssembler* assm, LiftoffRegister src, assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, src); V8_FALLTHROUGH; case StoreType::kI64Store: - assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8); + assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 8); break; case StoreType::kI64Store32: - assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4); + assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4); break; case StoreType::kI64Store16: - assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2); + assm->MacroAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2); break; default: UNREACHABLE(); @@ -340,7 +340,7 @@ void LiftoffAssembler::PatchPrepareStackFrame( // We can't run out of space, just pass anything big enough to not cause the // assembler to try to grow the buffer. 
constexpr int kAvailableSpace = 256; - TurboAssembler patching_assembler( + MacroAssembler patching_assembler( nullptr, AssemblerOptions{}, CodeObjectRequired::kNo, ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace)); @@ -429,16 +429,16 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value, RelocInfo::Mode rmode) { switch (value.type().kind()) { case kI32: - TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode)); + MacroAssembler::li(reg.gp(), Operand(value.to_i32(), rmode)); break; case kI64: - TurboAssembler::li(reg.gp(), Operand(value.to_i64(), rmode)); + MacroAssembler::li(reg.gp(), Operand(value.to_i64(), rmode)); break; case kF32: - TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits()); + MacroAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits()); break; case kF64: - TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits()); + MacroAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits()); break; default: UNREACHABLE(); @@ -547,30 +547,30 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr, break; case LoadType::kI32Load16U: case LoadType::kI64Load16U: - TurboAssembler::Ulhu(dst.gp(), src_op); + MacroAssembler::Ulhu(dst.gp(), src_op); break; case LoadType::kI32Load16S: case LoadType::kI64Load16S: - TurboAssembler::Ulh(dst.gp(), src_op); + MacroAssembler::Ulh(dst.gp(), src_op); break; case LoadType::kI64Load32U: - TurboAssembler::Ulwu(dst.gp(), src_op); + MacroAssembler::Ulwu(dst.gp(), src_op); break; case LoadType::kI32Load: case LoadType::kI64Load32S: - TurboAssembler::Ulw(dst.gp(), src_op); + MacroAssembler::Ulw(dst.gp(), src_op); break; case LoadType::kI64Load: - TurboAssembler::Uld(dst.gp(), src_op); + MacroAssembler::Uld(dst.gp(), src_op); break; case LoadType::kF32Load: - TurboAssembler::Ulwc1(dst.fp(), src_op, t8); + MacroAssembler::Ulwc1(dst.fp(), src_op, t8); break; case LoadType::kF64Load: - TurboAssembler::Uldc1(dst.fp(), src_op, t8); + MacroAssembler::Uldc1(dst.fp(), src_op, t8); break; case LoadType::kS128Load: - TurboAssembler::ld_b(dst.fp().toW(), src_op); + MacroAssembler::ld_b(dst.fp().toW(), src_op); break; default: UNREACHABLE(); @@ -613,23 +613,23 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg, break; case StoreType::kI32Store16: case StoreType::kI64Store16: - TurboAssembler::Ush(src.gp(), dst_op, t8); + MacroAssembler::Ush(src.gp(), dst_op, t8); break; case StoreType::kI32Store: case StoreType::kI64Store32: - TurboAssembler::Usw(src.gp(), dst_op); + MacroAssembler::Usw(src.gp(), dst_op); break; case StoreType::kI64Store: - TurboAssembler::Usd(src.gp(), dst_op); + MacroAssembler::Usd(src.gp(), dst_op); break; case StoreType::kF32Store: - TurboAssembler::Uswc1(src.fp(), dst_op, t8); + MacroAssembler::Uswc1(src.fp(), dst_op, t8); break; case StoreType::kF64Store: - TurboAssembler::Usdc1(src.fp(), dst_op, t8); + MacroAssembler::Usdc1(src.fp(), dst_op, t8); break; case StoreType::kS128Store: - TurboAssembler::st_b(src.fp().toW(), dst_op); + MacroAssembler::st_b(src.fp().toW(), dst_op); break; default: UNREACHABLE(); @@ -987,16 +987,16 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset, void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) { DCHECK_NE(dst, src); // TODO(ksreten): Handle different sizes here. 
- TurboAssembler::Move(dst, src); + MacroAssembler::Move(dst, src); } void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, ValueKind kind) { DCHECK_NE(dst, src); if (kind != kS128) { - TurboAssembler::Move(dst, src); + MacroAssembler::Move(dst, src); } else { - TurboAssembler::move_v(dst.toW(), src.toW()); + MacroAssembler::move_v(dst.toW(), src.toW()); } } @@ -1017,10 +1017,10 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) { Swc1(reg.fp(), dst); break; case kF64: - TurboAssembler::Sdc1(reg.fp(), dst); + MacroAssembler::Sdc1(reg.fp(), dst); break; case kS128: - TurboAssembler::st_b(reg.fp().toW(), dst); + MacroAssembler::st_b(reg.fp().toW(), dst); break; default: UNREACHABLE(); @@ -1032,14 +1032,14 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) { MemOperand dst = liftoff::GetStackSlot(offset); switch (value.type().kind()) { case kI32: { - TurboAssembler::li(kScratchReg, Operand(value.to_i32())); + MacroAssembler::li(kScratchReg, Operand(value.to_i32())); Sw(kScratchReg, dst); break; } case kI64: case kRef: case kRefNull: { - TurboAssembler::li(kScratchReg, value.to_i64()); + MacroAssembler::li(kScratchReg, value.to_i64()); Sd(kScratchReg, dst); break; } @@ -1065,10 +1065,10 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) { Lwc1(reg.fp(), src); break; case kF64: - TurboAssembler::Ldc1(reg.fp(), src); + MacroAssembler::Ldc1(reg.fp(), src); break; case kS128: - TurboAssembler::ld_b(reg.fp().toW(), src); + MacroAssembler::ld_b(reg.fp().toW(), src); break; default: UNREACHABLE(); @@ -1117,16 +1117,16 @@ void LiftoffAssembler::LoadSpillAddress(Register dst, int offset, } void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) { - TurboAssembler::Dclz(dst.gp(), src.gp()); + MacroAssembler::Dclz(dst.gp(), src.gp()); } void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) { - TurboAssembler::Dctz(dst.gp(), src.gp()); + MacroAssembler::Dctz(dst.gp(), src.gp()); } bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst, LiftoffRegister src) { - TurboAssembler::Dpopcnt(dst.gp(), src.gp()); + MacroAssembler::Dpopcnt(dst.gp(), src.gp()); return true; } @@ -1140,42 +1140,42 @@ void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) { } void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) { - TurboAssembler::Mul(dst, lhs, rhs); + MacroAssembler::Mul(dst, lhs, rhs); } void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs, Label* trap_div_by_zero, Label* trap_div_unrepresentable) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); // Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable. 
- TurboAssembler::li(kScratchReg, 1); - TurboAssembler::li(kScratchReg2, 1); - TurboAssembler::LoadZeroOnCondition(kScratchReg, lhs, Operand(kMinInt), eq); - TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs, Operand(-1), eq); + MacroAssembler::li(kScratchReg, 1); + MacroAssembler::li(kScratchReg2, 1); + MacroAssembler::LoadZeroOnCondition(kScratchReg, lhs, Operand(kMinInt), eq); + MacroAssembler::LoadZeroOnCondition(kScratchReg2, rhs, Operand(-1), eq); daddu(kScratchReg, kScratchReg, kScratchReg2); - TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, + MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, Operand(zero_reg)); - TurboAssembler::Div(dst, lhs, rhs); + MacroAssembler::Div(dst, lhs, rhs); } void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); - TurboAssembler::Divu(dst, lhs, rhs); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); + MacroAssembler::Divu(dst, lhs, rhs); } void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); - TurboAssembler::Mod(dst, lhs, rhs); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); + MacroAssembler::Mod(dst, lhs, rhs); } void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); - TurboAssembler::Modu(dst, lhs, rhs); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); + MacroAssembler::Modu(dst, lhs, rhs); } #define I32_BINOP(name, instruction) \ @@ -1211,15 +1211,15 @@ I32_BINOP_I(xor, Xor) #undef I32_BINOP_I void LiftoffAssembler::emit_i32_clz(Register dst, Register src) { - TurboAssembler::Clz(dst, src); + MacroAssembler::Clz(dst, src); } void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) { - TurboAssembler::Ctz(dst, src); + MacroAssembler::Ctz(dst, src); } bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) { - TurboAssembler::Popcnt(dst, src); + MacroAssembler::Popcnt(dst, src); return true; } @@ -1244,55 +1244,55 @@ I32_SHIFTOP_I(shr, srl) void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs, int64_t imm) { - TurboAssembler::Daddu(dst.gp(), lhs.gp(), Operand(imm)); + MacroAssembler::Daddu(dst.gp(), lhs.gp(), Operand(imm)); } void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { - TurboAssembler::Dmul(dst.gp(), lhs.gp(), rhs.gp()); + MacroAssembler::Dmul(dst.gp(), lhs.gp(), rhs.gp()); } bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label* trap_div_by_zero, Label* trap_div_unrepresentable) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); // Check if lhs == MinInt64 and rhs == -1, since this case is unrepresentable. 
- TurboAssembler::li(kScratchReg, 1); - TurboAssembler::li(kScratchReg2, 1); - TurboAssembler::LoadZeroOnCondition( + MacroAssembler::li(kScratchReg, 1); + MacroAssembler::li(kScratchReg2, 1); + MacroAssembler::LoadZeroOnCondition( kScratchReg, lhs.gp(), Operand(std::numeric_limits::min()), eq); - TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs.gp(), Operand(-1), eq); + MacroAssembler::LoadZeroOnCondition(kScratchReg2, rhs.gp(), Operand(-1), eq); daddu(kScratchReg, kScratchReg, kScratchReg2); - TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, + MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, Operand(zero_reg)); - TurboAssembler::Ddiv(dst.gp(), lhs.gp(), rhs.gp()); + MacroAssembler::Ddiv(dst.gp(), lhs.gp(), rhs.gp()); return true; } bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); - TurboAssembler::Ddivu(dst.gp(), lhs.gp(), rhs.gp()); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); + MacroAssembler::Ddivu(dst.gp(), lhs.gp(), rhs.gp()); return true; } bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); - TurboAssembler::Dmod(dst.gp(), lhs.gp(), rhs.gp()); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); + MacroAssembler::Dmod(dst.gp(), lhs.gp(), rhs.gp()); return true; } bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); - TurboAssembler::Dmodu(dst.gp(), lhs.gp(), rhs.gp()); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); + MacroAssembler::Dmodu(dst.gp(), lhs.gp(), rhs.gp()); return true; } @@ -1354,32 +1354,32 @@ void LiftoffAssembler::emit_u32_to_uintptr(Register dst, Register src) { } void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) { - TurboAssembler::Neg_s(dst, src); + MacroAssembler::Neg_s(dst, src); } void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) { - TurboAssembler::Neg_d(dst, src); + MacroAssembler::Neg_d(dst, src); } void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { Label ool, done; - TurboAssembler::Float32Min(dst, lhs, rhs, &ool); + MacroAssembler::Float32Min(dst, lhs, rhs, &ool); Branch(&done); bind(&ool); - TurboAssembler::Float32MinOutOfLine(dst, lhs, rhs); + MacroAssembler::Float32MinOutOfLine(dst, lhs, rhs); bind(&done); } void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { Label ool, done; - TurboAssembler::Float32Max(dst, lhs, rhs, &ool); + MacroAssembler::Float32Max(dst, lhs, rhs, &ool); Branch(&done); bind(&ool); - TurboAssembler::Float32MaxOutOfLine(dst, lhs, rhs); + MacroAssembler::Float32MaxOutOfLine(dst, lhs, rhs); bind(&done); } @@ -1410,22 +1410,22 @@ void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs, void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { Label ool, done; - TurboAssembler::Float64Min(dst, lhs, rhs, &ool); + MacroAssembler::Float64Min(dst, lhs, rhs, &ool); Branch(&done); bind(&ool); - TurboAssembler::Float64MinOutOfLine(dst, lhs, rhs); + 
MacroAssembler::Float64MinOutOfLine(dst, lhs, rhs); bind(&done); } void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { Label ool, done; - TurboAssembler::Float64Max(dst, lhs, rhs, &ool); + MacroAssembler::Float64Max(dst, lhs, rhs, &ool); Branch(&done); bind(&ool); - TurboAssembler::Float64MaxOutOfLine(dst, lhs, rhs); + MacroAssembler::Float64MaxOutOfLine(dst, lhs, rhs); bind(&done); } @@ -1498,7 +1498,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, LiftoffRegister src, Label* trap) { switch (opcode) { case kExprI32ConvertI64: - TurboAssembler::Ext(dst.gp(), src.gp(), 0, 32); + MacroAssembler::Ext(dst.gp(), src.gp(), 0, 32); return true; case kExprI32SConvertF32: { LiftoffRegister rounded = GetUnusedRegister(kFpReg, LiftoffRegList{src}); @@ -1506,20 +1506,20 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded}); // Real conversion. - TurboAssembler::Trunc_s_s(rounded.fp(), src.fp()); + MacroAssembler::Trunc_s_s(rounded.fp(), src.fp()); trunc_w_s(kScratchDoubleReg, rounded.fp()); mfc1(dst.gp(), kScratchDoubleReg); // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead, // because INT32_MIN allows easier out-of-bounds detection. - TurboAssembler::Addu(kScratchReg, dst.gp(), 1); - TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); - TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); + MacroAssembler::Addu(kScratchReg, dst.gp(), 1); + MacroAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); + MacroAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); // Checking if trap. mtc1(dst.gp(), kScratchDoubleReg); cvt_s_w(converted_back.fp(), kScratchDoubleReg); - TurboAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp()); - TurboAssembler::BranchFalseF(trap); + MacroAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp()); + MacroAssembler::BranchFalseF(trap); return true; } case kExprI32UConvertF32: { @@ -1528,18 +1528,18 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded}); // Real conversion. - TurboAssembler::Trunc_s_s(rounded.fp(), src.fp()); - TurboAssembler::Trunc_uw_s(dst.gp(), rounded.fp(), kScratchDoubleReg); + MacroAssembler::Trunc_s_s(rounded.fp(), src.fp()); + MacroAssembler::Trunc_uw_s(dst.gp(), rounded.fp(), kScratchDoubleReg); // Avoid UINT32_MAX as an overflow indicator and use 0 instead, // because 0 allows easier out-of-bounds detection. - TurboAssembler::Addu(kScratchReg, dst.gp(), 1); - TurboAssembler::Movz(dst.gp(), zero_reg, kScratchReg); + MacroAssembler::Addu(kScratchReg, dst.gp(), 1); + MacroAssembler::Movz(dst.gp(), zero_reg, kScratchReg); // Checking if trap. - TurboAssembler::Cvt_d_uw(converted_back.fp(), dst.gp()); + MacroAssembler::Cvt_d_uw(converted_back.fp(), dst.gp()); cvt_s_d(converted_back.fp(), converted_back.fp()); - TurboAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp()); - TurboAssembler::BranchFalseF(trap); + MacroAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp()); + MacroAssembler::BranchFalseF(trap); return true; } case kExprI32SConvertF64: { @@ -1548,14 +1548,14 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded}); // Real conversion. 
- TurboAssembler::Trunc_d_d(rounded.fp(), src.fp()); + MacroAssembler::Trunc_d_d(rounded.fp(), src.fp()); trunc_w_d(kScratchDoubleReg, rounded.fp()); mfc1(dst.gp(), kScratchDoubleReg); // Checking if trap. cvt_d_w(converted_back.fp(), kScratchDoubleReg); - TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp()); - TurboAssembler::BranchFalseF(trap); + MacroAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp()); + MacroAssembler::BranchFalseF(trap); return true; } case kExprI32UConvertF64: { @@ -1564,23 +1564,23 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded}); // Real conversion. - TurboAssembler::Trunc_d_d(rounded.fp(), src.fp()); - TurboAssembler::Trunc_uw_d(dst.gp(), rounded.fp(), kScratchDoubleReg); + MacroAssembler::Trunc_d_d(rounded.fp(), src.fp()); + MacroAssembler::Trunc_uw_d(dst.gp(), rounded.fp(), kScratchDoubleReg); // Checking if trap. - TurboAssembler::Cvt_d_uw(converted_back.fp(), dst.gp()); - TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp()); - TurboAssembler::BranchFalseF(trap); + MacroAssembler::Cvt_d_uw(converted_back.fp(), dst.gp()); + MacroAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp()); + MacroAssembler::BranchFalseF(trap); return true; } case kExprI32ReinterpretF32: - TurboAssembler::FmoveLow(dst.gp(), src.fp()); + MacroAssembler::FmoveLow(dst.gp(), src.fp()); return true; case kExprI64SConvertI32: sll(dst.gp(), src.gp(), 0); return true; case kExprI64UConvertI32: - TurboAssembler::Dext(dst.gp(), src.gp(), 0, 32); + MacroAssembler::Dext(dst.gp(), src.gp(), 0, 32); return true; case kExprI64SConvertF32: { LiftoffRegister rounded = GetUnusedRegister(kFpReg, LiftoffRegList{src}); @@ -1588,29 +1588,29 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded}); // Real conversion. - TurboAssembler::Trunc_s_s(rounded.fp(), src.fp()); + MacroAssembler::Trunc_s_s(rounded.fp(), src.fp()); trunc_l_s(kScratchDoubleReg, rounded.fp()); dmfc1(dst.gp(), kScratchDoubleReg); // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead, // because INT64_MIN allows easier out-of-bounds detection. - TurboAssembler::Daddu(kScratchReg, dst.gp(), 1); - TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); - TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); + MacroAssembler::Daddu(kScratchReg, dst.gp(), 1); + MacroAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); + MacroAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); // Checking if trap. dmtc1(dst.gp(), kScratchDoubleReg); cvt_s_l(converted_back.fp(), kScratchDoubleReg); - TurboAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp()); - TurboAssembler::BranchFalseF(trap); + MacroAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp()); + MacroAssembler::BranchFalseF(trap); return true; } case kExprI64UConvertF32: { // Real conversion. - TurboAssembler::Trunc_ul_s(dst.gp(), src.fp(), kScratchDoubleReg, + MacroAssembler::Trunc_ul_s(dst.gp(), src.fp(), kScratchDoubleReg, kScratchReg); // Checking if trap. - TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); + MacroAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); return true; } case kExprI64SConvertF64: { @@ -1619,29 +1619,29 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded}); // Real conversion. 
- TurboAssembler::Trunc_d_d(rounded.fp(), src.fp()); + MacroAssembler::Trunc_d_d(rounded.fp(), src.fp()); trunc_l_d(kScratchDoubleReg, rounded.fp()); dmfc1(dst.gp(), kScratchDoubleReg); // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead, // because INT64_MIN allows easier out-of-bounds detection. - TurboAssembler::Daddu(kScratchReg, dst.gp(), 1); - TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); - TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); + MacroAssembler::Daddu(kScratchReg, dst.gp(), 1); + MacroAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); + MacroAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); // Checking if trap. dmtc1(dst.gp(), kScratchDoubleReg); cvt_d_l(converted_back.fp(), kScratchDoubleReg); - TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp()); - TurboAssembler::BranchFalseF(trap); + MacroAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp()); + MacroAssembler::BranchFalseF(trap); return true; } case kExprI64UConvertF64: { // Real conversion. - TurboAssembler::Trunc_ul_d(dst.gp(), src.fp(), kScratchDoubleReg, + MacroAssembler::Trunc_ul_d(dst.gp(), src.fp(), kScratchDoubleReg, kScratchReg); // Checking if trap. - TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); + MacroAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); return true; } case kExprI64ReinterpretF64: @@ -1654,13 +1654,13 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, return true; } case kExprF32UConvertI32: - TurboAssembler::Cvt_s_uw(dst.fp(), src.gp()); + MacroAssembler::Cvt_s_uw(dst.fp(), src.gp()); return true; case kExprF32ConvertF64: cvt_s_d(dst.fp(), src.fp()); return true; case kExprF32ReinterpretI32: - TurboAssembler::FmoveLow(dst.fp(), src.gp()); + MacroAssembler::FmoveLow(dst.fp(), src.gp()); return true; case kExprF64SConvertI32: { LiftoffRegister scratch = GetUnusedRegister(kFpReg, LiftoffRegList{dst}); @@ -1669,7 +1669,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, return true; } case kExprF64UConvertI32: - TurboAssembler::Cvt_d_uw(dst.fp(), src.gp()); + MacroAssembler::Cvt_d_uw(dst.fp(), src.gp()); return true; case kExprF64ConvertF32: cvt_d_s(dst.fp(), src.fp()); @@ -1688,7 +1688,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, CompareIsNanF32(src.fp(), src.fp()); BranchTrueShortF(&done); li(dst.gp(), static_cast<int32_t>(std::numeric_limits<int32_t>::min())); - TurboAssembler::Move( + MacroAssembler::Move( kScratchDoubleReg, static_cast<float>(std::numeric_limits<int32_t>::min())); CompareF32(OLT, src.fp(), kScratchDoubleReg); @@ -1702,7 +1702,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, case kExprI32UConvertSatF32: { Label isnan_or_lessthan_or_equal_zero; mov(dst.gp(), zero_reg); - TurboAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0)); + MacroAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0)); CompareF32(ULE, src.fp(), kScratchDoubleReg); BranchTrueShortF(&isnan_or_lessthan_or_equal_zero); Trunc_uw_s(dst.gp(), src.fp(), kScratchDoubleReg); @@ -1719,7 +1719,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, CompareIsNanF64(src.fp(), src.fp()); BranchTrueShortF(&done); li(dst.gp(), static_cast<int32_t>(std::numeric_limits<int32_t>::min())); - TurboAssembler::Move( + MacroAssembler::Move( kScratchDoubleReg, static_cast<double>(std::numeric_limits<int32_t>::min())); CompareF64(OLT, src.fp(), kScratchDoubleReg); @@ -1733,7 +1733,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, case kExprI32UConvertSatF64: { Label isnan_or_lessthan_or_equal_zero;
mov(dst.gp(), zero_reg); - TurboAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0)); + MacroAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0)); CompareF64(ULE, src.fp(), kScratchDoubleReg); BranchTrueShortF(&isnan_or_lessthan_or_equal_zero); Trunc_uw_d(dst.gp(), src.fp(), kScratchDoubleReg); @@ -1750,7 +1750,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, CompareIsNanF32(src.fp(), src.fp()); BranchTrueShortF(&done); li(dst.gp(), static_cast<int64_t>(std::numeric_limits<int64_t>::min())); - TurboAssembler::Move( + MacroAssembler::Move( kScratchDoubleReg, static_cast<float>(std::numeric_limits<int64_t>::min())); CompareF32(OLT, src.fp(), kScratchDoubleReg); @@ -1764,7 +1764,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, case kExprI64UConvertSatF32: { Label isnan_or_lessthan_or_equal_zero; mov(dst.gp(), zero_reg); - TurboAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0)); + MacroAssembler::Move(kScratchDoubleReg, static_cast<float>(0.0)); CompareF32(ULE, src.fp(), kScratchDoubleReg); BranchTrueShortF(&isnan_or_lessthan_or_equal_zero); Trunc_ul_s(dst.gp(), src.fp(), kScratchDoubleReg, no_reg); @@ -1781,7 +1781,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, CompareIsNanF64(src.fp(), src.fp()); BranchTrueShortF(&done); li(dst.gp(), static_cast<int64_t>(std::numeric_limits<int64_t>::min())); - TurboAssembler::Move( + MacroAssembler::Move( kScratchDoubleReg, static_cast<double>(std::numeric_limits<int64_t>::min())); CompareF64(OLT, src.fp(), kScratchDoubleReg); @@ -1795,7 +1795,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, case kExprI64UConvertSatF64: { Label isnan_or_lessthan_or_equal_zero; mov(dst.gp(), zero_reg); - TurboAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0)); + MacroAssembler::Move(kScratchDoubleReg, static_cast<double>(0.0)); CompareF64(ULE, src.fp(), kScratchDoubleReg); BranchTrueShortF(&isnan_or_lessthan_or_equal_zero); Trunc_ul_d(dst.gp(), src.fp(), kScratchDoubleReg, no_reg); @@ -1831,11 +1831,11 @@ void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst, } void LiftoffAssembler::emit_jump(Label* label) { - TurboAssembler::Branch(label); + MacroAssembler::Branch(label); } void LiftoffAssembler::emit_jump(Register target) { - TurboAssembler::Jump(target); + MacroAssembler::Jump(target); } void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label, @@ -1844,25 +1844,25 @@ void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label, const FreezeCacheState& frozen) { if (rhs == no_reg) { DCHECK(kind == kI32 || kind == kI64); - TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg)); + MacroAssembler::Branch(label, cond, lhs, Operand(zero_reg)); } else { DCHECK((kind == kI32 || kind == kI64) || (is_reference(kind) && (cond == kEqual || cond == kNotEqual))); - TurboAssembler::Branch(label, cond, lhs, Operand(rhs)); + MacroAssembler::Branch(label, cond, lhs, Operand(rhs)); } } void LiftoffAssembler::emit_i32_cond_jumpi(Condition cond, Label* label, Register lhs, int32_t imm, const FreezeCacheState& frozen) { - TurboAssembler::Branch(label, cond, lhs, Operand(imm)); + MacroAssembler::Branch(label, cond, lhs, Operand(imm)); } void LiftoffAssembler::emit_i32_subi_jump_negative( Register value, int subtrahend, Label* result_negative, const FreezeCacheState& frozen) { - TurboAssembler::Dsubu(value, value, Operand(subtrahend)); - TurboAssembler::Branch(result_negative, less, value, Operand(zero_reg)); + MacroAssembler::Dsubu(value, value, Operand(subtrahend)); + MacroAssembler::Branch(result_negative, less, value, Operand(zero_reg)); } void
LiftoffAssembler::emit_i32_eqz(Register dst, Register src) { @@ -1876,14 +1876,14 @@ void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst, tmp = GetUnusedRegister(kGpReg, LiftoffRegList{lhs, rhs}).gp(); } // Write 1 as result. - TurboAssembler::li(tmp, 1); + MacroAssembler::li(tmp, 1); // If negative condition is true, write 0 as result. Condition neg_cond = NegateCondition(cond); - TurboAssembler::LoadZeroOnCondition(tmp, lhs, Operand(rhs), neg_cond); + MacroAssembler::LoadZeroOnCondition(tmp, lhs, Operand(rhs), neg_cond); // If tmp != dst, result will be moved. - TurboAssembler::Move(dst, tmp); + MacroAssembler::Move(dst, tmp); } void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) { @@ -1898,15 +1898,15 @@ void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst, tmp = GetUnusedRegister(kGpReg, LiftoffRegList{lhs, rhs}).gp(); } // Write 1 as result. - TurboAssembler::li(tmp, 1); + MacroAssembler::li(tmp, 1); // If negative condition is true, write 0 as result. Condition neg_cond = NegateCondition(cond); - TurboAssembler::LoadZeroOnCondition(tmp, lhs.gp(), Operand(rhs.gp()), + MacroAssembler::LoadZeroOnCondition(tmp, lhs.gp(), Operand(rhs.gp()), neg_cond); // If tmp != dst, result will be moved. - TurboAssembler::Move(dst, tmp); + MacroAssembler::Move(dst, tmp); } namespace liftoff { @@ -1965,26 +1965,26 @@ void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst, DoubleRegister lhs, DoubleRegister rhs) { Label not_nan, cont; - TurboAssembler::CompareIsNanF32(lhs, rhs); - TurboAssembler::BranchFalseF(&not_nan); + MacroAssembler::CompareIsNanF32(lhs, rhs); + MacroAssembler::BranchFalseF(&not_nan); // If one of the operands is NaN, return 1 for f32.ne, else 0. if (cond == ne) { - TurboAssembler::li(dst, 1); + MacroAssembler::li(dst, 1); } else { - TurboAssembler::Move(dst, zero_reg); + MacroAssembler::Move(dst, zero_reg); } - TurboAssembler::Branch(&cont); + MacroAssembler::Branch(&cont); bind(&not_nan); - TurboAssembler::li(dst, 1); + MacroAssembler::li(dst, 1); bool predicate; FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate); - TurboAssembler::CompareF32(fcond, lhs, rhs); + MacroAssembler::CompareF32(fcond, lhs, rhs); if (predicate) { - TurboAssembler::LoadZeroIfNotFPUCondition(dst); + MacroAssembler::LoadZeroIfNotFPUCondition(dst); } else { - TurboAssembler::LoadZeroIfFPUCondition(dst); + MacroAssembler::LoadZeroIfFPUCondition(dst); } bind(&cont); @@ -1994,26 +1994,26 @@ void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst, DoubleRegister lhs, DoubleRegister rhs) { Label not_nan, cont; - TurboAssembler::CompareIsNanF64(lhs, rhs); - TurboAssembler::BranchFalseF(&not_nan); + MacroAssembler::CompareIsNanF64(lhs, rhs); + MacroAssembler::BranchFalseF(&not_nan); // If one of the operands is NaN, return 1 for f64.ne, else 0.
if (cond == ne) { - TurboAssembler::li(dst, 1); + MacroAssembler::li(dst, 1); } else { - TurboAssembler::Move(dst, zero_reg); + MacroAssembler::Move(dst, zero_reg); } - TurboAssembler::Branch(&cont); + MacroAssembler::Branch(&cont); bind(&not_nan); - TurboAssembler::li(dst, 1); + MacroAssembler::li(dst, 1); bool predicate; FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(cond, &predicate); - TurboAssembler::CompareF64(fcond, lhs, rhs); + MacroAssembler::CompareF64(fcond, lhs, rhs); if (predicate) { - TurboAssembler::LoadZeroIfNotFPUCondition(dst); + MacroAssembler::LoadZeroIfNotFPUCondition(dst); } else { - TurboAssembler::LoadZeroIfFPUCondition(dst); + MacroAssembler::LoadZeroIfFPUCondition(dst); } bind(&cont); @@ -2111,7 +2111,7 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src, MemOperand src_op = liftoff::GetMemOp(this, addr, offset_reg, offset_imm); *protected_load_pc = pc_offset(); LoadStoreLaneParams load_params(type.mem_type().representation(), laneidx); - TurboAssembler::LoadLane(load_params.sz, dst.fp().toW(), laneidx, src_op); + MacroAssembler::LoadLane(load_params.sz, dst.fp().toW(), laneidx, src_op); } void LiftoffAssembler::StoreLane(Register dst, Register offset, @@ -2121,7 +2121,7 @@ void LiftoffAssembler::StoreLane(Register dst, Register offset, MemOperand dst_op = liftoff::GetMemOp(this, dst, offset, offset_imm); if (protected_store_pc) *protected_store_pc = pc_offset(); LoadStoreLaneParams store_params(type.mem_rep(), lane); - TurboAssembler::StoreLane(store_params.sz, src.fp().toW(), lane, dst_op); + MacroAssembler::StoreLane(store_params.sz, src.fp().toW(), lane, dst_op); } void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst, @@ -2228,25 +2228,25 @@ void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst, void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst, LiftoffRegister src) { - TurboAssembler::FmoveLow(kScratchReg, src.fp()); + MacroAssembler::FmoveLow(kScratchReg, src.fp()); fill_w(dst.fp().toW(), kScratchReg); } void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst, LiftoffRegister src) { - TurboAssembler::Move(kScratchReg, src.fp()); + MacroAssembler::Move(kScratchReg, src.fp()); fill_d(dst.fp().toW(), kScratchReg); } #define SIMD_BINOP(name1, name2, type) \ void LiftoffAssembler::emit_##name1##_extmul_low_##name2( \ LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \ - TurboAssembler::ExtMulLow(type, dst.fp().toW(), src1.fp().toW(), \ + MacroAssembler::ExtMulLow(type, dst.fp().toW(), src1.fp().toW(), \ src2.fp().toW()); \ } \ void LiftoffAssembler::emit_##name1##_extmul_high_##name2( \ LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \ - TurboAssembler::ExtMulHigh(type, dst.fp().toW(), src1.fp().toW(), \ + MacroAssembler::ExtMulHigh(type, dst.fp().toW(), src1.fp().toW(), \ src2.fp().toW()); \ } @@ -2264,7 +2264,7 @@ SIMD_BINOP(i64x2, i32x4_u, MSAU32) #define SIMD_BINOP(name1, name2, type) \ void LiftoffAssembler::emit_##name1##_extadd_pairwise_##name2( \ LiftoffRegister dst, LiftoffRegister src) { \ - TurboAssembler::ExtAddPairwise(type, dst.fp().toW(), src.fp().toW()); \ + MacroAssembler::ExtAddPairwise(type, dst.fp().toW(), src.fp().toW()); \ } SIMD_BINOP(i16x8, i8x16_s, MSAS8) @@ -3455,14 +3455,14 @@ void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx) { copy_u_w(kScratchReg, lhs.fp().toW(), imm_lane_idx); - TurboAssembler::FmoveLow(dst.fp(), kScratchReg); + MacroAssembler::FmoveLow(dst.fp(), kScratchReg); }
void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst, LiftoffRegister lhs, uint8_t imm_lane_idx) { copy_s_d(kScratchReg, lhs.fp().toW(), imm_lane_idx); - TurboAssembler::Move(dst.fp(), kScratchReg); + MacroAssembler::Move(dst.fp(), kScratchReg); } void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst, @@ -3509,7 +3509,7 @@ void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, uint8_t imm_lane_idx) { - TurboAssembler::FmoveLow(kScratchReg, src2.fp()); + MacroAssembler::FmoveLow(kScratchReg, src2.fp()); if (dst != src1) { move_v(dst.fp().toW(), src1.fp().toW()); } @@ -3520,7 +3520,7 @@ void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2, uint8_t imm_lane_idx) { - TurboAssembler::Move(kScratchReg, src2.fp()); + MacroAssembler::Move(kScratchReg, src2.fp()); if (dst != src1) { move_v(dst.fp().toW(), src1.fp().toW()); } @@ -3556,8 +3556,8 @@ void LiftoffAssembler::emit_f64x2_qfms(LiftoffRegister dst, } void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) { - TurboAssembler::Uld(limit_address, MemOperand(limit_address)); - TurboAssembler::Branch(ool_code, ule, sp, Operand(limit_address)); + MacroAssembler::Uld(limit_address, MemOperand(limit_address)); + MacroAssembler::Branch(ool_code, ule, sp, Operand(limit_address)); } void LiftoffAssembler::CallTrapCallbackForTesting() { @@ -3592,9 +3592,9 @@ void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { while (!fp_regs.is_empty()) { LiftoffRegister reg = fp_regs.GetFirstRegSet(); if (IsEnabled(MIPS_SIMD)) { - TurboAssembler::st_d(reg.fp().toW(), MemOperand(sp, offset)); + MacroAssembler::st_d(reg.fp().toW(), MemOperand(sp, offset)); } else { - TurboAssembler::Sdc1(reg.fp(), MemOperand(sp, offset)); + MacroAssembler::Sdc1(reg.fp(), MemOperand(sp, offset)); } fp_regs.clear(reg); offset += slot_size; @@ -3609,9 +3609,9 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { while (!fp_regs.is_empty()) { LiftoffRegister reg = fp_regs.GetFirstRegSet(); if (IsEnabled(MIPS_SIMD)) { - TurboAssembler::ld_d(reg.fp().toW(), MemOperand(sp, fp_offset)); + MacroAssembler::ld_d(reg.fp().toW(), MemOperand(sp, fp_offset)); } else { - TurboAssembler::Ldc1(reg.fp(), MemOperand(sp, fp_offset)); + MacroAssembler::Ldc1(reg.fp(), MemOperand(sp, fp_offset)); } fp_regs.clear(reg); fp_offset += (IsEnabled(MIPS_SIMD) ? 
16 : 8); @@ -3648,7 +3648,7 @@ void LiftoffAssembler::RecordSpillsInSafepoint( void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) { DCHECK_LT(num_stack_slots, (1 << 16) / kSystemPointerSize); // 16 bit immediate - TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots)); + MacroAssembler::DropAndRet(static_cast<int>(num_stack_slots)); } void LiftoffAssembler::CallC(const ValueKindSig* sig, @@ -3730,7 +3730,7 @@ void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) { void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) { Daddu(sp, sp, -size); - TurboAssembler::Move(addr, sp); + MacroAssembler::Move(addr, sp); } void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { diff --git a/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/src/wasm/baseline/ppc/liftoff-assembler-ppc.h index e376c75b23..4f8041a67b 100644 --- a/src/wasm/baseline/ppc/liftoff-assembler-ppc.h +++ b/src/wasm/baseline/ppc/liftoff-assembler-ppc.h @@ -62,26 +62,6 @@ inline MemOperand GetStackSlot(uint32_t offset) { inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); } -inline constexpr bool UseSignedOp(Condition cond) { - switch (cond) { - case kEqual: - case kNotEqual: - case kLessThan: - case kLessThanEqual: - case kGreaterThan: - case kGreaterThanEqual: - return true; - case kUnsignedLessThan: - case kUnsignedLessThanEqual: - case kUnsignedGreaterThan: - case kUnsignedGreaterThanEqual: - return false; - default: - UNREACHABLE(); - } - return false; -} - } // namespace liftoff int LiftoffAssembler::PrepareStackFrame() { @@ -197,7 +177,7 @@ void LiftoffAssembler::PatchPrepareStackFrame( bind(&continuation); // Now allocate the stack space. Note that this might do more than just - // decrementing the SP; consult {TurboAssembler::AllocateStackSpace}. + // decrementing the SP; consult {MacroAssembler::AllocateStackSpace}.
SubS64(sp, sp, Operand(frame_size), r0); // Jump back to the start of the function, from {pc_offset()} to @@ -286,7 +266,7 @@ void LiftoffAssembler::LoadFromInstance(Register dst, Register instance, void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, Register instance, int offset) { - LoadTaggedPointerField(dst, MemOperand(instance, offset), r0); + LoadTaggedField(dst, MemOperand(instance, offset), r0); } void LiftoffAssembler::SpillInstance(Register instance) { @@ -303,7 +283,7 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr, ShiftLeftU64(ip, offset_reg, Operand(shift_amount)); offset_reg = ip; } - LoadTaggedPointerField(dst, MemOperand(src_addr, offset_reg, offset_imm), r0); + LoadTaggedField(dst, MemOperand(src_addr, offset_reg, offset_imm), r0); } void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr, @@ -330,7 +310,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr, bind(&write_barrier); JumpIfSmi(src.gp(), &exit); if (COMPRESS_POINTERS_BOOL) { - DecompressTaggedPointer(src.gp(), src.gp()); + DecompressTagged(src.gp(), src.gp()); } CheckPageFlag(src.gp(), ip, MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask, @@ -692,7 +672,7 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg, switch (type.value()) { case StoreType::kI32Store8: case StoreType::kI64Store8: { - TurboAssembler::AtomicExchange(dst, value.gp(), result.gp()); + MacroAssembler::AtomicExchange(dst, value.gp(), result.gp()); break; } case StoreType::kI32Store16: @@ -702,10 +682,10 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg, push(scratch); ByteReverseU16(r0, value.gp(), scratch); pop(scratch); - TurboAssembler::AtomicExchange(dst, r0, result.gp()); + MacroAssembler::AtomicExchange(dst, r0, result.gp()); ByteReverseU16(result.gp(), result.gp(), ip); } else { - TurboAssembler::AtomicExchange(dst, value.gp(), result.gp()); + MacroAssembler::AtomicExchange(dst, value.gp(), result.gp()); } break; } @@ -716,20 +696,20 @@ void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg, push(scratch); ByteReverseU32(r0, value.gp(), scratch); pop(scratch); - TurboAssembler::AtomicExchange(dst, r0, result.gp()); + MacroAssembler::AtomicExchange(dst, r0, result.gp()); ByteReverseU32(result.gp(), result.gp(), ip); } else { - TurboAssembler::AtomicExchange(dst, value.gp(), result.gp()); + MacroAssembler::AtomicExchange(dst, value.gp(), result.gp()); } break; } case StoreType::kI64Store: { if (is_be) { ByteReverseU64(r0, value.gp()); - TurboAssembler::AtomicExchange(dst, r0, result.gp()); + MacroAssembler::AtomicExchange(dst, r0, result.gp()); ByteReverseU64(result.gp(), result.gp()); } else { - TurboAssembler::AtomicExchange(dst, value.gp(), result.gp()); + MacroAssembler::AtomicExchange(dst, value.gp(), result.gp()); } break; } @@ -760,7 +740,7 @@ void LiftoffAssembler::AtomicCompareExchange( switch (type.value()) { case StoreType::kI32Store8: case StoreType::kI64Store8: { - TurboAssembler::AtomicCompareExchange( + MacroAssembler::AtomicCompareExchange( dst, expected.gp(), new_value.gp(), result.gp(), r0); break; } @@ -774,12 +754,12 @@ void LiftoffAssembler::AtomicCompareExchange( ByteReverseU16(new_value.gp(), new_value.gp(), scratch); ByteReverseU16(expected.gp(), expected.gp(), scratch); pop(scratch); - TurboAssembler::AtomicCompareExchange( + MacroAssembler::AtomicCompareExchange( dst, expected.gp(), new_value.gp(), result.gp(), r0); ByteReverseU16(result.gp(), result.gp(), 
r0); Pop(new_value.gp(), expected.gp()); } else { - TurboAssembler::AtomicCompareExchange( + MacroAssembler::AtomicCompareExchange( dst, expected.gp(), new_value.gp(), result.gp(), r0); } break; @@ -794,12 +774,12 @@ void LiftoffAssembler::AtomicCompareExchange( ByteReverseU32(new_value.gp(), new_value.gp(), scratch); ByteReverseU32(expected.gp(), expected.gp(), scratch); pop(scratch); - TurboAssembler::AtomicCompareExchange( + MacroAssembler::AtomicCompareExchange( dst, expected.gp(), new_value.gp(), result.gp(), r0); ByteReverseU32(result.gp(), result.gp(), r0); Pop(new_value.gp(), expected.gp()); } else { - TurboAssembler::AtomicCompareExchange( + MacroAssembler::AtomicCompareExchange( dst, expected.gp(), new_value.gp(), result.gp(), r0); } break; @@ -809,12 +789,12 @@ void LiftoffAssembler::AtomicCompareExchange( Push(new_value.gp(), expected.gp()); ByteReverseU64(new_value.gp(), new_value.gp()); ByteReverseU64(expected.gp(), expected.gp()); - TurboAssembler::AtomicCompareExchange( + MacroAssembler::AtomicCompareExchange( dst, expected.gp(), new_value.gp(), result.gp(), r0); ByteReverseU64(result.gp(), result.gp()); Pop(new_value.gp(), expected.gp()); } else { - TurboAssembler::AtomicCompareExchange( + MacroAssembler::AtomicCompareExchange( dst, expected.gp(), new_value.gp(), result.gp(), r0); } break; @@ -1641,7 +1621,7 @@ void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label, ValueKind kind, Register lhs, Register rhs, const FreezeCacheState& frozen) { - bool use_signed = liftoff::UseSignedOp(cond); + bool use_signed = is_signed(cond); if (rhs != no_reg) { switch (kind) { @@ -1686,19 +1666,19 @@ void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label, CmpS32(lhs, Operand::Zero(), r0); } - b(cond, label); + b(to_condition(cond), label); } void LiftoffAssembler::emit_i32_cond_jumpi(Condition cond, Label* label, Register lhs, int32_t imm, const FreezeCacheState& frozen) { - bool use_signed = liftoff::UseSignedOp(cond); + bool use_signed = is_signed(cond); if (use_signed) { CmpS32(lhs, Operand(imm), r0); } else { CmpU32(lhs, Operand(imm), r0); } - b(cond, label); + b(to_condition(cond), label); } void LiftoffAssembler::emit_i32_subi_jump_negative( @@ -1719,7 +1699,7 @@ void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) { void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst, Register lhs, Register rhs) { - bool use_signed = liftoff::UseSignedOp(cond); + bool use_signed = is_signed(cond); if (use_signed) { CmpS32(lhs, rhs); } else { @@ -1727,7 +1707,7 @@ void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst, } Label done; mov(dst, Operand(1)); - b(liftoff::ToCondition(cond), &done); + b(to_condition(to_condition(cond)), &done); mov(dst, Operand::Zero()); bind(&done); } @@ -1744,7 +1724,7 @@ void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) { void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst, LiftoffRegister lhs, LiftoffRegister rhs) { - bool use_signed = liftoff::UseSignedOp(cond); + bool use_signed = is_signed(cond); if (use_signed) { CmpS64(lhs.gp(), rhs.gp()); } else { @@ -1752,7 +1732,7 @@ void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst, } Label done; mov(dst, Operand(1)); - b(liftoff::ToCondition(cond), &done); + b(to_condition(to_condition(cond)), &done); mov(dst, Operand::Zero()); bind(&done); } @@ -1764,7 +1744,7 @@ void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst, Label nan, done; bunordered(&nan, cr0); mov(dst, Operand::Zero()); 
- b(NegateCondition(liftoff::ToCondition(cond)), &done, cr0); + b(NegateCondition(to_condition(to_condition(cond))), &done, cr0); mov(dst, Operand(1)); b(&done); bind(&nan); @@ -1779,7 +1759,7 @@ void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst, void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst, DoubleRegister lhs, DoubleRegister rhs) { - emit_f32_set_cond(cond, dst, lhs, rhs); + emit_f32_set_cond(to_condition(cond), dst, lhs, rhs); } bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition, diff --git a/src/wasm/baseline/riscv/liftoff-assembler-riscv.h b/src/wasm/baseline/riscv/liftoff-assembler-riscv.h index 4611e01382..4afaa71a21 100644 --- a/src/wasm/baseline/riscv/liftoff-assembler-riscv.h +++ b/src/wasm/baseline/riscv/liftoff-assembler-riscv.h @@ -73,7 +73,7 @@ void LiftoffAssembler::PatchPrepareStackFrame( // We can't run out of space, just pass anything big enough to not cause the // assembler to try to grow the buffer. constexpr int kAvailableSpace = 256; - TurboAssembler patching_assembler( + MacroAssembler patching_assembler( nullptr, AssemblerOptions{}, CodeObjectRequired::kNo, ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace)); @@ -196,7 +196,7 @@ void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, Register instance, int offset) { DCHECK_LE(0, offset); - LoadTaggedPointerField(dst, MemOperand{instance, offset}); + LoadTaggedField(dst, MemOperand{instance, offset}); } void LiftoffAssembler::SpillInstance(Register instance) { @@ -206,21 +206,21 @@ void LiftoffAssembler::SpillInstance(Register instance) { void LiftoffAssembler::ResetOSRTarget() {} void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) { - TurboAssembler::Neg_s(dst, src); + MacroAssembler::Neg_s(dst, src); } void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) { - TurboAssembler::Neg_d(dst, src); + MacroAssembler::Neg_d(dst, src); } void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { - TurboAssembler::Float32Min(dst, lhs, rhs); + MacroAssembler::Float32Min(dst, lhs, rhs); } void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { - TurboAssembler::Float32Max(dst, lhs, rhs); + MacroAssembler::Float32Max(dst, lhs, rhs); } void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs, @@ -230,12 +230,12 @@ void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs, void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { - TurboAssembler::Float64Min(dst, lhs, rhs); + MacroAssembler::Float64Min(dst, lhs, rhs); } void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { - TurboAssembler::Float64Max(dst, lhs, rhs); + MacroAssembler::Float64Max(dst, lhs, rhs); } void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs, @@ -302,14 +302,14 @@ void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst, DoubleRegister lhs, DoubleRegister rhs) { FPUCondition fcond = ConditionToConditionCmpFPU(cond); - TurboAssembler::CompareF32(dst, fcond, lhs, rhs); + MacroAssembler::CompareF32(dst, fcond, lhs, rhs); } void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst, DoubleRegister lhs, DoubleRegister rhs) { FPUCondition fcond = ConditionToConditionCmpFPU(cond); - TurboAssembler::CompareF64(dst, fcond, lhs, rhs); + MacroAssembler::CompareF64(dst, fcond, 
lhs, rhs); } bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition, @@ -2070,8 +2070,8 @@ void LiftoffAssembler::emit_f64x2_qfms(LiftoffRegister dst, } void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) { - TurboAssembler::LoadWord(limit_address, MemOperand(limit_address)); - TurboAssembler::Branch(ool_code, ule, sp, Operand(limit_address)); + MacroAssembler::LoadWord(limit_address, MemOperand(limit_address)); + MacroAssembler::Branch(ool_code, ule, sp, Operand(limit_address)); } void LiftoffAssembler::CallTrapCallbackForTesting() { @@ -2104,7 +2104,7 @@ void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { int32_t offset = 0; while (!fp_regs.is_empty()) { LiftoffRegister reg = fp_regs.GetFirstRegSet(); - TurboAssembler::StoreDouble(reg.fp(), MemOperand(sp, offset)); + MacroAssembler::StoreDouble(reg.fp(), MemOperand(sp, offset)); fp_regs.clear(reg); offset += sizeof(double); } @@ -2117,7 +2117,7 @@ void LiftoffAssembler::PopRegisters(LiftoffRegList regs) { int32_t fp_offset = 0; while (!fp_regs.is_empty()) { LiftoffRegister reg = fp_regs.GetFirstRegSet(); - TurboAssembler::LoadDouble(reg.fp(), MemOperand(sp, fp_offset)); + MacroAssembler::LoadDouble(reg.fp(), MemOperand(sp, fp_offset)); fp_regs.clear(reg); fp_offset += sizeof(double); } @@ -2151,7 +2151,7 @@ void LiftoffAssembler::RecordSpillsInSafepoint( } void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) { - TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots)); + MacroAssembler::DropAndRet(static_cast<int>(num_stack_slots)); } void LiftoffAssembler::CallNativeWasmCode(Address addr) { @@ -2190,7 +2190,7 @@ void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) { void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) { AddWord(sp, sp, Operand(-size)); - TurboAssembler::Move(addr, sp); + MacroAssembler::Move(addr, sp); } void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { diff --git a/src/wasm/baseline/riscv/liftoff-assembler-riscv32.h b/src/wasm/baseline/riscv/liftoff-assembler-riscv32.h index 8b44f8e962..63def4f714 100644 --- a/src/wasm/baseline/riscv/liftoff-assembler-riscv32.h +++ b/src/wasm/baseline/riscv/liftoff-assembler-riscv32.h @@ -178,22 +178,22 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value, RelocInfo::Mode rmode) { switch (value.type().kind()) { case kI32: - TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode)); + MacroAssembler::li(reg.gp(), Operand(value.to_i32(), rmode)); break; case kI64: { DCHECK(RelocInfo::IsNoInfo(rmode)); int32_t low_word = value.to_i64(); int32_t high_word = value.to_i64() >> 32; - TurboAssembler::li(reg.low_gp(), Operand(low_word)); - TurboAssembler::li(reg.high_gp(), Operand(high_word)); + MacroAssembler::li(reg.low_gp(), Operand(low_word)); + MacroAssembler::li(reg.high_gp(), Operand(high_word)); break; } case kF32: - TurboAssembler::LoadFPRImmediate(reg.fp(), + MacroAssembler::LoadFPRImmediate(reg.fp(), value.to_f32_boxed().get_bits()); break; case kF64: - TurboAssembler::LoadFPRImmediate(reg.fp(), + MacroAssembler::LoadFPRImmediate(reg.fp(), value.to_f64_boxed().get_bits()); break; default: @@ -262,39 +262,39 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr, break; case LoadType::kI64Load8U: Lbu(dst.low_gp(), src_op); - TurboAssembler::mv(dst.high_gp(), zero_reg); + MacroAssembler::mv(dst.high_gp(), zero_reg); break; case LoadType::kI32Load8S: Lb(dst.gp(), src_op); break; case LoadType::kI64Load8S: Lb(dst.low_gp(), src_op); -
TurboAssembler::srai(dst.high_gp(), dst.low_gp(), 31); + MacroAssembler::srai(dst.high_gp(), dst.low_gp(), 31); break; case LoadType::kI32Load16U: - TurboAssembler::Lhu(dst.gp(), src_op); + MacroAssembler::Lhu(dst.gp(), src_op); break; case LoadType::kI64Load16U: - TurboAssembler::Lhu(dst.low_gp(), src_op); - TurboAssembler::mv(dst.high_gp(), zero_reg); + MacroAssembler::Lhu(dst.low_gp(), src_op); + MacroAssembler::mv(dst.high_gp(), zero_reg); break; case LoadType::kI32Load16S: - TurboAssembler::Lh(dst.gp(), src_op); + MacroAssembler::Lh(dst.gp(), src_op); break; case LoadType::kI64Load16S: - TurboAssembler::Lh(dst.low_gp(), src_op); - TurboAssembler::srai(dst.high_gp(), dst.low_gp(), 31); + MacroAssembler::Lh(dst.low_gp(), src_op); + MacroAssembler::srai(dst.high_gp(), dst.low_gp(), 31); break; case LoadType::kI64Load32U: - TurboAssembler::Lw(dst.low_gp(), src_op); - TurboAssembler::mv(dst.high_gp(), zero_reg); + MacroAssembler::Lw(dst.low_gp(), src_op); + MacroAssembler::mv(dst.high_gp(), zero_reg); break; case LoadType::kI64Load32S: - TurboAssembler::Lw(dst.low_gp(), src_op); - TurboAssembler::srai(dst.high_gp(), dst.low_gp(), 31); + MacroAssembler::Lw(dst.low_gp(), src_op); + MacroAssembler::srai(dst.high_gp(), dst.low_gp(), 31); break; case LoadType::kI32Load: - TurboAssembler::Lw(dst.gp(), src_op); + MacroAssembler::Lw(dst.gp(), src_op); break; case LoadType::kI64Load: { Lw(dst.low_gp(), src_op); @@ -303,16 +303,16 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr, Lw(dst.high_gp(), src_op); } break; case LoadType::kF32Load: - TurboAssembler::LoadFloat(dst.fp(), src_op); + MacroAssembler::LoadFloat(dst.fp(), src_op); break; case LoadType::kF64Load: - TurboAssembler::LoadDouble(dst.fp(), src_op); + MacroAssembler::LoadDouble(dst.fp(), src_op); break; case LoadType::kS128Load: { VU.set(kScratchReg, E8, m1); Register src_reg = src_op.offset() == 0 ? 
src_op.rm() : kScratchReg; if (src_op.offset() != 0) { - TurboAssembler::AddWord(src_reg, src_op.rm(), src_op.offset()); + MacroAssembler::AddWord(src_reg, src_op.rm(), src_op.offset()); } vl(dst.fp().toV(), src_reg, 0, E8); break; @@ -362,29 +362,29 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg, Sb(src.low_gp(), dst_op); break; case StoreType::kI32Store16: - TurboAssembler::Sh(src.gp(), dst_op); + MacroAssembler::Sh(src.gp(), dst_op); break; case StoreType::kI64Store16: - TurboAssembler::Sh(src.low_gp(), dst_op); + MacroAssembler::Sh(src.low_gp(), dst_op); break; case StoreType::kI32Store: - TurboAssembler::Sw(src.gp(), dst_op); + MacroAssembler::Sw(src.gp(), dst_op); break; case StoreType::kI64Store32: - TurboAssembler::Sw(src.low_gp(), dst_op); + MacroAssembler::Sw(src.low_gp(), dst_op); break; case StoreType::kI64Store: { - TurboAssembler::Sw(src.low_gp(), dst_op); + MacroAssembler::Sw(src.low_gp(), dst_op); dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm + kSystemPointerSize, scratch); - TurboAssembler::Sw(src.high_gp(), dst_op); + MacroAssembler::Sw(src.high_gp(), dst_op); break; } case StoreType::kF32Store: - TurboAssembler::StoreFloat(src.fp(), dst_op); + MacroAssembler::StoreFloat(src.fp(), dst_op); break; case StoreType::kF64Store: - TurboAssembler::StoreDouble(src.fp(), dst_op); + MacroAssembler::StoreDouble(src.fp(), dst_op); break; case StoreType::kS128Store: { VU.set(kScratchReg, E8, m1); @@ -926,14 +926,14 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset, StoreFloat(kScratchDoubleReg, dst); break; case kF64: - TurboAssembler::LoadDouble(kScratchDoubleReg, src); - TurboAssembler::StoreDouble(kScratchDoubleReg, dst); + MacroAssembler::LoadDouble(kScratchDoubleReg, src); + MacroAssembler::StoreDouble(kScratchDoubleReg, dst); break; case kS128: { VU.set(kScratchReg, E8, m1); Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg; if (src.offset() != 0) { - TurboAssembler::AddWord(src_reg, src.rm(), src.offset()); + MacroAssembler::AddWord(src_reg, src.rm(), src.offset()); } vl(kSimd128ScratchReg, src_reg, 0, E8); Register dst_reg = dst.offset() == 0 ? dst.rm() : kScratchReg; @@ -951,16 +951,16 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset, void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) { DCHECK_NE(dst, src); // TODO(ksreten): Handle different sizes here. 
- TurboAssembler::Move(dst, src); + MacroAssembler::Move(dst, src); } void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, ValueKind kind) { DCHECK_NE(dst, src); if (kind != kS128) { - TurboAssembler::Move(dst, src); + MacroAssembler::Move(dst, src); } else { - TurboAssembler::vmv_vv(dst.toV(), dst.toV()); + MacroAssembler::vmv_vv(dst.toV(), dst.toV()); } } @@ -982,7 +982,7 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) { StoreFloat(reg.fp(), dst); break; case kF64: - TurboAssembler::StoreDouble(reg.fp(), dst); + MacroAssembler::StoreDouble(reg.fp(), dst); break; case kS128: { VU.set(kScratchReg, E8, m1); @@ -1006,7 +1006,7 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) { case kRef: case kRefNull: { LiftoffRegister tmp = GetUnusedRegister(kGpReg, {}); - TurboAssembler::li(tmp.gp(), Operand(value.to_i32())); + MacroAssembler::li(tmp.gp(), Operand(value.to_i32())); Sw(tmp.gp(), dst); break; } @@ -1015,8 +1015,8 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) { int32_t low_word = value.to_i64(); int32_t high_word = value.to_i64() >> 32; - TurboAssembler::li(tmp.low_gp(), Operand(low_word)); - TurboAssembler::li(tmp.high_gp(), Operand(high_word)); + MacroAssembler::li(tmp.low_gp(), Operand(low_word)); + MacroAssembler::li(tmp.high_gp(), Operand(high_word)); Sw(tmp.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord)); Sw(tmp.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord)); @@ -1046,13 +1046,13 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) { LoadFloat(reg.fp(), src); break; case kF64: - TurboAssembler::LoadDouble(reg.fp(), src); + MacroAssembler::LoadDouble(reg.fp(), src); break; case kS128: { VU.set(kScratchReg, E8, m1); Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg; if (src.offset() != 0) { - TurboAssembler::AddWord(src_reg, src.rm(), src.offset()); + MacroAssembler::AddWord(src_reg, src.rm(), src.offset()); } vl(reg.fp().toV(), src_reg, 0, E8); break; @@ -1140,8 +1140,8 @@ bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst, // Produce partial popcnts in the two dst registers. Register src1 = src.high_gp() == dst.low_gp() ? src.high_gp() : src.low_gp(); Register src2 = src.high_gp() == dst.low_gp() ? src.low_gp() : src.high_gp(); - TurboAssembler::Popcnt32(dst.low_gp(), src1, kScratchReg); - TurboAssembler::Popcnt32(dst.high_gp(), src2, kScratchReg); + MacroAssembler::Popcnt32(dst.low_gp(), src1, kScratchReg); + MacroAssembler::Popcnt32(dst.high_gp(), src2, kScratchReg); // Now add the two into the lower dst reg and clear the higher dst reg. AddWord(dst.low_gp(), dst.low_gp(), dst.high_gp()); mv(dst.high_gp(), zero_reg); @@ -1149,40 +1149,40 @@ bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst, } void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) { - TurboAssembler::Mul(dst, lhs, rhs); + MacroAssembler::Mul(dst, lhs, rhs); } void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs, Label* trap_div_by_zero, Label* trap_div_unrepresentable) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); // Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable. 
- TurboAssembler::CompareI(kScratchReg, lhs, Operand(kMinInt), ne); - TurboAssembler::CompareI(kScratchReg2, rhs, Operand(-1), ne); + MacroAssembler::CompareI(kScratchReg, lhs, Operand(kMinInt), ne); + MacroAssembler::CompareI(kScratchReg2, rhs, Operand(-1), ne); add(kScratchReg, kScratchReg, kScratchReg2); - TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, + MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, Operand(zero_reg)); - TurboAssembler::Div(dst, lhs, rhs); + MacroAssembler::Div(dst, lhs, rhs); } void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); - TurboAssembler::Divu(dst, lhs, rhs); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); + MacroAssembler::Divu(dst, lhs, rhs); } void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); - TurboAssembler::Mod(dst, lhs, rhs); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); + MacroAssembler::Mod(dst, lhs, rhs); } void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); - TurboAssembler::Modu(dst, lhs, rhs); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); + MacroAssembler::Modu(dst, lhs, rhs); } #define I32_BINOP(name, instruction) \ @@ -1218,15 +1218,15 @@ I32_BINOP_I(xor, Xor) #undef I32_BINOP_I void LiftoffAssembler::emit_i32_clz(Register dst, Register src) { - TurboAssembler::Clz32(dst, src); + MacroAssembler::Clz32(dst, src); } void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) { - TurboAssembler::Ctz32(dst, src); + MacroAssembler::Ctz32(dst, src); } bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) { - TurboAssembler::Popcnt32(dst, src, kScratchReg); + MacroAssembler::Popcnt32(dst, src, kScratchReg); return true; } @@ -1254,7 +1254,7 @@ I32_SHIFTOP_I(shr, srli) void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { - TurboAssembler::MulPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(), + MacroAssembler::MulPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(), lhs.high_gp(), rhs.low_gp(), rhs.high_gp(), kScratchReg, kScratchReg2); } @@ -1294,7 +1294,7 @@ inline bool IsRegInRegPair(LiftoffRegister pair, Register reg) { inline void Emit64BitShiftOperation( LiftoffAssembler* assm, LiftoffRegister dst, LiftoffRegister src, Register amount, - void (TurboAssembler::*emit_shift)(Register, Register, Register, Register, + void (MacroAssembler::*emit_shift)(Register, Register, Register, Register, Register, Register, Register)) { LiftoffRegList pinned{dst, src, amount}; @@ -1313,8 +1313,8 @@ inline void Emit64BitShiftOperation( kScratchReg2); // Place result in destination register. 
- assm->TurboAssembler::Move(dst.high_gp(), tmp.high_gp()); - assm->TurboAssembler::Move(dst.low_gp(), tmp.low_gp()); + assm->MacroAssembler::Move(dst.high_gp(), tmp.high_gp()); + assm->MacroAssembler::Move(dst.low_gp(), tmp.low_gp()); } else { (assm->*emit_shift)(dst.low_gp(), dst.high_gp(), src.low_gp(), src.high_gp(), amount_capped, kScratchReg, @@ -1325,7 +1325,7 @@ inline void Emit64BitShiftOperation( void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { - TurboAssembler::AddPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(), + MacroAssembler::AddPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(), lhs.high_gp(), rhs.low_gp(), rhs.high_gp(), kScratchReg, kScratchReg2); } @@ -1339,16 +1339,16 @@ void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs, // TODO(riscv32): are there some optimization we can make without // materializing? - TurboAssembler::li(imm_reg.low_gp(), imm_low_word); - TurboAssembler::li(imm_reg.high_gp(), imm_high_word); - TurboAssembler::AddPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(), + MacroAssembler::li(imm_reg.low_gp(), imm_low_word); + MacroAssembler::li(imm_reg.high_gp(), imm_high_word); + MacroAssembler::AddPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(), lhs.high_gp(), imm_reg.low_gp(), imm_reg.high_gp(), kScratchReg, kScratchReg2); } void LiftoffAssembler::emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { - TurboAssembler::SubPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(), + MacroAssembler::SubPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(), lhs.high_gp(), rhs.low_gp(), rhs.high_gp(), kScratchReg, kScratchReg2); } @@ -1357,7 +1357,7 @@ void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src, Register amount) { ASM_CODE_COMMENT(this); liftoff::Emit64BitShiftOperation(this, dst, src, amount, - &TurboAssembler::ShlPair); + &MacroAssembler::ShlPair); } void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src, @@ -1374,14 +1374,14 @@ void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src, DCHECK_NE(dst.low_gp(), kScratchReg); DCHECK_NE(dst.high_gp(), kScratchReg); - TurboAssembler::ShlPair(dst.low_gp(), dst.high_gp(), src_low, src_high, + MacroAssembler::ShlPair(dst.low_gp(), dst.high_gp(), src_low, src_high, amount, kScratchReg, kScratchReg2); } void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src, Register amount) { liftoff::Emit64BitShiftOperation(this, dst, src, amount, - &TurboAssembler::SarPair); + &MacroAssembler::SarPair); } void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src, @@ -1397,14 +1397,14 @@ void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src, DCHECK_NE(dst.low_gp(), kScratchReg); DCHECK_NE(dst.high_gp(), kScratchReg); - TurboAssembler::SarPair(dst.low_gp(), dst.high_gp(), src_low, src_high, + MacroAssembler::SarPair(dst.low_gp(), dst.high_gp(), src_low, src_high, amount, kScratchReg, kScratchReg2); } void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src, Register amount) { liftoff::Emit64BitShiftOperation(this, dst, src, amount, - &TurboAssembler::ShrPair); + &MacroAssembler::ShrPair); } void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, LiftoffRegister src, @@ -1420,7 +1420,7 @@ void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, LiftoffRegister src, DCHECK_NE(dst.low_gp(), kScratchReg); DCHECK_NE(dst.high_gp(), kScratchReg); - TurboAssembler::ShrPair(dst.low_gp(), dst.high_gp(), 
src_low, src_high, + MacroAssembler::ShrPair(dst.low_gp(), dst.high_gp(), src_low, src_high, amount, kScratchReg, kScratchReg2); } @@ -1441,7 +1441,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, LiftoffRegister src, Label* trap) { switch (opcode) { case kExprI32ConvertI64: - TurboAssembler::Move(dst.gp(), src.low_gp()); + MacroAssembler::Move(dst.gp(), src.low_gp()); return true; case kExprI32SConvertF32: case kExprI32UConvertF32: @@ -1481,22 +1481,22 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, // Checking if trap. if (trap != nullptr) { - TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); + MacroAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); } return true; } case kExprI32ReinterpretF32: - TurboAssembler::ExtractLowWordFromF64(dst.gp(), src.fp()); + MacroAssembler::ExtractLowWordFromF64(dst.gp(), src.fp()); return true; case kExprI64SConvertI32: - TurboAssembler::Move(dst.low_gp(), src.gp()); - TurboAssembler::Move(dst.high_gp(), src.gp()); + MacroAssembler::Move(dst.low_gp(), src.gp()); + MacroAssembler::Move(dst.high_gp(), src.gp()); srai(dst.high_gp(), dst.high_gp(), 31); return true; case kExprI64UConvertI32: - TurboAssembler::Move(dst.low_gp(), src.gp()); - TurboAssembler::Move(dst.high_gp(), zero_reg); + MacroAssembler::Move(dst.low_gp(), src.gp()); + MacroAssembler::Move(dst.high_gp(), zero_reg); return true; case kExprI64ReinterpretF64: SubWord(sp, sp, kDoubleSize); @@ -1506,21 +1506,21 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, AddWord(sp, sp, kDoubleSize); return true; case kExprF32SConvertI32: { - TurboAssembler::Cvt_s_w(dst.fp(), src.gp()); + MacroAssembler::Cvt_s_w(dst.fp(), src.gp()); return true; } case kExprF32UConvertI32: - TurboAssembler::Cvt_s_uw(dst.fp(), src.gp()); + MacroAssembler::Cvt_s_uw(dst.fp(), src.gp()); return true; case kExprF32ReinterpretI32: fmv_w_x(dst.fp(), src.gp()); return true; case kExprF64SConvertI32: { - TurboAssembler::Cvt_d_w(dst.fp(), src.gp()); + MacroAssembler::Cvt_d_w(dst.fp(), src.gp()); return true; } case kExprF64UConvertI32: - TurboAssembler::Cvt_d_uw(dst.fp(), src.gp()); + MacroAssembler::Cvt_d_uw(dst.fp(), src.gp()); return true; case kExprF64ConvertF32: fcvt_d_s(dst.fp(), src.fp()); @@ -1591,11 +1591,11 @@ void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst, } void LiftoffAssembler::emit_jump(Label* label) { - TurboAssembler::Branch(label); + MacroAssembler::Branch(label); } void LiftoffAssembler::emit_jump(Register target) { - TurboAssembler::Jump(target); + MacroAssembler::Jump(target); } void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label, @@ -1604,34 +1604,34 @@ void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label, const FreezeCacheState& frozen) { if (rhs == no_reg) { DCHECK(kind == kI32); - TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg)); + MacroAssembler::Branch(label, cond, lhs, Operand(zero_reg)); } else { DCHECK((kind == kI32) || (is_reference(kind) && (cond == kEqual || cond == kNotEqual))); - TurboAssembler::Branch(label, cond, lhs, Operand(rhs)); + MacroAssembler::Branch(label, cond, lhs, Operand(rhs)); } } void LiftoffAssembler::emit_i32_cond_jumpi(Condition cond, Label* label, Register lhs, int32_t imm, const FreezeCacheState& frozen) { - TurboAssembler::Branch(label, cond, lhs, Operand(imm)); + MacroAssembler::Branch(label, cond, lhs, Operand(imm)); } void LiftoffAssembler::emit_i32_subi_jump_negative( Register value, int subtrahend, Label* result_negative, const 
FreezeCacheState& frozen) { SubWord(value, value, Operand(subtrahend)); - TurboAssembler::Branch(result_negative, lt, value, Operand(zero_reg)); + MacroAssembler::Branch(result_negative, lt, value, Operand(zero_reg)); } void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) { - TurboAssembler::Sltu(dst, src, 1); + MacroAssembler::Sltu(dst, src, 1); } void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst, Register lhs, Register rhs) { - TurboAssembler::CompareI(dst, lhs, Operand(rhs), cond); + MacroAssembler::CompareI(dst, lhs, Operand(rhs), cond); } void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) { @@ -1675,7 +1675,7 @@ void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst, } // Write 1 initially in tmp register. - TurboAssembler::li(tmp, 1); + MacroAssembler::li(tmp, 1); // If high words are equal, then compare low words, else compare high. Branch(&low, eq, lhs.high_gp(), Operand(rhs.high_gp())); @@ -1701,7 +1701,7 @@ void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst, } bind(&cont); // Move result to dst register if needed. - TurboAssembler::Move(dst, tmp); + MacroAssembler::Move(dst, tmp); } void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) { diff --git a/src/wasm/baseline/riscv/liftoff-assembler-riscv64.h b/src/wasm/baseline/riscv/liftoff-assembler-riscv64.h index c5cdebcbee..afdc3e6a1c 100644 --- a/src/wasm/baseline/riscv/liftoff-assembler-riscv64.h +++ b/src/wasm/baseline/riscv/liftoff-assembler-riscv64.h @@ -153,17 +153,17 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value, RelocInfo::Mode rmode) { switch (value.type().kind()) { case kI32: - TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode)); + MacroAssembler::li(reg.gp(), Operand(value.to_i32(), rmode)); break; case kI64: - TurboAssembler::li(reg.gp(), Operand(value.to_i64(), rmode)); + MacroAssembler::li(reg.gp(), Operand(value.to_i64(), rmode)); break; case kF32: - TurboAssembler::LoadFPRImmediate(reg.fp(), + MacroAssembler::LoadFPRImmediate(reg.fp(), value.to_f32_boxed().get_bits()); break; case kF64: - TurboAssembler::LoadFPRImmediate(reg.fp(), + MacroAssembler::LoadFPRImmediate(reg.fp(), value.to_f64_boxed().get_bits()); break; default: @@ -237,33 +237,33 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr, break; case LoadType::kI32Load16U: case LoadType::kI64Load16U: - TurboAssembler::Lhu(dst.gp(), src_op); + MacroAssembler::Lhu(dst.gp(), src_op); break; case LoadType::kI32Load16S: case LoadType::kI64Load16S: - TurboAssembler::Lh(dst.gp(), src_op); + MacroAssembler::Lh(dst.gp(), src_op); break; case LoadType::kI64Load32U: - TurboAssembler::Lwu(dst.gp(), src_op); + MacroAssembler::Lwu(dst.gp(), src_op); break; case LoadType::kI32Load: case LoadType::kI64Load32S: - TurboAssembler::Lw(dst.gp(), src_op); + MacroAssembler::Lw(dst.gp(), src_op); break; case LoadType::kI64Load: - TurboAssembler::Ld(dst.gp(), src_op); + MacroAssembler::Ld(dst.gp(), src_op); break; case LoadType::kF32Load: - TurboAssembler::LoadFloat(dst.fp(), src_op); + MacroAssembler::LoadFloat(dst.fp(), src_op); break; case LoadType::kF64Load: - TurboAssembler::LoadDouble(dst.fp(), src_op); + MacroAssembler::LoadDouble(dst.fp(), src_op); break; case LoadType::kS128Load: { VU.set(kScratchReg, E8, m1); Register src_reg = src_op.offset() == 0 ? 
src_op.rm() : kScratchReg; if (src_op.offset() != 0) { - TurboAssembler::AddWord(src_reg, src_op.rm(), src_op.offset()); + MacroAssembler::AddWord(src_reg, src_op.rm(), src_op.offset()); } vl(dst.fp().toV(), src_reg, 0, E8); break; @@ -310,20 +310,20 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg, break; case StoreType::kI32Store16: case StoreType::kI64Store16: - TurboAssembler::Sh(src.gp(), dst_op); + MacroAssembler::Sh(src.gp(), dst_op); break; case StoreType::kI32Store: case StoreType::kI64Store32: - TurboAssembler::Sw(src.gp(), dst_op); + MacroAssembler::Sw(src.gp(), dst_op); break; case StoreType::kI64Store: - TurboAssembler::Sd(src.gp(), dst_op); + MacroAssembler::Sd(src.gp(), dst_op); break; case StoreType::kF32Store: - TurboAssembler::StoreFloat(src.fp(), dst_op); + MacroAssembler::StoreFloat(src.fp(), dst_op); break; case StoreType::kF64Store: - TurboAssembler::StoreDouble(src.fp(), dst_op); + MacroAssembler::StoreDouble(src.fp(), dst_op); break; case StoreType::kS128Store: { VU.set(kScratchReg, E8, m1); @@ -692,14 +692,14 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset, StoreFloat(kScratchDoubleReg, dst); break; case kF64: - TurboAssembler::LoadDouble(kScratchDoubleReg, src); - TurboAssembler::StoreDouble(kScratchDoubleReg, dst); + MacroAssembler::LoadDouble(kScratchDoubleReg, src); + MacroAssembler::StoreDouble(kScratchDoubleReg, dst); break; case kS128: { VU.set(kScratchReg, E8, m1); Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg; if (src.offset() != 0) { - TurboAssembler::Add64(src_reg, src.rm(), src.offset()); + MacroAssembler::Add64(src_reg, src.rm(), src.offset()); } vl(kSimd128ScratchReg, src_reg, 0, E8); Register dst_reg = dst.offset() == 0 ? dst.rm() : kScratchReg; @@ -720,16 +720,16 @@ void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset, void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) { DCHECK_NE(dst, src); // TODO(ksreten): Handle different sizes here. 
- TurboAssembler::Move(dst, src); + MacroAssembler::Move(dst, src); } void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, ValueKind kind) { DCHECK_NE(dst, src); if (kind != kS128) { - TurboAssembler::Move(dst, src); + MacroAssembler::Move(dst, src); } else { - TurboAssembler::vmv_vv(dst.toV(), src.toV()); + MacroAssembler::vmv_vv(dst.toV(), src.toV()); } } @@ -750,7 +750,7 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) { StoreFloat(reg.fp(), dst); break; case kF64: - TurboAssembler::StoreDouble(reg.fp(), dst); + MacroAssembler::StoreDouble(reg.fp(), dst); break; case kS128: { VU.set(kScratchReg, E8, m1); @@ -773,7 +773,7 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) { case kI32: { UseScratchRegisterScope temps(this); Register tmp = temps.Acquire(); - TurboAssembler::li(tmp, Operand(value.to_i32())); + MacroAssembler::li(tmp, Operand(value.to_i32())); Sw(tmp, dst); break; } @@ -782,7 +782,7 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) { case kRefNull: { UseScratchRegisterScope temps(this); Register tmp = temps.Acquire(); - TurboAssembler::li(tmp, value.to_i64()); + MacroAssembler::li(tmp, value.to_i64()); Sd(tmp, dst); break; } @@ -808,13 +808,13 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) { LoadFloat(reg.fp(), src); break; case kF64: - TurboAssembler::LoadDouble(reg.fp(), src); + MacroAssembler::LoadDouble(reg.fp(), src); break; case kS128: { VU.set(kScratchReg, E8, m1); Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg; if (src.offset() != 0) { - TurboAssembler::Add64(src_reg, src.rm(), src.offset()); + MacroAssembler::Add64(src_reg, src.rm(), src.offset()); } vl(reg.fp().toV(), src_reg, 0, E8); break; @@ -861,54 +861,54 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) { } void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) { - TurboAssembler::Clz64(dst.gp(), src.gp()); + MacroAssembler::Clz64(dst.gp(), src.gp()); } void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) { - TurboAssembler::Ctz64(dst.gp(), src.gp()); + MacroAssembler::Ctz64(dst.gp(), src.gp()); } bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst, LiftoffRegister src) { - TurboAssembler::Popcnt64(dst.gp(), src.gp(), kScratchReg); + MacroAssembler::Popcnt64(dst.gp(), src.gp(), kScratchReg); return true; } void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) { - TurboAssembler::Mul32(dst, lhs, rhs); + MacroAssembler::Mul32(dst, lhs, rhs); } void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs, Label* trap_div_by_zero, Label* trap_div_unrepresentable) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); // Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable. 
- TurboAssembler::CompareI(kScratchReg, lhs, Operand(kMinInt), ne); - TurboAssembler::CompareI(kScratchReg2, rhs, Operand(-1), ne); + MacroAssembler::CompareI(kScratchReg, lhs, Operand(kMinInt), ne); + MacroAssembler::CompareI(kScratchReg2, rhs, Operand(-1), ne); add(kScratchReg, kScratchReg, kScratchReg2); - TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, + MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, Operand(zero_reg)); - TurboAssembler::Div32(dst, lhs, rhs); + MacroAssembler::Div32(dst, lhs, rhs); } void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); - TurboAssembler::Divu32(dst, lhs, rhs); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); + MacroAssembler::Divu32(dst, lhs, rhs); } void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); - TurboAssembler::Mod32(dst, lhs, rhs); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); + MacroAssembler::Mod32(dst, lhs, rhs); } void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); - TurboAssembler::Modu32(dst, lhs, rhs); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); + MacroAssembler::Modu32(dst, lhs, rhs); } #define I32_BINOP(name, instruction) \ @@ -944,15 +944,15 @@ I32_BINOP_I(xor, Xor) #undef I32_BINOP_I void LiftoffAssembler::emit_i32_clz(Register dst, Register src) { - TurboAssembler::Clz32(dst, src); + MacroAssembler::Clz32(dst, src); } void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) { - TurboAssembler::Ctz32(dst, src); + MacroAssembler::Ctz32(dst, src); } bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) { - TurboAssembler::Popcnt32(dst, src, kScratchReg); + MacroAssembler::Popcnt32(dst, src, kScratchReg); return true; } @@ -980,48 +980,48 @@ I32_SHIFTOP_I(shr, srliw) void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { - TurboAssembler::Mul64(dst.gp(), lhs.gp(), rhs.gp()); + MacroAssembler::Mul64(dst.gp(), lhs.gp(), rhs.gp()); } bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label* trap_div_by_zero, Label* trap_div_unrepresentable) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); // Check if lhs == MinInt64 and rhs == -1, since this case is unrepresentable. 
- TurboAssembler::CompareI(kScratchReg, lhs.gp(), + MacroAssembler::CompareI(kScratchReg, lhs.gp(), Operand(std::numeric_limits::min()), ne); - TurboAssembler::CompareI(kScratchReg2, rhs.gp(), Operand(-1), ne); + MacroAssembler::CompareI(kScratchReg2, rhs.gp(), Operand(-1), ne); add(kScratchReg, kScratchReg, kScratchReg2); - TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, + MacroAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg, Operand(zero_reg)); - TurboAssembler::Div64(dst.gp(), lhs.gp(), rhs.gp()); + MacroAssembler::Div64(dst.gp(), lhs.gp(), rhs.gp()); return true; } bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); - TurboAssembler::Divu64(dst.gp(), lhs.gp(), rhs.gp()); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); + MacroAssembler::Divu64(dst.gp(), lhs.gp(), rhs.gp()); return true; } bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); - TurboAssembler::Mod64(dst.gp(), lhs.gp(), rhs.gp()); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); + MacroAssembler::Mod64(dst.gp(), lhs.gp(), rhs.gp()); return true; } bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs, Label* trap_div_by_zero) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); - TurboAssembler::Modu64(dst.gp(), lhs.gp(), rhs.gp()); + MacroAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg)); + MacroAssembler::Modu64(dst.gp(), lhs.gp(), rhs.gp()); return true; } @@ -1098,7 +1098,7 @@ void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, LiftoffRegister src, void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs, int64_t imm) { - TurboAssembler::Add64(dst.gp(), lhs.gp(), Operand(imm)); + MacroAssembler::Add64(dst.gp(), lhs.gp(), Operand(imm)); } void LiftoffAssembler::emit_u32_to_uintptr(Register dst, Register src) { ZeroExtendWord(dst, src); @@ -1125,7 +1125,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, // According to WebAssembly spec, if I64 value does not fit the range of // I32, the value is undefined. Therefore, We use sign extension to // implement I64 to I32 truncation - TurboAssembler::SignExtendWord(dst.gp(), src.gp()); + MacroAssembler::SignExtendWord(dst.gp(), src.gp()); return true; case kExprI32SConvertF32: case kExprI32UConvertF32: @@ -1172,39 +1172,39 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, // Checking if trap. 
if (trap != nullptr) { - TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); + MacroAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg)); } return true; } case kExprI32ReinterpretF32: - TurboAssembler::ExtractLowWordFromF64(dst.gp(), src.fp()); + MacroAssembler::ExtractLowWordFromF64(dst.gp(), src.fp()); return true; case kExprI64SConvertI32: - TurboAssembler::SignExtendWord(dst.gp(), src.gp()); + MacroAssembler::SignExtendWord(dst.gp(), src.gp()); return true; case kExprI64UConvertI32: - TurboAssembler::ZeroExtendWord(dst.gp(), src.gp()); + MacroAssembler::ZeroExtendWord(dst.gp(), src.gp()); return true; case kExprI64ReinterpretF64: fmv_x_d(dst.gp(), src.fp()); return true; case kExprF32SConvertI32: { - TurboAssembler::Cvt_s_w(dst.fp(), src.gp()); + MacroAssembler::Cvt_s_w(dst.fp(), src.gp()); return true; } case kExprF32UConvertI32: - TurboAssembler::Cvt_s_uw(dst.fp(), src.gp()); + MacroAssembler::Cvt_s_uw(dst.fp(), src.gp()); return true; case kExprF32ReinterpretI32: fmv_w_x(dst.fp(), src.gp()); return true; case kExprF64SConvertI32: { - TurboAssembler::Cvt_d_w(dst.fp(), src.gp()); + MacroAssembler::Cvt_d_w(dst.fp(), src.gp()); return true; } case kExprF64UConvertI32: - TurboAssembler::Cvt_d_uw(dst.fp(), src.gp()); + MacroAssembler::Cvt_d_uw(dst.fp(), src.gp()); return true; case kExprF64ConvertF32: fcvt_d_s(dst.fp(), src.fp()); @@ -1286,11 +1286,11 @@ void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst, } void LiftoffAssembler::emit_jump(Label* label) { - TurboAssembler::Branch(label); + MacroAssembler::Branch(label); } void LiftoffAssembler::emit_jump(Register target) { - TurboAssembler::Jump(target); + MacroAssembler::Jump(target); } void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label, @@ -1299,44 +1299,44 @@ void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label, const FreezeCacheState& frozen) { if (rhs == no_reg) { DCHECK(kind == kI32 || kind == kI64); - TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg)); + MacroAssembler::Branch(label, cond, lhs, Operand(zero_reg)); } else { DCHECK((kind == kI32 || kind == kI64) || (is_reference(kind) && (cond == kEqual || cond == kNotEqual))); - TurboAssembler::Branch(label, cond, lhs, Operand(rhs)); + MacroAssembler::Branch(label, cond, lhs, Operand(rhs)); } } void LiftoffAssembler::emit_i32_cond_jumpi(Condition cond, Label* label, Register lhs, int32_t imm, const FreezeCacheState& frozen) { - TurboAssembler::Branch(label, cond, lhs, Operand(imm)); + MacroAssembler::Branch(label, cond, lhs, Operand(imm)); } void LiftoffAssembler::emit_i32_subi_jump_negative( Register value, int subtrahend, Label* result_negative, const FreezeCacheState& frozen) { Sub64(value, value, Operand(subtrahend)); - TurboAssembler::Branch(result_negative, lt, value, Operand(zero_reg)); + MacroAssembler::Branch(result_negative, lt, value, Operand(zero_reg)); } void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) { - TurboAssembler::Sltu(dst, src, 1); + MacroAssembler::Sltu(dst, src, 1); } void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst, Register lhs, Register rhs) { - TurboAssembler::CompareI(dst, lhs, Operand(rhs), cond); + MacroAssembler::CompareI(dst, lhs, Operand(rhs), cond); } void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) { - TurboAssembler::Sltu(dst, src.gp(), 1); + MacroAssembler::Sltu(dst, src.gp(), 1); } void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst, LiftoffRegister lhs, LiftoffRegister rhs) { - 
TurboAssembler::CompareI(dst, lhs.gp(), Operand(rhs.gp()), cond); + MacroAssembler::CompareI(dst, lhs.gp(), Operand(rhs.gp()), cond); } void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) { diff --git a/src/wasm/baseline/s390/liftoff-assembler-s390.h b/src/wasm/baseline/s390/liftoff-assembler-s390.h index d4f92e2031..1e621fb1d1 100644 --- a/src/wasm/baseline/s390/liftoff-assembler-s390.h +++ b/src/wasm/baseline/s390/liftoff-assembler-s390.h @@ -18,26 +18,6 @@ namespace wasm { namespace liftoff { -inline constexpr bool UseSignedOp(Condition cond) { - switch (cond) { - case kEqual: - case kNotEqual: - case kLessThan: - case kLessThanEqual: - case kGreaterThan: - case kGreaterThanEqual: - return true; - case kUnsignedLessThan: - case kUnsignedLessThanEqual: - case kUnsignedGreaterThan: - case kUnsignedGreaterThanEqual: - return false; - default: - UNREACHABLE(); - } - return false; -} - // half // slot Frame // -----+--------------------+--------------------------- @@ -182,7 +162,7 @@ void LiftoffAssembler::PatchPrepareStackFrame( bind(&continuation); // Now allocate the stack space. Note that this might do more than just - // decrementing the SP; consult {TurboAssembler::AllocateStackSpace}. + // decrementing the SP; consult {MacroAssembler::AllocateStackSpace}. lay(sp, MemOperand(sp, -frame_size)); // Jump back to the start of the function, from {pc_offset()} to @@ -266,7 +246,7 @@ void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, Register instance, int offset) { DCHECK_LE(0, offset); - LoadTaggedPointerField(dst, MemOperand(instance, offset)); + LoadTaggedField(dst, MemOperand(instance, offset)); } void LiftoffAssembler::SpillInstance(Register instance) { @@ -284,7 +264,7 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr, ShiftLeftU64(ip, offset_reg, Operand(shift_amount)); offset_reg = ip; } - LoadTaggedPointerField( + LoadTaggedField( dst, MemOperand(src_addr, offset_reg == no_reg ? 
r0 : offset_reg, offset_imm)); } @@ -315,7 +295,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr, bind(&write_barrier); JumpIfSmi(src.gp(), &exit); if (COMPRESS_POINTERS_BOOL) { - DecompressTaggedPointer(src.gp(), src.gp()); + DecompressTagged(src.gp(), src.gp()); } CheckPageFlag(src.gp(), r1, MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask, @@ -2106,7 +2086,7 @@ void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label, ValueKind kind, Register lhs, Register rhs, const FreezeCacheState& frozen) { - bool use_signed = liftoff::UseSignedOp(cond); + bool use_signed = is_signed(cond); if (rhs != no_reg) { switch (kind) { @@ -2151,19 +2131,19 @@ void LiftoffAssembler::emit_cond_jump(Condition cond, Label* label, CmpS32(lhs, Operand::Zero()); } - b(cond, label); + b(to_condition(cond), label); } void LiftoffAssembler::emit_i32_cond_jumpi(Condition cond, Label* label, Register lhs, int32_t imm, const FreezeCacheState& frozen) { - bool use_signed = liftoff::UseSignedOp(cond); + bool use_signed = is_signed(cond); if (use_signed) { CmpS32(lhs, Operand(imm)); } else { CmpU32(lhs, Operand(imm)); } - b(cond, label); + b(to_condition(cond), label); } #define EMIT_EQZ(test, src) \ @@ -2198,14 +2178,14 @@ void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) { void LiftoffAssembler::emit_i32_set_cond(Condition cond, Register dst, Register lhs, Register rhs) { - bool use_signed = liftoff::UseSignedOp(cond); + bool use_signed = is_signed(cond); if (use_signed) { CmpS32(lhs, rhs); } else { CmpU32(lhs, rhs); } - EMIT_SET_CONDITION(dst, cond); + EMIT_SET_CONDITION(dst, to_condition(cond)); } void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) { @@ -2215,28 +2195,28 @@ void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) { void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst, LiftoffRegister lhs, LiftoffRegister rhs) { - bool use_signed = liftoff::UseSignedOp(cond); + bool use_signed = is_signed(cond); if (use_signed) { CmpS64(lhs.gp(), rhs.gp()); } else { CmpU64(lhs.gp(), rhs.gp()); } - EMIT_SET_CONDITION(dst, cond); + EMIT_SET_CONDITION(dst, to_condition(cond)); } void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst, DoubleRegister lhs, DoubleRegister rhs) { cebr(lhs, rhs); - EMIT_SET_CONDITION(dst, cond); + EMIT_SET_CONDITION(dst, to_condition(cond)); } void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst, DoubleRegister lhs, DoubleRegister rhs) { cdbr(lhs, rhs); - EMIT_SET_CONDITION(dst, cond); + EMIT_SET_CONDITION(dst, to_condition(cond)); } bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition, @@ -2966,7 +2946,7 @@ void LiftoffAssembler::CallTrapCallbackForTesting() { void LiftoffAssembler::AssertUnreachable(AbortReason reason) { // Asserts unreachable within the wasm code. 
- TurboAssembler::AssertUnreachable(reason); + MacroAssembler::AssertUnreachable(reason); } void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { @@ -3120,7 +3100,7 @@ void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) { void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) { lay(sp, MemOperand(sp, -size)); - TurboAssembler::Move(addr, sp); + MacroAssembler::Move(addr, sp); } void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { diff --git a/src/wasm/baseline/x64/liftoff-assembler-x64.h b/src/wasm/baseline/x64/liftoff-assembler-x64.h index e1a98a890a..5c0c3d4ac3 100644 --- a/src/wasm/baseline/x64/liftoff-assembler-x64.h +++ b/src/wasm/baseline/x64/liftoff-assembler-x64.h @@ -66,7 +66,7 @@ inline Operand GetMemOp(LiftoffAssembler* assm, Register addr, } // Offset immediate does not fit in 31 bits. Register scratch = kScratchRegister; - assm->TurboAssembler::Move(scratch, offset_imm); + assm->MacroAssembler::Move(scratch, offset_imm); if (offset_reg != no_reg) assm->addq(scratch, offset_reg); return Operand(addr, scratch, scale_factor, 0); } @@ -270,7 +270,7 @@ void LiftoffAssembler::PatchPrepareStackFrame( bind(&continuation); // Now allocate the stack space. Note that this might do more than just - // decrementing the SP; consult {TurboAssembler::AllocateStackSpace}. + // decrementing the SP; consult {MacroAssembler::AllocateStackSpace}. AllocateStackSpace(frame_size); // Jump back to the start of the function, from {pc_offset()} to @@ -309,16 +309,16 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value, break; case kI64: if (RelocInfo::IsNoInfo(rmode)) { - TurboAssembler::Move(reg.gp(), value.to_i64()); + MacroAssembler::Move(reg.gp(), value.to_i64()); } else { movq(reg.gp(), Immediate64(value.to_i64(), rmode)); } break; case kF32: - TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits()); + MacroAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits()); break; case kF64: - TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits()); + MacroAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits()); break; default: UNREACHABLE(); @@ -352,7 +352,7 @@ void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, Register instance, int offset) { DCHECK_LE(0, offset); - LoadTaggedPointerField(dst, Operand(instance, offset)); + LoadTaggedField(dst, Operand(instance, offset)); } void LiftoffAssembler::LoadExternalPointer(Register dst, Register instance, @@ -381,7 +381,7 @@ void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr, Operand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, static_cast(offset_imm), scale_factor); - LoadTaggedPointerField(dst, src_op); + LoadTaggedField(dst, src_op); } void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr, @@ -414,7 +414,7 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr, bind(&write_barrier); JumpIfSmi(src.gp(), &exit, Label::kNear); if (COMPRESS_POINTERS_BOOL) { - DecompressTaggedPointer(src.gp(), src.gp()); + DecompressTagged(src.gp(), src.gp()); } CheckPageFlag(src.gp(), scratch, MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask, @@ -1339,7 +1339,7 @@ void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs, void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs, int64_t imm) { if (!is_int32(imm)) { - TurboAssembler::Move(kScratchRegister, imm); + MacroAssembler::Move(kScratchRegister, imm); if (lhs.gp() == dst.gp()) { addq(dst.gp(), kScratchRegister); } 
else { @@ -1640,10 +1640,10 @@ void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs, void LiftoffAssembler::emit_f32_abs(DoubleRegister dst, DoubleRegister src) { static constexpr uint32_t kSignBit = uint32_t{1} << 31; if (dst == src) { - TurboAssembler::Move(kScratchDoubleReg, kSignBit - 1); + MacroAssembler::Move(kScratchDoubleReg, kSignBit - 1); Andps(dst, kScratchDoubleReg); } else { - TurboAssembler::Move(dst, kSignBit - 1); + MacroAssembler::Move(dst, kSignBit - 1); Andps(dst, src); } } @@ -1651,10 +1651,10 @@ void LiftoffAssembler::emit_f32_abs(DoubleRegister dst, DoubleRegister src) { void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) { static constexpr uint32_t kSignBit = uint32_t{1} << 31; if (dst == src) { - TurboAssembler::Move(kScratchDoubleReg, kSignBit); + MacroAssembler::Move(kScratchDoubleReg, kSignBit); Xorps(dst, kScratchDoubleReg); } else { - TurboAssembler::Move(dst, kSignBit); + MacroAssembler::Move(dst, kSignBit); Xorps(dst, src); } } @@ -1773,10 +1773,10 @@ void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs, void LiftoffAssembler::emit_f64_abs(DoubleRegister dst, DoubleRegister src) { static constexpr uint64_t kSignBit = uint64_t{1} << 63; if (dst == src) { - TurboAssembler::Move(kScratchDoubleReg, kSignBit - 1); + MacroAssembler::Move(kScratchDoubleReg, kSignBit - 1); Andpd(dst, kScratchDoubleReg); } else { - TurboAssembler::Move(dst, kSignBit - 1); + MacroAssembler::Move(dst, kSignBit - 1); Andpd(dst, src); } } @@ -1784,10 +1784,10 @@ void LiftoffAssembler::emit_f64_abs(DoubleRegister dst, DoubleRegister src) { void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) { static constexpr uint64_t kSignBit = uint64_t{1} << 63; if (dst == src) { - TurboAssembler::Move(kScratchDoubleReg, kSignBit); + MacroAssembler::Move(kScratchDoubleReg, kSignBit); Xorpd(dst, kScratchDoubleReg); } else { - TurboAssembler::Move(dst, kSignBit); + MacroAssembler::Move(dst, kSignBit); Xorpd(dst, src); } } @@ -2234,7 +2234,8 @@ void LiftoffAssembler::emit_i64_set_cond(Condition cond, Register dst, } namespace liftoff { -template +template void EmitFloatSetCond(LiftoffAssembler* assm, Condition cond, Register dst, DoubleRegister lhs, DoubleRegister rhs) { Label cont; @@ -2261,14 +2262,14 @@ void EmitFloatSetCond(LiftoffAssembler* assm, Condition cond, Register dst, void LiftoffAssembler::emit_f32_set_cond(Condition cond, Register dst, DoubleRegister lhs, DoubleRegister rhs) { - liftoff::EmitFloatSetCond<&TurboAssembler::Ucomiss>(this, cond, dst, lhs, + liftoff::EmitFloatSetCond<&MacroAssembler::Ucomiss>(this, cond, dst, lhs, rhs); } void LiftoffAssembler::emit_f64_set_cond(Condition cond, Register dst, DoubleRegister lhs, DoubleRegister rhs) { - liftoff::EmitFloatSetCond<&TurboAssembler::Ucomisd>(this, cond, dst, lhs, + liftoff::EmitFloatSetCond<&MacroAssembler::Ucomisd>(this, cond, dst, lhs, rhs); } @@ -2394,7 +2395,7 @@ inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst, assm->setcc(not_equal, dst.gp()); } -template +template inline void EmitAllTrue(LiftoffAssembler* assm, LiftoffRegister dst, LiftoffRegister src, base::Optional feature = base::nullopt) { @@ -2501,7 +2502,7 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst, uint32_t imms[4]; // Shuffles that use just 1 operand are called swizzles, rhs can be ignored. 
wasm::SimdShuffle::Pack16Lanes(imms, shuffle); - TurboAssembler::Move(kScratchDoubleReg, make_uint64(imms[3], imms[2]), + MacroAssembler::Move(kScratchDoubleReg, make_uint64(imms[3], imms[2]), make_uint64(imms[1], imms[0])); Pshufb(dst.fp(), lhs.fp(), kScratchDoubleReg); return; @@ -2514,7 +2515,7 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst, mask1[j] <<= 8; mask1[j] |= lane < kSimd128Size ? lane : 0x80; } - TurboAssembler::Move(liftoff::kScratchDoubleReg2, mask1[1], mask1[0]); + MacroAssembler::Move(liftoff::kScratchDoubleReg2, mask1[1], mask1[0]); Pshufb(kScratchDoubleReg, lhs.fp(), liftoff::kScratchDoubleReg2); uint64_t mask2[2] = {}; @@ -2524,7 +2525,7 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst, mask2[j] <<= 8; mask2[j] |= lane >= kSimd128Size ? (lane & 0x0F) : 0x80; } - TurboAssembler::Move(liftoff::kScratchDoubleReg2, mask2[1], mask2[0]); + MacroAssembler::Move(liftoff::kScratchDoubleReg2, mask2[1], mask2[0]); Pshufb(dst.fp(), rhs.fp(), liftoff::kScratchDoubleReg2); Por(dst.fp(), kScratchDoubleReg); @@ -2901,7 +2902,7 @@ void LiftoffAssembler::emit_s128_const(LiftoffRegister dst, const uint8_t imms[16]) { uint64_t vals[2]; memcpy(vals, imms, sizeof(vals)); - TurboAssembler::Move(dst.fp(), vals[1], vals[0]); + MacroAssembler::Move(dst.fp(), vals[1], vals[0]); } void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) { @@ -2959,7 +2960,7 @@ void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst, void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst, LiftoffRegister src) { - liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqb>(this, dst, src); + liftoff::EmitAllTrue<&MacroAssembler::Pcmpeqb>(this, dst, src); } void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst, @@ -3084,7 +3085,7 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst, void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst, LiftoffRegister src) { - liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqw>(this, dst, src); + liftoff::EmitAllTrue<&MacroAssembler::Pcmpeqw>(this, dst, src); } void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst, @@ -3294,7 +3295,7 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst, void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst, LiftoffRegister src) { - liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqd>(this, dst, src); + liftoff::EmitAllTrue<&MacroAssembler::Pcmpeqd>(this, dst, src); } void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst, @@ -3462,7 +3463,7 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst, void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst, LiftoffRegister src) { - liftoff::EmitAllTrue<&TurboAssembler::Pcmpeqq>(this, dst, src, SSE4_1); + liftoff::EmitAllTrue<&MacroAssembler::Pcmpeqq>(this, dst, src, SSE4_1); } void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs, @@ -4161,7 +4162,7 @@ void LiftoffAssembler::CallTrapCallbackForTesting() { } void LiftoffAssembler::AssertUnreachable(AbortReason reason) { - TurboAssembler::AssertUnreachable(reason); + MacroAssembler::AssertUnreachable(reason); } void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { diff --git a/src/wasm/function-body-decoder-impl.h b/src/wasm/function-body-decoder-impl.h index b33e9c9570..9cb7f84722 100644 --- a/src/wasm/function-body-decoder-impl.h +++ b/src/wasm/function-body-decoder-impl.h @@ -1194,7 +1194,8 @@ struct ControlBase : public PcForErrors { F(StringViewIterSlice, const Value& view, const Value& codepoints, \ Value* 
result) \ F(StringCompare, const Value& lhs, const Value& rhs, Value* result) \ - F(StringFromCodePoint, const Value& code_point, Value* result) + F(StringFromCodePoint, const Value& code_point, Value* result) \ + F(StringHash, const Value& string, Value* result) // This is a global constant invalid instruction trace, to be pointed at by // the current instruction trace pointer in the default case @@ -2328,6 +2329,7 @@ class WasmDecoder : public Decoder { case kExprStringEncodeWtf16Array: case kExprStringCompare: case kExprStringFromCodePoint: + case kExprStringHash: return length; default: // This is unreachable except for malformed modules. @@ -2531,6 +2533,7 @@ class WasmDecoder : public Decoder { case kExprStringViewWtf16Length: case kExprStringViewIterNext: case kExprStringFromCodePoint: + case kExprStringHash: return { 1, 1 }; case kExprStringNewUtf8: case kExprStringNewUtf8Try: @@ -5125,27 +5128,29 @@ class WasmFullDecoder : public WasmDecoder { // Temporary non-standard instruction, for performance experiments. if (!VALIDATE(this->enabled_.has_ref_cast_nop())) { this->DecodeError( - "Invalid opcode 0xfb48 (enable with " + "Invalid opcode 0xfb4c (enable with " "--experimental-wasm-ref-cast-nop)"); return 0; } - IndexImmediate imm(this, this->pc_ + opcode_length, "type index", - validate); - if (!this->ValidateType(this->pc_ + opcode_length, imm)) return 0; + HeapTypeImmediate imm(this->enabled_, this, this->pc_ + opcode_length, + validate); + if (!this->Validate(this->pc_ + opcode_length, imm)) return 0; opcode_length += imm.length; + HeapType target_type = imm.type; Value obj = Peek(0); - if (!VALIDATE(IsSubtypeOf(obj.type, kWasmFuncRef, this->module_) || - IsSubtypeOf(obj.type, kWasmStructRef, this->module_) || - IsSubtypeOf(obj.type, kWasmArrayRef, this->module_) || + if (!VALIDATE((obj.type.is_object_reference() && + IsSameTypeHierarchy(obj.type.heap_type(), target_type, + this->module_)) || obj.type.is_bottom())) { - PopTypeError(0, obj, - "subtype of (ref null func), (ref null struct) or (ref " - "null array)"); + this->DecodeError( + obj.pc(), + "Invalid types for %s: %s of type %s has to " + "be in the same reference type hierarchy as (ref %s)", + WasmOpcodes::OpcodeName(opcode), SafeOpcodeNameAt(obj.pc()), + obj.type.name().c_str(), target_type.name().c_str()); return 0; } - Value value = CreateValue(ValueType::RefMaybeNull( - imm.index, - obj.type.is_bottom() ? kNonNullable : obj.type.nullability())); + Value value = CreateValue(ValueType::Ref(target_type)); CALL_INTERFACE_IF_OK_AND_REACHABLE(Forward, obj, &value); Drop(obj); Push(value); @@ -6187,6 +6192,15 @@ class WasmFullDecoder : public WasmDecoder { Push(result); return opcode_length; } + case kExprStringHash: { + NON_CONST_ONLY + Value string = Peek(0, 0, kWasmStringRef); + Value result = CreateValue(kWasmI32); + CALL_INTERFACE_IF_OK_AND_REACHABLE(StringHash, string, &result); + Drop(1); + Push(result); + return opcode_length; + } default: this->DecodeError("invalid stringref opcode: %x", opcode); return 0; diff --git a/src/wasm/graph-builder-interface.cc b/src/wasm/graph-builder-interface.cc index 24b4fc091b..3a176604ee 100644 --- a/src/wasm/graph-builder-interface.cc +++ b/src/wasm/graph-builder-interface.cc @@ -1723,12 +1723,9 @@ class WasmGraphBuildingInterface { } void StringAsWtf16(FullDecoder* decoder, const Value& str, Value* result) { - // Since we implement stringview_wtf16 as string, that's the type we'll - // use for the Node. 
(The decoder's Value type must be stringview_wtf16 - // because static type validation relies on it.) - result->node = builder_->SetType( - builder_->AssertNotNull(str.node, decoder->position()), - ValueType::Ref(HeapType::kString)); + SetAndTypeNode(result, + builder_->StringAsWtf16(str.node, NullCheckFor(str.type), + decoder->position())); } void StringViewWtf16GetCodeUnit(FullDecoder* decoder, const Value& view, @@ -1802,6 +1799,12 @@ class WasmGraphBuildingInterface { SetAndTypeNode(result, builder_->StringFromCodePoint(code_point.node)); } + void StringHash(FullDecoder* decoder, const Value& string, Value* result) { + SetAndTypeNode(result, + builder_->StringHash(string.node, NullCheckFor(string.type), + decoder->position())); + } + void Forward(FullDecoder* decoder, const Value& from, Value* to) { if (from.type == to->type) { to->node = from.node; diff --git a/src/wasm/jump-table-assembler.cc b/src/wasm/jump-table-assembler.cc index e2f5e2b85b..79c5e50c73 100644 --- a/src/wasm/jump-table-assembler.cc +++ b/src/wasm/jump-table-assembler.cc @@ -203,7 +203,7 @@ bool JumpTableAssembler::EmitJumpSlot(Address target) { ptrdiff_t jump_distance = reinterpret_cast(target) - jump_pc; DCHECK_EQ(0, jump_distance % kInstrSize); int64_t instr_offset = jump_distance / kInstrSize; - if (!TurboAssembler::IsNearCallOffset(instr_offset)) { + if (!MacroAssembler::IsNearCallOffset(instr_offset)) { return false; } diff --git a/src/wasm/jump-table-assembler.h b/src/wasm/jump-table-assembler.h index b545d51a28..eeb399996b 100644 --- a/src/wasm/jump-table-assembler.h +++ b/src/wasm/jump-table-assembler.h @@ -57,7 +57,7 @@ namespace wasm { // execute the old code afterwards, which is no problem, since that code remains // available until it is garbage collected. Garbage collection itself is a // synchronization barrier though. -class V8_EXPORT_PRIVATE JumpTableAssembler : public TurboAssembler { +class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler { public: // Translate an offset into the continuous jump table to a jump table index. static uint32_t SlotOffsetToIndex(uint32_t slot_offset) { @@ -175,7 +175,7 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public TurboAssembler { private: // Instantiate a {JumpTableAssembler} for patching. explicit JumpTableAssembler(Address slot_addr, int size = 256) - : TurboAssembler(nullptr, JumpTableAssemblerOptions(), + : MacroAssembler(nullptr, JumpTableAssemblerOptions(), CodeObjectRequired::kNo, ExternalAssemblerBuffer( reinterpret_cast(slot_addr), size)) {} diff --git a/src/wasm/wasm-code-manager.h b/src/wasm/wasm-code-manager.h index 7c0cf78270..019acfafcf 100644 --- a/src/wasm/wasm-code-manager.h +++ b/src/wasm/wasm-code-manager.h @@ -132,6 +132,7 @@ struct WasmModule; V(WasmStringConcat) \ V(WasmStringEqual) \ V(WasmStringIsUSVSequence) \ + V(WasmStringAsWtf16) \ V(WasmStringViewWtf16GetCodeUnit) \ V(WasmStringViewWtf16Encode) \ V(WasmStringViewWtf16Slice) \ @@ -150,6 +151,7 @@ struct WasmModule; V(WasmStringViewIterSlice) \ V(WasmStringCompare) \ V(WasmStringFromCodePoint) \ + V(WasmStringHash) \ V(WasmExternInternalize) // Sorted, disjoint and non-overlapping memory regions. 
A region is of the diff --git a/src/wasm/wasm-opcodes.h b/src/wasm/wasm-opcodes.h index 91e9a4f159..c3840e8689 100644 --- a/src/wasm/wasm-opcodes.h +++ b/src/wasm/wasm-opcodes.h @@ -765,6 +765,7 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig); V(StringViewIterSlice, 0xfba4, _, "stringview_iter.slice") \ V(StringCompare, 0xfba8, _, "string.compare") \ V(StringFromCodePoint, 0xfba9, _, "string.from_code_point") \ + V(StringHash, 0xfbaa, _, "string.hash") \ V(StringNewUtf8Array, 0xfbb0, _, "string.new_utf8_array") \ V(StringNewWtf16Array, 0xfbb1, _, "string.new_wtf16_array") \ V(StringEncodeUtf8Array, 0xfbb2, _, "string.encode_utf8_array") \ diff --git a/test/cctest/compiler/test-code-generator.cc b/test/cctest/compiler/test-code-generator.cc index d52574c303..511e30f063 100644 --- a/test/cctest/compiler/test-code-generator.cc +++ b/test/cctest/compiler/test-code-generator.cc @@ -838,7 +838,8 @@ class TestEnvironment : public HandleAndZoneScope { for (auto move : *moves) { int to_index = OperandToStatePosition( TeardownLayout(), AllocatedOperand::cast(move->destination())); - state_out->set(to_index, GetMoveSource(state_in, move)); + Object source = GetMoveSource(state_in, move); + state_out->set(to_index, source); } // If we generated redundant moves, they were eliminated automatically and // don't appear in the parallel move. Simulate them now. @@ -1147,7 +1148,7 @@ class CodeGeneratorTester { Builtin::kNoBuiltinId, kMaxUnoptimizedFrameHeight, kMaxPushedArgumentCount); - generator_->tasm()->CodeEntry(); + generator_->masm()->CodeEntry(); // Force a frame to be created. generator_->frame_access_state()->MarkHasFrame(true); @@ -1239,10 +1240,10 @@ class CodeGeneratorTester { void CheckAssembleMove(InstructionOperand* source, InstructionOperand* destination) { - int start = generator_->tasm()->pc_offset(); + int start = generator_->masm()->pc_offset(); generator_->AssembleMove(MaybeTranslateSlot(source), MaybeTranslateSlot(destination)); - CHECK(generator_->tasm()->pc_offset() > start); + CHECK(generator_->masm()->pc_offset() > start); } void CheckAssembleMoves(ParallelMove* moves) { @@ -1255,15 +1256,15 @@ class CodeGeneratorTester { void CheckAssembleSwap(InstructionOperand* source, InstructionOperand* destination) { - int start = generator_->tasm()->pc_offset(); + int start = generator_->masm()->pc_offset(); generator_->AssembleSwap(MaybeTranslateSlot(source), MaybeTranslateSlot(destination)); - CHECK(generator_->tasm()->pc_offset() > start); + CHECK(generator_->masm()->pc_offset() > start); } Handle Finalize() { generator_->FinishCode(); - generator_->safepoints()->Emit(generator_->tasm(), + generator_->safepoints()->Emit(generator_->masm(), frame_.GetTotalFrameSlotCount()); generator_->MaybeEmitOutOfLineConstantPool(); diff --git a/test/cctest/test-assembler-arm64.cc b/test/cctest/test-assembler-arm64.cc index 27b06a14ee..282488b0d5 100644 --- a/test/cctest/test-assembler-arm64.cc +++ b/test/cctest/test-assembler-arm64.cc @@ -12408,7 +12408,7 @@ static void PushPopSimpleHelper(int reg_count, int reg_size, case PushPopByFour: // Push high-numbered registers first (to the highest addresses). for (i = reg_count; i >= 4; i -= 4) { - __ Push(r[i - 1], r[i - 2], r[i - 3], + __ Push(r[i - 1], r[i - 2], r[i - 3], r[i - 4]); } // Finish off the leftovers. @@ -12433,7 +12433,7 @@ static void PushPopSimpleHelper(int reg_count, int reg_size, case PushPopByFour: // Pop low-numbered registers first (from the lowest addresses). 
for (i = 0; i <= (reg_count-4); i += 4) { - __ Pop(r[i], r[i + 1], r[i + 2], + __ Pop(r[i], r[i + 1], r[i + 2], r[i + 3]); } // Finish off the leftovers. @@ -12975,7 +12975,7 @@ TEST(copy_double_words_downwards_even) { __ SlotAddress(x5, 12); __ SlotAddress(x6, 11); __ Mov(x7, 12); - __ CopyDoubleWords(x5, x6, x7, TurboAssembler::kSrcLessThanDst); + __ CopyDoubleWords(x5, x6, x7, MacroAssembler::kSrcLessThanDst); __ Pop(xzr, x4, x5, x6); __ Pop(x7, x8, x9, x10); @@ -13029,7 +13029,7 @@ TEST(copy_double_words_downwards_odd) { __ SlotAddress(x5, 13); __ SlotAddress(x6, 12); __ Mov(x7, 13); - __ CopyDoubleWords(x5, x6, x7, TurboAssembler::kSrcLessThanDst); + __ CopyDoubleWords(x5, x6, x7, MacroAssembler::kSrcLessThanDst); __ Pop(xzr, x4); __ Pop(x5, x6, x7, x8); @@ -13085,13 +13085,13 @@ TEST(copy_noop) { __ SlotAddress(x5, 3); __ SlotAddress(x6, 2); __ Mov(x7, 0); - __ CopyDoubleWords(x5, x6, x7, TurboAssembler::kSrcLessThanDst); + __ CopyDoubleWords(x5, x6, x7, MacroAssembler::kSrcLessThanDst); // dst < src, count == 0 __ SlotAddress(x5, 2); __ SlotAddress(x6, 3); __ Mov(x7, 0); - __ CopyDoubleWords(x5, x6, x7, TurboAssembler::kDstLessThanSrc); + __ CopyDoubleWords(x5, x6, x7, MacroAssembler::kDstLessThanSrc); __ Pop(x1, x2, x3, x4); __ Pop(x5, x6, x7, x8); diff --git a/test/cctest/test-assembler-mips64.cc b/test/cctest/test-assembler-mips64.cc index b8d6b29f54..5e075118f8 100644 --- a/test/cctest/test-assembler-mips64.cc +++ b/test/cctest/test-assembler-mips64.cc @@ -6195,11 +6195,11 @@ TEST(Trampoline_with_massive_unbound_labels) { MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); const int kNumSlots = - TurboAssembler::kMaxBranchOffset / TurboAssembler::kTrampolineSlotsSize; + MacroAssembler::kMaxBranchOffset / MacroAssembler::kTrampolineSlotsSize; Label labels[kNumSlots]; { - TurboAssembler::BlockTrampolinePoolScope block_trampoline_pool(&assm); + MacroAssembler::BlockTrampolinePoolScope block_trampoline_pool(&assm); for (int i = 0; i < kNumSlots; i++) { __ Branch(&labels[i]); } @@ -6218,12 +6218,12 @@ TEST(Call_with_trampoline) { int next_buffer_check_ = v8_flags.force_long_branches ? kMaxInt - : TurboAssembler::kMaxBranchOffset - - TurboAssembler::kTrampolineSlotsSize * 16; + : MacroAssembler::kMaxBranchOffset - + MacroAssembler::kTrampolineSlotsSize * 16; Label done; __ Branch(&done); - next_buffer_check_ -= TurboAssembler::kTrampolineSlotsSize; + next_buffer_check_ -= MacroAssembler::kTrampolineSlotsSize; int num_nops = (next_buffer_check_ - __ pc_offset()) / kInstrSize - 1; for (int i = 0; i < num_nops; i++) { diff --git a/test/cctest/test-shared-strings.cc b/test/cctest/test-shared-strings.cc index c40d6fc5a5..89b23e4b37 100644 --- a/test/cctest/test-shared-strings.cc +++ b/test/cctest/test-shared-strings.cc @@ -357,33 +357,49 @@ class ConcurrentInternalizationThread final namespace { +Handle CreateSharedOneByteString(Isolate* isolate, Factory* factory, + int length, bool internalize) { + char* ascii = new char[length + 1]; + // Don't make single character strings, which will end up deduplicating to + // an RO string and mess up the string table hit test. + CHECK_GT(length, 1); + for (int j = 0; j < length; j++) ascii[j] = 'a'; + ascii[length] = '\0'; + if (internalize) { + // When testing concurrent string table hits, pre-internalize a string + // of the same contents so all subsequent internalizations are hits. 
+ factory->InternalizeString(factory->NewStringFromAsciiChecked(ascii)); + } + Handle string = String::Share( + isolate, factory->NewStringFromAsciiChecked(ascii, AllocationType::kOld)); + delete[] ascii; + CHECK(string->IsShared()); + string->EnsureHash(); + return string; +} + Handle CreateSharedOneByteStrings(Isolate* isolate, Factory* factory, int count, - int min_length = 2, + int lo_count, int min_length = 2, bool internalize = false) { Handle shared_strings = - factory->NewFixedArray(count, AllocationType::kSharedOld); + factory->NewFixedArray(count + lo_count, AllocationType::kSharedOld); { // Create strings in their own scope to be able to delete and GC them. HandleScope scope(isolate); for (int i = 0; i < count; i++) { - char* ascii = new char[i + min_length + 1]; - // Don't make single character strings, which will end up deduplicating to - // an RO string and mess up the string table hit test. - for (int j = 0; j < i + min_length; j++) ascii[j] = 'a'; - ascii[i + min_length] = '\0'; - if (internalize) { - // When testing concurrent string table hits, pre-internalize a string - // of the same contents so all subsequent internalizations are hits. - factory->InternalizeString(factory->NewStringFromAsciiChecked(ascii)); - } - Handle string = String::Share( - isolate, - factory->NewStringFromAsciiChecked(ascii, AllocationType::kOld)); - CHECK(string->IsShared()); - string->EnsureHash(); + int length = i + min_length + 1; + Handle string = + CreateSharedOneByteString(isolate, factory, length, internalize); shared_strings->set(i, *string); - delete[] ascii; + } + int min_lo_length = + isolate->heap()->MaxRegularHeapObjectSize(AllocationType::kOld) + 1; + for (int i = 0; i < lo_count; i++) { + int length = i + min_lo_length + 1; + Handle string = + CreateSharedOneByteString(isolate, factory, length, internalize); + shared_strings->set(count + i, *string); } } return shared_strings; @@ -396,6 +412,7 @@ void TestConcurrentInternalization(TestHitOrMiss hit_or_miss) { constexpr int kThreads = 4; constexpr int kStrings = 4096; + constexpr int kLOStrings = 16; MultiClientIsolateTest test; Isolate* i_isolate = test.i_main_isolate(); @@ -403,8 +420,9 @@ void TestConcurrentInternalization(TestHitOrMiss hit_or_miss) { HandleScope scope(i_isolate); - Handle shared_strings = CreateSharedOneByteStrings( - i_isolate, factory, kStrings, 2, hit_or_miss == kTestHit); + Handle shared_strings = + CreateSharedOneByteStrings(i_isolate, factory, kStrings - kLOStrings, + kLOStrings, 2, hit_or_miss == kTestHit); ParkingSemaphore sema_ready(0); ParkingSemaphore sema_execute_start(0); @@ -479,6 +497,7 @@ UNINITIALIZED_TEST(ConcurrentStringTableLookup) { constexpr int kTotalThreads = 4; constexpr int kInternalizationThreads = 1; constexpr int kStrings = 4096; + constexpr int kLOStrings = 16; MultiClientIsolateTest test; Isolate* i_isolate = test.i_main_isolate(); @@ -486,8 +505,8 @@ UNINITIALIZED_TEST(ConcurrentStringTableLookup) { HandleScope scope(i_isolate); - Handle shared_strings = - CreateSharedOneByteStrings(i_isolate, factory, kStrings, 2, false); + Handle shared_strings = CreateSharedOneByteStrings( + i_isolate, factory, kStrings - kLOStrings, kLOStrings, 2, false); ParkingSemaphore sema_ready(0); ParkingSemaphore sema_execute_start(0); @@ -1070,6 +1089,7 @@ UNINITIALIZED_TEST(InternalizedSharedStringsTransitionDuringGC) { v8_flags.transition_strings_during_gc_with_stack = true; constexpr int kStrings = 4096; + constexpr int kLOStrings = 16; MultiClientIsolateTest test; Isolate* i_isolate = 
test.i_main_isolate(); @@ -1079,8 +1099,8 @@ UNINITIALIZED_TEST(InternalizedSharedStringsTransitionDuringGC) { // Run two times to test that everything is reset correctly during GC. for (int run = 0; run < 2; run++) { - Handle shared_strings = - CreateSharedOneByteStrings(i_isolate, factory, kStrings, 2, run == 0); + Handle shared_strings = CreateSharedOneByteStrings( + i_isolate, factory, kStrings - kLOStrings, kLOStrings, 2, run == 0); // Check strings are in the forwarding table after internalization. for (int i = 0; i < shared_strings->length(); i++) { @@ -1217,6 +1237,7 @@ UNINITIALIZED_TEST(ExternalizedSharedStringsTransitionDuringGC) { MultiClientIsolateTest test; constexpr int kStrings = 4096; + constexpr int kLOStrings = 16; Isolate* i_isolate = test.i_main_isolate(); Factory* factory = i_isolate->factory(); @@ -1226,7 +1247,7 @@ UNINITIALIZED_TEST(ExternalizedSharedStringsTransitionDuringGC) { // Run two times to test that everything is reset correctly during GC. for (int run = 0; run < 2; run++) { Handle shared_strings = CreateSharedOneByteStrings( - i_isolate, factory, kStrings, ExternalString::kUncachedSize, run == 0); + i_isolate, factory, kStrings - kLOStrings, kLOStrings, 2, run == 0); // Check strings are in the forwarding table after internalization. for (int i = 0; i < shared_strings->length(); i++) { @@ -1648,6 +1669,7 @@ void TestConcurrentExternalization(bool share_resources) { constexpr int kThreads = 4; constexpr int kStrings = 4096; + constexpr int kLOStrings = 16; Isolate* i_isolate = test.i_main_isolate(); Factory* factory = i_isolate->factory(); @@ -1655,7 +1677,8 @@ void TestConcurrentExternalization(bool share_resources) { HandleScope scope(i_isolate); Handle shared_strings = CreateSharedOneByteStrings( - i_isolate, factory, kStrings, ExternalString::kUncachedSize, false); + i_isolate, factory, kStrings - kLOStrings, kLOStrings, + ExternalString::kUncachedSize, false); ParkingSemaphore sema_ready(0); ParkingSemaphore sema_execute_start(0); @@ -1731,6 +1754,7 @@ void TestConcurrentExternalizationWithDeadStrings(bool share_resources, constexpr int kThreads = 4; constexpr int kStrings = 12; + constexpr int kLOStrings = 2; Isolate* i_isolate = test.i_main_isolate(); Factory* factory = i_isolate->factory(); @@ -1738,7 +1762,8 @@ void TestConcurrentExternalizationWithDeadStrings(bool share_resources, HandleScope scope(i_isolate); Handle shared_strings = CreateSharedOneByteStrings( - i_isolate, factory, kStrings, ExternalString::kUncachedSize, false); + i_isolate, factory, kStrings - kLOStrings, kLOStrings, + ExternalString::kUncachedSize, false); ParkingSemaphore sema_ready(0); ParkingSemaphore sema_execute_start(0); @@ -1860,6 +1885,7 @@ void TestConcurrentExternalizationAndInternalization( constexpr int kTotalThreads = kInternalizationThreads + kExternalizationThreads; constexpr int kStrings = 4096; + constexpr int kLOStrings = 16; Isolate* i_isolate = test.i_main_isolate(); Factory* factory = i_isolate->factory(); @@ -1867,8 +1893,8 @@ void TestConcurrentExternalizationAndInternalization( HandleScope scope(i_isolate); Handle shared_strings = CreateSharedOneByteStrings( - i_isolate, factory, kStrings, ExternalString::kUncachedSize, - hit_or_miss == kTestHit); + i_isolate, factory, kStrings - kLOStrings, kLOStrings, + ExternalString::kUncachedSize, hit_or_miss == kTestHit); ParkingSemaphore sema_ready(0); ParkingSemaphore sema_execute_start(0); diff --git a/test/cctest/wasm/test-grow-memory.cc b/test/cctest/wasm/test-grow-memory.cc index a984fc9706..b796e8ff9a 
100644 --- a/test/cctest/wasm/test-grow-memory.cc +++ b/test/cctest/wasm/test-grow-memory.cc @@ -114,8 +114,7 @@ TEST(Run_WasmModule_Buffer_Externalized_GrowMem) { handle(memory_object->array_buffer(), isolate)); // Grow using an internal Wasm bytecode. - result = testing::CallWasmFunctionForTesting(isolate, instance, "main", 0, - nullptr); + result = testing::CallWasmFunctionForTesting(isolate, instance, "main", {}); CHECK_EQ(26, result); CHECK(external2.buffer_->was_detached()); // growing always detaches CHECK_EQ(0, external2.buffer_->byte_length()); diff --git a/test/cctest/wasm/test-run-wasm-module.cc b/test/cctest/wasm/test-run-wasm-module.cc index 8de6b4224d..e46be43703 100644 --- a/test/cctest/wasm/test-run-wasm-module.cc +++ b/test/cctest/wasm/test-run-wasm-module.cc @@ -138,7 +138,7 @@ TEST(Run_WasmModule_CompilationHintsLazy) { isolate, &thrower, module.ToHandleChecked(), {}, {}); CHECK(!instance.is_null()); int32_t result = testing::CallWasmFunctionForTesting( - isolate, instance.ToHandleChecked(), "main", 0, nullptr); + isolate, instance.ToHandleChecked(), "main", {}); CHECK_EQ(kReturnValue, result); // Lazy function was invoked and therefore compiled. @@ -578,7 +578,7 @@ TEST(TestInterruptLoop) { InterruptThread thread(isolate, memory_array); CHECK(thread.Start()); - testing::CallWasmFunctionForTesting(isolate, instance, "main", 0, nullptr); + testing::CallWasmFunctionForTesting(isolate, instance, "main", {}); Address address = reinterpret_cast
( &memory_array[InterruptThread::interrupt_location_]); CHECK_EQ(InterruptThread::interrupt_value_, @@ -658,16 +658,17 @@ TEST(Run_WasmModule_GrowMemOobFixedIndex) { // Initial memory size is 16 pages, should trap till index > MemSize on // consecutive GrowMem calls for (uint32_t i = 1; i < 5; i++) { - Handle params[1] = {Handle(Smi::FromInt(i), isolate)}; + Handle params[1] = {handle(Smi::FromInt(i), isolate)}; v8::TryCatch try_catch(reinterpret_cast(isolate)); - testing::CallWasmFunctionForTesting(isolate, instance, "main", 1, params); + testing::CallWasmFunctionForTesting(isolate, instance, "main", + base::ArrayVector(params)); CHECK(try_catch.HasCaught()); isolate->clear_pending_exception(); } - Handle params[1] = {Handle(Smi::FromInt(1), isolate)}; - int32_t result = testing::CallWasmFunctionForTesting(isolate, instance, - "main", 1, params); + Handle params[1] = {handle(Smi::FromInt(1), isolate)}; + int32_t result = testing::CallWasmFunctionForTesting( + isolate, instance, "main", base::ArrayVector(params)); CHECK_EQ(0xACED, result); } Cleanup(); @@ -708,23 +709,24 @@ TEST(Run_WasmModule_GrowMemOobVariableIndex) { Handle params[1] = { Handle(Smi::FromInt((16 + i) * kPageSize - 3), isolate)}; v8::TryCatch try_catch(reinterpret_cast(isolate)); - testing::CallWasmFunctionForTesting(isolate, instance, "main", 1, params); + testing::CallWasmFunctionForTesting(isolate, instance, "main", + base::ArrayVector(params)); CHECK(try_catch.HasCaught()); isolate->clear_pending_exception(); } for (int i = 1; i < 5; i++) { Handle params[1] = { - Handle(Smi::FromInt((20 + i) * kPageSize - 4), isolate)}; - int32_t result = testing::CallWasmFunctionForTesting(isolate, instance, - "main", 1, params); + handle(Smi::FromInt((20 + i) * kPageSize - 4), isolate)}; + int32_t result = testing::CallWasmFunctionForTesting( + isolate, instance, "main", base::ArrayVector(params)); CHECK_EQ(0xACED, result); } v8::TryCatch try_catch(reinterpret_cast(isolate)); - Handle params[1] = { - Handle(Smi::FromInt(25 * kPageSize), isolate)}; - testing::CallWasmFunctionForTesting(isolate, instance, "main", 1, params); + Handle params[1] = {handle(Smi::FromInt(25 * kPageSize), isolate)}; + testing::CallWasmFunctionForTesting(isolate, instance, "main", + base::ArrayVector(params)); CHECK(try_catch.HasCaught()); isolate->clear_pending_exception(); } diff --git a/test/cctest/wasm/test-wasm-serialization.cc b/test/cctest/wasm/test-wasm-serialization.cc index 27daee1762..b23bf65e5a 100644 --- a/test/cctest/wasm/test-wasm-serialization.cc +++ b/test/cctest/wasm/test-wasm-serialization.cc @@ -96,10 +96,10 @@ class WasmSerializationTest { Handle::null(), MaybeHandle()) .ToHandleChecked(); - Handle params[1] = { - Handle(Smi::FromInt(41), CcTest::i_isolate())}; + Handle params[1] = {handle(Smi::FromInt(41), CcTest::i_isolate())}; int32_t result = testing::CallWasmFunctionForTesting( - CcTest::i_isolate(), instance, kFunctionName, 1, params); + CcTest::i_isolate(), instance, kFunctionName, + base::ArrayVector(params)); CHECK_EQ(42, result); } @@ -171,7 +171,7 @@ class WasmSerializationTest { CHECK_EQ(0, data_.size); while (data_.size == 0) { testing::CallWasmFunctionForTesting(serialization_isolate, instance, - kFunctionName, 0, nullptr); + kFunctionName, {}); data_ = compiled_module.Serialize(); } CHECK_LT(0, data_.size); diff --git a/test/cctest/wasm/test-wasm-shared-engine.cc b/test/cctest/wasm/test-wasm-shared-engine.cc index 2591cf92d1..d02c8ff6ad 100644 --- a/test/cctest/wasm/test-wasm-shared-engine.cc +++ 
b/test/cctest/wasm/test-wasm-shared-engine.cc @@ -73,8 +73,7 @@ class SharedEngineIsolate { } int32_t Run(Handle instance) { - return testing::CallWasmFunctionForTesting(isolate(), instance, "main", 0, - nullptr); + return testing::CallWasmFunctionForTesting(isolate(), instance, "main", {}); } private: diff --git a/test/common/wasm/wasm-module-runner.cc b/test/common/wasm/wasm-module-runner.cc index e4c5b3f2d8..bab0241c70 100644 --- a/test/common/wasm/wasm-module-runner.cc +++ b/test/common/wasm/wasm-module-runner.cc @@ -130,7 +130,7 @@ int32_t CompileAndRunWasmModule(Isolate* isolate, const byte* module_start, return -1; } return CallWasmFunctionForTesting(isolate, instance.ToHandleChecked(), "main", - 0, nullptr); + {}); } WasmInterpretationResult InterpretWasmModule( @@ -218,9 +218,10 @@ MaybeHandle GetExportedFunction( int32_t CallWasmFunctionForTesting(Isolate* isolate, Handle instance, - const char* name, int argc, - Handle argv[], bool* exception) { - if (exception) *exception = false; + const char* name, + base::Vector> args, + std::unique_ptr* exception) { + DCHECK_IMPLIES(exception != nullptr, *exception == nullptr); MaybeHandle maybe_export = GetExportedFunction(isolate, instance, name); Handle main_export; @@ -230,14 +231,18 @@ int32_t CallWasmFunctionForTesting(Isolate* isolate, // Call the JS function. Handle undefined = isolate->factory()->undefined_value(); - MaybeHandle retval = - Execution::Call(isolate, main_export, undefined, argc, argv); + MaybeHandle retval = Execution::Call(isolate, main_export, undefined, + args.length(), args.begin()); // The result should be a number. if (retval.is_null()) { DCHECK(isolate->has_pending_exception()); + if (exception) { + Handle exception_string = Object::NoSideEffectsToString( + isolate, handle(isolate->pending_exception(), isolate)); + *exception = exception_string->ToCString(); + } isolate->clear_pending_exception(); - if (exception) *exception = true; return -1; } Handle result = retval.ToHandleChecked(); diff --git a/test/common/wasm/wasm-module-runner.h b/test/common/wasm/wasm-module-runner.h index 9d5d38b733..00ddca519f 100644 --- a/test/common/wasm/wasm-module-runner.h +++ b/test/common/wasm/wasm-module-runner.h @@ -30,13 +30,12 @@ MaybeHandle GetExportedFunction( // Call an exported wasm function by name. Returns -1 if the export does not // exist or throws an error. Errors are cleared from the isolate before -// returning. {exception} is set to to true if an exception happened during -// execution of the wasm function. -int32_t CallWasmFunctionForTesting(Isolate* isolate, - Handle instance, - const char* name, int argc, - Handle argv[], - bool* exception = nullptr); +// returning. {exception} is set to a string representation of the exception (if +// set and an exception occurs). +int32_t CallWasmFunctionForTesting( + Isolate* isolate, Handle instance, const char* name, + base::Vector> args, + std::unique_ptr* exception = nullptr); // Decode, verify, and run the function labeled "main" in the // given encoded module. The module should have no imports. 
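
The wasm-module-runner.h hunk above changes CallWasmFunctionForTesting in two ways: the (argc, argv) pair becomes a base::Vector of argument handles, and the bool {exception} out-parameter becomes a smart-pointer out-parameter that hands back the exception text (filled from the pending exception before it is cleared). The stand-alone sketch below illustrates only that error-reporting pattern; the function and message names are illustrative and not part of V8's API.

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <memory>

// Stand-in for the new out-parameter style: on failure the callee fills in
// the exception text instead of just flipping a bool, and returns -1 like
// the test runner does.
int32_t RunAndReportError(bool should_fail,
                          std::unique_ptr<char[]>* exception = nullptr) {
  if (should_fail) {
    if (exception != nullptr) {
      static constexpr char kMessage[] = "RuntimeError: unreachable";
      exception->reset(new char[sizeof(kMessage)]);
      std::memcpy(exception->get(), kMessage, sizeof(kMessage));
    }
    return -1;
  }
  return 42;
}

int main() {
  std::unique_ptr<char[]> exception;
  int32_t result = RunAndReportError(/*should_fail=*/true, &exception);
  if (exception) {
    // Harnesses that compare two runs (as the fuzzer change below does) can
    // now print or diff the messages instead of comparing two booleans.
    std::printf("call failed (%d): %s\n", static_cast<int>(result),
                exception.get());
  }
  return 0;
}

As in the runner, -1 still signals failure; the presence or absence of a message is what distinguishes "the call threw" from "the call merely returned -1".
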
diff --git a/test/fuzzer/wasm-fuzzer-common.cc b/test/fuzzer/wasm-fuzzer-common.cc index 347884e610..eb219fbe29 100644 --- a/test/fuzzer/wasm-fuzzer-common.cc +++ b/test/fuzzer/wasm-fuzzer-common.cc @@ -122,10 +122,9 @@ void ExecuteAgainstReference(Isolate* isolate, base::OwnedVector<Handle<Object>> compiled_args = testing::MakeDefaultArguments(isolate, main_function->sig()); - bool exception_ref = false; + std::unique_ptr<const char[]> exception_ref; int32_t result_ref = testing::CallWasmFunctionForTesting( - isolate, instance_ref, "main", static_cast<int>(compiled_args.size()), - compiled_args.begin(), &exception_ref); + isolate, instance_ref, "main", compiled_args.as_vector(), &exception_ref); // Reached max steps, do not try to execute the test module as it might // never terminate. if (max_steps < 0) return; @@ -155,15 +154,14 @@ void ExecuteAgainstReference(Isolate* isolate, DCHECK(!thrower.error()); } - bool exception = false; + std::unique_ptr<const char[]> exception; int32_t result = testing::CallWasmFunctionForTesting( - isolate, instance, "main", static_cast<int>(compiled_args.size()), - compiled_args.begin(), &exception); + isolate, instance, "main", compiled_args.as_vector(), &exception); - if (exception_ref != exception) { - const char* exception_text[] = {"no exception", "exception"}; - FATAL("expected: %s; got: %s", exception_text[exception_ref], - exception_text[exception]); + if ((exception_ref != nullptr) != (exception != nullptr)) { + FATAL("Exception mismatch! Expected: <%s>; got: <%s>", + exception_ref ? exception_ref.get() : "<no exception>", + exception ? exception.get() : "<no exception>"); } if (!exception) { diff --git a/test/inspector/debugger/object-preview-internal-properties-expected.txt b/test/inspector/debugger/object-preview-internal-properties-expected.txt index 1bdef8231b..60fa264522 100644 --- a/test/inspector/debugger/object-preview-internal-properties-expected.txt +++ b/test/inspector/debugger/object-preview-internal-properties-expected.txt @@ -384,6 +384,11 @@ expression: /123/ type : boolean value : false } +{ + name : unicodeSets + type : boolean + value : false +} expression: ({}) diff --git a/test/inspector/runtime/remote-object-expected.txt b/test/inspector/runtime/remote-object-expected.txt index 040c567cd5..765f8e26ef 100644 --- a/test/inspector/runtime/remote-object-expected.txt +++ b/test/inspector/runtime/remote-object-expected.txt @@ -484,6 +484,16 @@ Running test: testRegExp type : object } } +'/w+/v', returnByValue: false, generatePreview: false +{ + result : { + className : RegExp + description : /w+/v + objectId : + subtype : regexp + type : object + } +} '/w+/dgimsuy', returnByValue: false, generatePreview: false { result : { @@ -494,6 +504,16 @@ Running test: testRegExp type : object } } +'/w+/dgimsvy', returnByValue: false, generatePreview: false +{ + result : { + className : RegExp + description : /w+/dgimsvy + objectId : + subtype : regexp + type : object + } +} 'new RegExp('\w+', 'g')', returnByValue: false, generatePreview: false { result : { @@ -526,6 +546,18 @@ Running test: testRegExp type : object } } +'var re = /./dgimsvy; + re.toString = () => 'foo'; + re', returnByValue: false, generatePreview: false +{ + result : { + className : RegExp + description : /./dgimsvy + objectId : + subtype : regexp + type : object + } +} 'var re = new RegExp('\w+', 'g'); re.prop = 32; re', returnByValue: false, generatePreview: true diff --git a/test/inspector/runtime/remote-object.js b/test/inspector/runtime/remote-object.js index d50d7aa0a5..ed4e764e43 100644 --- a/test/inspector/runtime/remote-object.js +++ 
b/test/inspector/runtime/remote-object.js @@ -234,9 +234,15 @@ InspectorTest.runAsyncTestSuite([ InspectorTest.logMessage((await evaluate({ expression: '/\w+/y' })).result); + InspectorTest.logMessage((await evaluate({ + expression: '/\w+/v' + })).result); InspectorTest.logMessage((await evaluate({ expression: '/\w+/dgimsuy' })).result); + InspectorTest.logMessage((await evaluate({ + expression: '/\w+/dgimsvy' + })).result); InspectorTest.logMessage((await evaluate({ expression: `new RegExp('\\w+', 'g')`, })).result); @@ -248,6 +254,11 @@ InspectorTest.runAsyncTestSuite([ re.toString = () => 'foo'; re` })).result); + InspectorTest.logMessage((await evaluate({ + expression: `var re = /./dgimsvy; + re.toString = () => 'foo'; + re` + })).result); InspectorTest.logMessage((await evaluate({ expression: `var re = new RegExp('\\w+', 'g'); re.prop = 32; diff --git a/test/mjsunit/compiler/typedarray-resizablearraybuffer.js b/test/mjsunit/compiler/typedarray-resizablearraybuffer.js index 2cd42de7c2..b2a72363d9 100644 --- a/test/mjsunit/compiler/typedarray-resizablearraybuffer.js +++ b/test/mjsunit/compiler/typedarray-resizablearraybuffer.js @@ -451,6 +451,83 @@ assertEquals(9, ByteLength(dv)); assertOptimized(ByteLength); })(); +const dataview_data_sizes = ['Int8', 'Uint8', 'Int16', 'Uint16', 'Int32', + 'Uint32', 'Float32', 'Float64', 'BigInt64', + 'BigUint64']; + +// Global variable used for DataViews; this is important for triggering some +// optimizations. +var dv; +(function() { +for (let use_global_var of [true, false]) { + for (let shared of [false, true]) { + for (let length_tracking of [false, true]) { + for (let with_offset of [false, true]) { + for (let data_size of dataview_data_sizes) { + const test_case = `Testing: Get_${ + data_size}_${ + shared ? 'GSAB' : 'RAB'}_${ + length_tracking ? + 'LengthTracking' : + 'FixedLength'}${with_offset ? 'WithOffset' : ''}_${ + use_global_var ? 'UseGlobalVar' : ''}_DataView`; + // console.log(test_case); + const is_bigint = data_size.startsWith('Big'); + const expected_value = is_bigint ? 0n : 0; + + const get_code = 'return dv.get' + data_size + '(0); // ' + test_case; + const Get = use_global_var ? + new Function(get_code) : new Function('dv', get_code); + + const offset = with_offset ? 8 : 0; + + let blen = 8; // Enough for one element. + const fixed_blen = length_tracking ? undefined : blen; + const ab = CreateBuffer(shared, 8*10, 8*20); + // Assign to the global var. + dv = new DataView(ab, offset, fixed_blen); + const Resize = MakeResize(DataView, shared, offset, fixed_blen); + + assertUnoptimized(Get); + %PrepareFunctionForOptimization(Get); + assertEquals(expected_value, Get(dv)); + assertEquals(expected_value, Get(dv)); + %OptimizeFunctionOnNextCall(Get); + assertEquals(expected_value, Get(dv)); + assertOptimized(Get); + + // Enough for one element or more (even with offset). + blen = Resize(ab, 8 + offset); + assertEquals(expected_value, Get(dv)); + assertOptimized(Get); + + blen = Resize(ab, 0); // Not enough for one element. + if (shared) { + assertEquals(expected_value, Get(dv)); + } else { + if (!length_tracking || with_offset) { + // DataView is out of bounds. + assertThrows(() => { Get(dv); }, TypeError); + } else { + // DataView is valid, the index is out of bounds. 
+ assertThrows(() => { Get(dv); }, RangeError); + } + } + + blen = Resize(ab, 64); + assertEquals(expected_value, Get(dv)); + + if (!shared) { + %ArrayBufferDetach(ab); + assertThrows(() => { Get(dv); }, TypeError); + } + } + } + } + } +} +})(); + (function() { function Read_TA_RAB_LengthTracking_Mixed(ta, index) { return ta[index]; diff --git a/test/mjsunit/harmony/regexp-unicode-sets.js b/test/mjsunit/harmony/regexp-unicode-sets.js index 8288b34c86..b5a66192cf 100644 --- a/test/mjsunit/harmony/regexp-unicode-sets.js +++ b/test/mjsunit/harmony/regexp-unicode-sets.js @@ -184,6 +184,13 @@ check( /[\q{ĀĂĄĆ|AaAc}--\q{āăąć}]/vi, ['AaAc', 'aAaC'], ['ĀĂĄĆ', 'āăąć'], false); +// Empty nested classes. +check(/[a-c\q{foo|bar}[]]/v, ['a','b','c','foo','bar'], [], false); +check(/[[a-c\q{foo|bar}]&&[]]/v, [], ['a','b','c','foo','bar'], true); +check(/[[a-c\q{foo|bar}]--[]]/v, ['a','b','c','foo','bar'], [], false); +check(/[[]&&[a-c\q{foo|bar}]]/v, [], ['a','b','c','foo','bar'], true); +check(/[[]--[a-c\q{foo|bar}]]/v, [], ['a','b','c','foo','bar'], true); + // Empty string disjunctions matches nothing, but succeeds. let res = /[\q{}]/v.exec('foo'); assertNotNull(res); diff --git a/test/mjsunit/maglev/polymorphic-load-number.js b/test/mjsunit/maglev/polymorphic-load-number.js new file mode 100644 index 0000000000..cfda707d7c --- /dev/null +++ b/test/mjsunit/maglev/polymorphic-load-number.js @@ -0,0 +1,18 @@ +// Copyright 2023 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// Flags: --allow-natives-syntax --maglev + + +function foo(o) { + return o.length; +} + +%PrepareFunctionForOptimization(foo); +assertEquals(6, foo("string")); +assertEquals(undefined, foo(4.2)); + +%OptimizeMaglevOnNextCall(foo); +assertEquals(6, foo("string")); +assertEquals(undefined, foo(4.2)); diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status index 4ca62f8e1a..7c92dc8b9b 100644 --- a/test/mjsunit/mjsunit.status +++ b/test/mjsunit/mjsunit.status @@ -326,37 +326,6 @@ 'code-coverage-ad-hoc': [SKIP], 'code-coverage-precise': [SKIP], - # Unsuitable for GC stress because it interferes with the execution of - # FinalizationRegistry cleanup tasks or with the clearing of WeakRefs, - # when asynchronous GC is used. 
- 'harmony/weakrefs/cleanup': [SKIP], - 'harmony/weakrefs/cleanup-is-not-a-microtask': [SKIP], - 'harmony/weakrefs/cleanup-on-detached-realm': [SKIP], - 'harmony/weakrefs/cleanupsome': [SKIP], - 'harmony/weakrefs/cleanupsome-after-unregister': [SKIP], - 'harmony/weakrefs/finalizationregistry-and-weakref': [SKIP], - 'harmony/weakrefs/finalizationregistry-independent-lifetime': [SKIP], - 'harmony/weakrefs/finalizationregistry-independent-lifetime-multiple': [SKIP], - 'harmony/weakrefs/finalizationregistry-keeps-holdings-alive': [SKIP], - 'harmony/weakrefs/finalizationregistry-scheduled-for-cleanup-multiple-times': [SKIP], - 'harmony/weakrefs/multiple-dirty-finalization-groups': [SKIP], - 'harmony/weakrefs/reentrant-gc-from-cleanup': [SKIP], - 'harmony/weakrefs/symbol-as-weakref-target-gc': [SKIP], - 'harmony/weakrefs/symbol-in-finalizationregistry': [SKIP], - 'harmony/weakrefs/two-weakrefs': [SKIP], - 'harmony/weakrefs/undefined-holdings': [SKIP], - 'harmony/weakrefs/unregister-after-cleanup': [SKIP], - 'harmony/weakrefs/unregister-before-cleanup': [SKIP], - 'harmony/weakrefs/unregister-called-twice': [SKIP], - 'harmony/weakrefs/unregister-inside-cleanup2': [SKIP], - 'harmony/weakrefs/unregister-inside-cleanup3': [SKIP], - 'harmony/weakrefs/unregister-inside-cleanup': [SKIP], - 'harmony/weakrefs/unregister-many': [SKIP], - 'harmony/weakrefs/unregister-when-cleanup-already-scheduled': [SKIP], - 'harmony/weakrefs/weak-cell-basics': [SKIP], - 'harmony/weakrefs/weakref-creation-keeps-alive': [SKIP], - 'harmony/weakrefs/weakref-deref-keeps-alive': [SKIP], - # Takes too long with TF. 'array-sort': [PASS, NO_VARIANTS], 'regress/regress-91008': [PASS, NO_VARIANTS], @@ -412,6 +381,43 @@ 'maglev/inner-function': [SKIP], }], # 'gc_stress' +############################################################################## +['gc_stress or variant == stress_concurrent_allocation', { + # These tests check that FinalizationRegistry cleanup tasks and/or the + # clearing of WeakRefs work as expected. They use carefully triggered + # synchronous or asynchronous GCs to achieve that and they assume that + # there are no unexpected, externally triggered GCs that would interfere + # with the tests. Therefore, they are unsuitable for modes that stress + # activities which can trigger GC. 
+ 'harmony/weakrefs/cleanup': [SKIP], + 'harmony/weakrefs/cleanup-is-not-a-microtask': [SKIP], + 'harmony/weakrefs/cleanup-on-detached-realm': [SKIP], + 'harmony/weakrefs/cleanupsome': [SKIP], + 'harmony/weakrefs/cleanupsome-after-unregister': [SKIP], + 'harmony/weakrefs/finalizationregistry-and-weakref': [SKIP], + 'harmony/weakrefs/finalizationregistry-independent-lifetime': [SKIP], + 'harmony/weakrefs/finalizationregistry-independent-lifetime-multiple': [SKIP], + 'harmony/weakrefs/finalizationregistry-keeps-holdings-alive': [SKIP], + 'harmony/weakrefs/finalizationregistry-scheduled-for-cleanup-multiple-times': [SKIP], + 'harmony/weakrefs/multiple-dirty-finalization-groups': [SKIP], + 'harmony/weakrefs/reentrant-gc-from-cleanup': [SKIP], + 'harmony/weakrefs/symbol-as-weakref-target-gc': [SKIP], + 'harmony/weakrefs/symbol-in-finalizationregistry': [SKIP], + 'harmony/weakrefs/two-weakrefs': [SKIP], + 'harmony/weakrefs/undefined-holdings': [SKIP], + 'harmony/weakrefs/unregister-after-cleanup': [SKIP], + 'harmony/weakrefs/unregister-before-cleanup': [SKIP], + 'harmony/weakrefs/unregister-called-twice': [SKIP], + 'harmony/weakrefs/unregister-inside-cleanup2': [SKIP], + 'harmony/weakrefs/unregister-inside-cleanup3': [SKIP], + 'harmony/weakrefs/unregister-inside-cleanup': [SKIP], + 'harmony/weakrefs/unregister-many': [SKIP], + 'harmony/weakrefs/unregister-when-cleanup-already-scheduled': [SKIP], + 'harmony/weakrefs/weak-cell-basics': [SKIP], + 'harmony/weakrefs/weakref-creation-keeps-alive': [SKIP], + 'harmony/weakrefs/weakref-deref-keeps-alive': [SKIP], +}], # 'gc_stress or variant == stress_concurrent_allocation' + ############################################################################## # TODO(v8:7777): Change this once wasm is supported in jitless mode. ['not has_webassembly or variant == jitless', { diff --git a/test/mjsunit/regress/regress-1412629.js b/test/mjsunit/regress/regress-1412629.js new file mode 100644 index 0000000000..03e67d12fd --- /dev/null +++ b/test/mjsunit/regress/regress-1412629.js @@ -0,0 +1,18 @@ +// Copyright 2023 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// Flags: --allow-natives-syntax + +function foo(x) { + return NaN ** x; +} + +%PrepareFunctionForOptimization(foo); +assertEquals(NaN, foo(1)); +assertEquals(1, foo(0)); +assertEquals(1, foo(-0)); +%OptimizeFunctionOnNextCall(foo); +assertEquals(NaN, foo(1)); +assertEquals(1, foo(0)); +assertEquals(1, foo(-0)); diff --git a/test/mjsunit/regress/regress-crbug-1407384.js b/test/mjsunit/regress/regress-crbug-1407384.js new file mode 100644 index 0000000000..9d9501dc41 --- /dev/null +++ b/test/mjsunit/regress/regress-crbug-1407384.js @@ -0,0 +1,22 @@ +// Copyright 2023 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +// Flags: --allow-natives-syntax + +function main() { + let v0 = 1.5; + do { + const v5 = BigInt.asIntN(6, 4n); + const v6 = v5 / v5; + const v7 = v6 / v6; + do { + [v7]; + } while (v0 < 0); + --v0; + } while (v0 < 0); +} +%PrepareFunctionForOptimization(main); +main(); +%OptimizeFunctionOnNextCall(main); +main(); diff --git a/test/mjsunit/wasm/stringrefs-exec.js b/test/mjsunit/wasm/stringrefs-exec.js index 18dbe2b53e..b7260ee65d 100644 --- a/test/mjsunit/wasm/stringrefs-exec.js +++ b/test/mjsunit/wasm/stringrefs-exec.js @@ -1296,3 +1296,34 @@ function makeWtf16TestDataSegment() { WebAssembly.RuntimeError, /Invalid code point [0-9]+/); } })(); + +(function TestStringHash() { + print(arguments.callee.name); + let builder = new WasmModuleBuilder(); + builder.addFunction("hash", kSig_i_w) + .exportFunc() + .addBody([ + kExprLocalGet, 0, + ...GCInstr(kExprStringHash), + ]); + + let hash = builder.instantiate().exports.hash; + assertEquals(hash(""), hash("")); + assertEquals(hash("foo"), hash("foo")); + assertEquals(hash("bar"), hash("bar")); + assertEquals(hash("123"), hash("123")); + // Assuming that hash collisions are very rare. + assertNotEquals(hash("foo"), hash("bar")); + // Test with cons strings. + assertEquals(hash("f" + "o" + "o"), hash("foo")); + assertEquals(hash("f" + 1), hash("f1")); + + assertEquals(hash(new String(" foo ").trim()), hash("foo")); + assertEquals(hash(new String("xfoox").substring(1, 4)), hash("foo")); + + // Test integer index hash. + let dummy_obj = {123: 456}; + let index_string = "123"; + assertEquals(456, dummy_obj[index_string]); + assertEquals(hash("1" + "23"), hash(index_string)); +})(); diff --git a/test/mjsunit/wasm/stringrefs-valid.js b/test/mjsunit/wasm/stringrefs-valid.js index 923378728f..d045f77c14 100644 --- a/test/mjsunit/wasm/stringrefs-valid.js +++ b/test/mjsunit/wasm/stringrefs-valid.js @@ -257,6 +257,12 @@ let kSig_w_zi = makeSig([kWasmStringViewIter, kWasmI32], ...GCInstr(kExprStringFromCodePoint) ]); + builder.addFunction("string.hash", kSig_i_w) + .addBody([ + kExprLocalGet, 0, + ...GCInstr(kExprStringHash) + ]); + let i8_array = builder.addArray(kWasmI8, true); let i16_array = builder.addArray(kWasmI16, true); diff --git a/test/mjsunit/wasm/wasm-module-builder.js b/test/mjsunit/wasm/wasm-module-builder.js index f871928ca2..10f2803562 100644 --- a/test/mjsunit/wasm/wasm-module-builder.js +++ b/test/mjsunit/wasm/wasm-module-builder.js @@ -569,6 +569,7 @@ let kExprStringViewIterRewind = 0xa3 let kExprStringViewIterSlice = 0xa4; let kExprStringCompare = 0xa8; let kExprStringFromCodePoint = 0xa9; +let kExprStringHash = 0xaa; let kExprStringNewUtf8Array = 0xb0; let kExprStringNewWtf16Array = 0xb1; let kExprStringEncodeUtf8Array = 0xb2; diff --git a/test/test262/test262.status b/test/test262/test262.status index 0e3c81fb7e..1e577ed8dc 100644 --- a/test/test262/test262.status +++ b/test/test262/test262.status @@ -216,12 +216,6 @@ # https://bugs.chromium.org/p/v8/issues/detail?id=12209 'intl402/Intl/supportedValuesOf/collations-accepted-by-Collator': [FAIL], - # https://github.com/tc39/test262/issues/3711 - 'intl402/DateTimeFormat/prototype/formatRange/temporal-objects-resolved-time-zone': [FAIL], - 'intl402/DateTimeFormat/prototype/formatRangeToParts/temporal-objects-resolved-time-zone': [FAIL], - 'intl402/DateTimeFormat/prototype/format/temporal-objects-resolved-time-zone': [FAIL], - 'intl402/DateTimeFormat/prototype/formatToParts/temporal-objects-resolved-time-zone': [FAIL], - # https://bugs.chromium.org/p/v8/issues/detail?id=7831 
'language/statements/generators/generator-created-after-decl-inst': [FAIL], 'language/expressions/generators/generator-created-after-decl-inst': [FAIL], @@ -992,10 +986,62 @@ 'built-ins/Temporal/ZonedDateTime/prototype/since/nanoseconds-to-days-range-errors': [FAIL], 'built-ins/Temporal/ZonedDateTime/prototype/until/nanoseconds-to-days-range-errors': [FAIL], + 'built-ins/Temporal/Calendar/from/calendar-temporal-object': [FAIL], + 'built-ins/Temporal/Calendar/prototype/yearOfWeek/argument-propertybag-calendar-case-insensitive': [FAIL], + 'built-ins/Temporal/Duration/prototype/round/roundingincrement-out-of-range': [FAIL], + 'built-ins/Temporal/Instant/prototype/toZonedDateTime/calendar-temporal-object': [FAIL], + 'built-ins/Temporal/Now/plainDate/calendar-temporal-object': [FAIL], + 'built-ins/Temporal/Now/plainDateTime/calendar-temporal-object': [FAIL], + 'built-ins/Temporal/Now/zonedDateTime/calendar-temporal-object': [FAIL], + 'built-ins/Temporal/PlainDate/argument-convert': [FAIL], + 'built-ins/Temporal/PlainDate/calendar-temporal-object': [FAIL], + 'built-ins/Temporal/PlainDate/prototype/day/validate-calendar-value': [FAIL], + 'built-ins/Temporal/PlainDate/prototype/monthCode/validate-calendar-value': [FAIL], + 'built-ins/Temporal/PlainDate/prototype/month/validate-calendar-value': [FAIL], + 'built-ins/Temporal/PlainDate/prototype/since/roundingincrement-out-of-range': [FAIL], + 'built-ins/Temporal/PlainDate/prototype/until/roundingincrement-out-of-range': [FAIL], + 'built-ins/Temporal/PlainDate/prototype/withCalendar/calendar-temporal-object': [FAIL], + 'built-ins/Temporal/PlainDate/prototype/with/order-of-operations': [FAIL], + 'built-ins/Temporal/PlainDate/prototype/year/validate-calendar-value': [FAIL], + 'built-ins/Temporal/PlainDateTime/calendar-temporal-object': [FAIL], + 'built-ins/Temporal/PlainDateTime/prototype/day/validate-calendar-value': [FAIL], + 'built-ins/Temporal/PlainDateTime/prototype/monthCode/validate-calendar-value': [FAIL], + 'built-ins/Temporal/PlainDateTime/prototype/month/validate-calendar-value': [FAIL], + 'built-ins/Temporal/PlainDateTime/prototype/since/roundingincrement-non-integer': [FAIL], + 'built-ins/Temporal/PlainDateTime/prototype/until/roundingincrement-non-integer': [FAIL], + 'built-ins/Temporal/PlainDateTime/prototype/withCalendar/calendar-temporal-object': [FAIL], + 'built-ins/Temporal/PlainDateTime/prototype/year/validate-calendar-value': [FAIL], + 'built-ins/Temporal/PlainMonthDay/calendar-temporal-object': [FAIL], + 'built-ins/Temporal/PlainMonthDay/prototype/day/validate-calendar-value': [FAIL], + 'built-ins/Temporal/PlainMonthDay/prototype/monthCode/validate-calendar-value': [FAIL], + 'built-ins/Temporal/PlainMonthDay/prototype/with/order-of-operations': [FAIL], + 'built-ins/Temporal/PlainTime/prototype/with/order-of-operations': [FAIL], + 'built-ins/Temporal/PlainYearMonth/calendar-temporal-object': [FAIL], + 'built-ins/Temporal/PlainYearMonth/prototype/monthCode/validate-calendar-value': [FAIL], + 'built-ins/Temporal/PlainYearMonth/prototype/month/validate-calendar-value': [FAIL], + 'built-ins/Temporal/PlainYearMonth/prototype/since/roundingincrement-out-of-range': [FAIL], + 'built-ins/Temporal/PlainYearMonth/prototype/until/roundingincrement-out-of-range': [FAIL], + 'built-ins/Temporal/PlainYearMonth/prototype/with/order-of-operations': [FAIL], + 'built-ins/Temporal/PlainYearMonth/prototype/year/validate-calendar-value': [FAIL], + 'built-ins/Temporal/TimeZone/prototype/getPlainDateTimeFor/calendar-temporal-object': [FAIL], + 
'built-ins/Temporal/ZonedDateTime/calendar-temporal-object': [FAIL], + 'built-ins/Temporal/ZonedDateTime/prototype/day/validate-calendar-value': [FAIL], + 'built-ins/Temporal/ZonedDateTime/prototype/monthCode/validate-calendar-value': [FAIL], + 'built-ins/Temporal/ZonedDateTime/prototype/month/validate-calendar-value': [FAIL], + 'built-ins/Temporal/ZonedDateTime/prototype/since/roundingincrement-non-integer': [FAIL], + 'built-ins/Temporal/ZonedDateTime/prototype/until/roundingincrement-non-integer': [FAIL], + 'built-ins/Temporal/ZonedDateTime/prototype/withCalendar/calendar-temporal-object': [FAIL], + 'built-ins/Temporal/ZonedDateTime/prototype/year/validate-calendar-value': [FAIL], + 'intl402/Temporal/PlainDate/prototype/era/validate-calendar-value': [FAIL], + 'intl402/Temporal/PlainDate/prototype/eraYear/validate-calendar-value': [FAIL], + 'intl402/Temporal/PlainDateTime/prototype/era/validate-calendar-value': [FAIL], + 'intl402/Temporal/PlainDateTime/prototype/eraYear/validate-calendar-value': [FAIL], + 'intl402/Temporal/PlainYearMonth/prototype/era/validate-calendar-value': [FAIL], + 'intl402/Temporal/PlainYearMonth/prototype/eraYear/validate-calendar-value': [FAIL], + 'intl402/Temporal/ZonedDateTime/prototype/era/validate-calendar-value': [FAIL], + 'intl402/Temporal/ZonedDateTime/prototype/eraYear/validate-calendar-value': [FAIL], + # https://bugs.chromium.org/p/v8/issues/detail?id=12763 - 'language/statements/class/decorator/syntax/valid/class-element-decorator-call-expr-identifier-reference-yield': [FAIL], - 'language/statements/class/decorator/syntax/valid/class-element-decorator-member-expr-identifier-reference-yield': [FAIL], - 'language/statements/class/decorator/syntax/valid/class-element-decorator-parenthesized-expr-identifier-reference-yield': [FAIL], 'language/statements/class/decorator/syntax/valid/class-element-decorator-call-expr-identifier-reference': [FAIL], 'language/statements/class/decorator/syntax/valid/class-element-decorator-member-expr-decorator-member-expr': [FAIL], 'language/statements/class/decorator/syntax/valid/class-element-decorator-member-expr-identifier-reference': [FAIL], @@ -1049,6 +1095,12 @@ # https://bugs.chromium.org/p/v8/issues/detail?id=12681 'built-ins/Array/prototype/push/set-length-zero-array-length-is-non-writable': [FAIL], + # https://bugs.chromium.org/p/v8/issues/detail?id=13658 + 'built-ins/ArrayBuffer/prototype/transfer/from-resizable-to-larger': [FAIL], + 'built-ins/ArrayBuffer/prototype/transfer/from-resizable-to-same': [FAIL], + 'built-ins/ArrayBuffer/prototype/transfer/from-resizable-to-smaller': [FAIL], + 'built-ins/ArrayBuffer/prototype/transfer/from-resizable-to-zero': [FAIL], + ######################## NEEDS INVESTIGATION ########################### # https://bugs.chromium.org/p/v8/issues/detail?id=7833 diff --git a/test/unittests/BUILD.gn b/test/unittests/BUILD.gn index cb428e3985..bf90543df9 100644 --- a/test/unittests/BUILD.gn +++ b/test/unittests/BUILD.gn @@ -635,7 +635,7 @@ v8_source_set("unittests_sources") { if (v8_current_cpu == "arm") { sources += [ "assembler/disasm-arm-unittest.cc", - "assembler/turbo-assembler-arm-unittest.cc", + "assembler/macro-assembler-arm-unittest.cc", ] if (v8_enable_turbofan) { sources += [ "compiler/arm/instruction-selector-arm-unittest.cc" ] @@ -644,7 +644,6 @@ v8_source_set("unittests_sources") { sources += [ "assembler/disasm-arm64-unittest.cc", "assembler/macro-assembler-arm64-unittest.cc", - "assembler/turbo-assembler-arm64-unittest.cc", "codegen/pointer-auth-arm64-unittest.cc", ] if 
(v8_enable_turbofan) { @@ -656,7 +655,7 @@ v8_source_set("unittests_sources") { } else if (v8_current_cpu == "x86") { sources += [ "assembler/disasm-ia32-unittest.cc", - "assembler/turbo-assembler-ia32-unittest.cc", + "assembler/macro-assembler-ia32-unittest.cc", ] if (v8_enable_turbofan) { sources += [ "compiler/ia32/instruction-selector-ia32-unittest.cc" ] @@ -664,7 +663,7 @@ v8_source_set("unittests_sources") { } else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") { sources += [ "assembler/disasm-mips64-unittest.cc", - "assembler/turbo-assembler-mips64-unittest.cc", + "assembler/macro-assembler-mips64-unittest.cc", ] if (v8_enable_turbofan) { sources += [ "compiler/mips64/instruction-selector-mips64-unittest.cc" ] @@ -672,7 +671,7 @@ v8_source_set("unittests_sources") { } else if (v8_current_cpu == "riscv64") { sources += [ "assembler/disasm-riscv-unittest.cc", - "assembler/turbo-assembler-riscv-unittest.cc", + "assembler/macro-assembler-riscv-unittest.cc", ] if (v8_enable_turbofan) { sources += [ "compiler/riscv64/instruction-selector-riscv64-unittest.cc" ] @@ -680,7 +679,7 @@ v8_source_set("unittests_sources") { } else if (v8_current_cpu == "riscv32") { sources += [ "assembler/disasm-riscv-unittest.cc", - "assembler/turbo-assembler-riscv-unittest.cc", + "assembler/macro-assembler-riscv-unittest.cc", ] if (v8_enable_turbofan) { sources += [ "compiler/riscv32/instruction-selector-riscv32-unittest.cc" ] @@ -690,7 +689,6 @@ v8_source_set("unittests_sources") { "assembler/assembler-x64-unittest.cc", "assembler/disasm-x64-unittest.cc", "assembler/macro-assembler-x64-unittest.cc", - "assembler/turbo-assembler-x64-unittest.cc", ] if (v8_enable_turbofan) { sources += [ "compiler/x64/instruction-selector-x64-unittest.cc" ] @@ -701,7 +699,7 @@ v8_source_set("unittests_sources") { } else if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64") { sources += [ "assembler/disasm-ppc-unittest.cc", - "assembler/turbo-assembler-ppc-unittest.cc", + "assembler/macro-assembler-ppc-unittest.cc", ] if (v8_enable_turbofan) { sources += [ "compiler/ppc/instruction-selector-ppc-unittest.cc" ] @@ -709,7 +707,7 @@ v8_source_set("unittests_sources") { } else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") { sources += [ "assembler/disasm-s390-unittest.cc", - "assembler/turbo-assembler-s390-unittest.cc", + "assembler/macro-assembler-s390-unittest.cc", ] if (v8_enable_turbofan) { sources += [ "compiler/s390/instruction-selector-s390-unittest.cc" ] @@ -717,7 +715,7 @@ v8_source_set("unittests_sources") { } else if (v8_current_cpu == "loong64") { sources += [ "assembler/disasm-loong64-unittest.cc", - "assembler/turbo-assembler-loong64-unittest.cc", + "assembler/macro-assembler-loong64-unittest.cc", ] if (v8_enable_turbofan) { sources += [ "compiler/loong64/instruction-selector-loong64-unittest.cc" ] diff --git a/test/unittests/assembler/turbo-assembler-arm-unittest.cc b/test/unittests/assembler/macro-assembler-arm-unittest.cc similarity index 86% rename from test/unittests/assembler/turbo-assembler-arm-unittest.cc rename to test/unittests/assembler/macro-assembler-arm-unittest.cc index 6fa1bd5927..f7ec44e77f 100644 --- a/test/unittests/assembler/turbo-assembler-arm-unittest.cc +++ b/test/unittests/assembler/macro-assembler-arm-unittest.cc @@ -13,7 +13,7 @@ namespace v8 { namespace internal { -#define __ tasm. +#define __ masm. // If we are running on android and the output is not redirected (i.e. ends up // in the android log) then we cannot find the error message in the output. 
This @@ -28,11 +28,11 @@ namespace internal { // a buffer and executing them. These tests do not initialize the // V8 library, create a context, or use any V8 objects. -class TurboAssemblerTest : public TestWithIsolate {}; +class MacroAssemblerTest : public TestWithIsolate {}; -TEST_F(TurboAssemblerTest, TestHardAbort) { +TEST_F(MacroAssemblerTest, TestHardAbort) { auto buffer = AllocateAssemblerBuffer(); - TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, + MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, buffer->CreateView()); __ set_root_array_available(false); __ set_abort_hard(true); @@ -40,7 +40,7 @@ TEST_F(TurboAssemblerTest, TestHardAbort) { __ Abort(AbortReason::kNoReason); CodeDesc desc; - tasm.GetCode(isolate(), &desc); + masm.GetCode(isolate(), &desc); buffer->MakeExecutable(); // We need an isolate here to execute in the simulator. auto f = GeneratedCode::FromBuffer(isolate(), buffer->start()); @@ -48,9 +48,9 @@ TEST_F(TurboAssemblerTest, TestHardAbort) { ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, ERROR_MESSAGE("abort: no reason")); } -TEST_F(TurboAssemblerTest, TestCheck) { +TEST_F(MacroAssemblerTest, TestCheck) { auto buffer = AllocateAssemblerBuffer(); - TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, + MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, buffer->CreateView()); __ set_root_array_available(false); __ set_abort_hard(true); @@ -62,7 +62,7 @@ TEST_F(TurboAssemblerTest, TestCheck) { __ Ret(); CodeDesc desc; - tasm.GetCode(isolate(), &desc); + masm.GetCode(isolate(), &desc); buffer->MakeExecutable(); // We need an isolate here to execute in the simulator. auto f = GeneratedCode::FromBuffer(isolate(), buffer->start()); @@ -102,17 +102,17 @@ const MoveObjectAndSlotTestCase kMoveObjectAndSlotTestCases[] = { const int kOffsets[] = {0, 42, kMaxRegularHeapObjectSize, 0x101001}; template -class TurboAssemblerTestWithParam : public TurboAssemblerTest, +class MacroAssemblerTestWithParam : public MacroAssemblerTest, public ::testing::WithParamInterface {}; -using TurboAssemblerTestMoveObjectAndSlot = - TurboAssemblerTestWithParam; +using MacroAssemblerTestMoveObjectAndSlot = + MacroAssemblerTestWithParam; -TEST_P(TurboAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) { +TEST_P(MacroAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) { const MoveObjectAndSlotTestCase test_case = GetParam(); TRACED_FOREACH(int32_t, offset, kOffsets) { auto buffer = AllocateAssemblerBuffer(); - TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo, + MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo, buffer->CreateView()); __ Push(r0); __ Move(test_case.object, r1); @@ -143,7 +143,7 @@ TEST_P(TurboAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) { __ RecordComment("--"); // The `result` pointer was saved on the stack. 
- UseScratchRegisterScope temps(&tasm); + UseScratchRegisterScope temps(&masm); Register scratch = temps.Acquire(); __ Pop(scratch); __ str(dst_object, MemOperand(scratch)); @@ -152,7 +152,7 @@ TEST_P(TurboAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) { __ Ret(); CodeDesc desc; - tasm.GetCode(nullptr, &desc); + masm.GetCode(nullptr, &desc); if (v8_flags.print_code) { Handle code = Factory::CodeBuilder(isolate(), desc, CodeKind::FOR_TESTING).Build(); @@ -179,8 +179,8 @@ TEST_P(TurboAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) { } } -INSTANTIATE_TEST_SUITE_P(TurboAssemblerTest, - TurboAssemblerTestMoveObjectAndSlot, +INSTANTIATE_TEST_SUITE_P(MacroAssemblerTest, + MacroAssemblerTestMoveObjectAndSlot, ::testing::ValuesIn(kMoveObjectAndSlotTestCases)); #undef __ diff --git a/test/unittests/assembler/macro-assembler-arm64-unittest.cc b/test/unittests/assembler/macro-assembler-arm64-unittest.cc index 021b0423f3..3bbbc49096 100644 --- a/test/unittests/assembler/macro-assembler-arm64-unittest.cc +++ b/test/unittests/assembler/macro-assembler-arm64-unittest.cc @@ -1,129 +1,254 @@ -// Copyright 2019 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2018 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. -#include - -#include "src/codegen/arm64/assembler-arm64-inl.h" -#include "src/codegen/macro-assembler-inl.h" -#include "src/deoptimizer/deoptimizer.h" -#include "src/heap/factory.h" -#include "src/objects/objects-inl.h" +#include "src/codegen/arm64/macro-assembler-arm64-inl.h" +#include "src/codegen/macro-assembler.h" +#include "src/execution/simulator.h" #include "src/utils/ostreams.h" #include "test/common/assembler-tester.h" #include "test/unittests/test-utils.h" +#include "testing/gtest-support.h" namespace v8 { namespace internal { -namespace test_macro_assembler_arm64 { - -using MacroAssemblerArm64Test = TestWithIsolate; - -using F0 = int(); #define __ masm. 
-TEST_F(MacroAssemblerArm64Test, EmbeddedObj) { -#ifdef V8_COMPRESS_POINTERS - Isolate* isolate = i_isolate(); - HandleScope handles(isolate); - - auto buffer = AllocateAssemblerBuffer(); - MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes, - buffer->CreateView()); - - AssemblerBufferWriteScope rw_scope(*buffer); - - Handle old_array = isolate->factory()->NewFixedArray(2000); - Handle my_array = isolate->factory()->NewFixedArray(1000); - __ Mov(w4, Immediate(my_array, RelocInfo::COMPRESSED_EMBEDDED_OBJECT)); - __ Mov(x5, old_array); - __ ret(x5); - - CodeDesc desc; - masm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); -#ifdef DEBUG - StdoutStream os; - code->Print(os); +// If we are running on android and the output is not redirected (i.e. ends up +// in the android log) then we cannot find the error message in the output. This +// macro just returns the empty string in that case. +#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT) +#define ERROR_MESSAGE(msg) "" +#else +#define ERROR_MESSAGE(msg) msg #endif - // Collect garbage to ensure reloc info can be walked by the heap. - CollectAllGarbage(); - CollectAllGarbage(); - CollectAllGarbage(); +// Test the x64 assembler by compiling some simple functions into +// a buffer and executing them. These tests do not initialize the +// V8 library, create a context, or use any V8 objects. - PtrComprCageBase cage_base(isolate); +class MacroAssemblerTest : public TestWithIsolate {}; - // Test the user-facing reloc interface. - const int mode_mask = RelocInfo::EmbeddedObjectModeMask(); - for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) { - RelocInfo::Mode mode = it.rinfo()->rmode(); - if (RelocInfo::IsCompressedEmbeddedObject(mode)) { - CHECK_EQ(*my_array, it.rinfo()->target_object(cage_base)); - } else { - CHECK(RelocInfo::IsFullEmbeddedObject(mode)); - CHECK_EQ(*old_array, it.rinfo()->target_object(cage_base)); - } - } -#endif // V8_COMPRESS_POINTERS -} - -TEST_F(MacroAssemblerArm64Test, DeoptExitSizeIsFixed) { - Isolate* isolate = i_isolate(); - HandleScope handles(isolate); +TEST_F(MacroAssemblerTest, TestHardAbort) { auto buffer = AllocateAssemblerBuffer(); - MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes, + MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, buffer->CreateView()); + __ set_root_array_available(false); + __ set_abort_hard(true); - AssemblerBufferWriteScope rw_scope(*buffer); + { + AssemblerBufferWriteScope rw_scope(*buffer); - static_assert(static_cast(kFirstDeoptimizeKind) == 0); - for (int i = 0; i < kDeoptimizeKindCount; i++) { - DeoptimizeKind kind = static_cast(i); - Label before_exit; - Builtin target = Deoptimizer::GetDeoptimizationEntry(kind); - // Mirroring logic in code-generator.cc. - if (kind == DeoptimizeKind::kLazy) { - // CFI emits an extra instruction here. - masm.BindExceptionHandler(&before_exit); - } else { - masm.bind(&before_exit); + __ CodeEntry(); + + __ Abort(AbortReason::kNoReason); + + CodeDesc desc; + masm.GetCode(isolate(), &desc); + } + // We need an isolate here to execute in the simulator. 
+ auto f = GeneratedCode::FromBuffer(isolate(), buffer->start()); + + ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, ERROR_MESSAGE("abort: no reason")); +} + +TEST_F(MacroAssemblerTest, TestCheck) { + auto buffer = AllocateAssemblerBuffer(); + MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, + buffer->CreateView()); + __ set_root_array_available(false); + __ set_abort_hard(true); + + { + AssemblerBufferWriteScope rw_scope(*buffer); + + __ CodeEntry(); + + // Fail if the first parameter is 17. + __ Mov(w1, Immediate(17)); + __ Cmp(w0, w1); // 1st parameter is in {w0}. + __ Check(Condition::ne, AbortReason::kNoReason); + __ Ret(); + + CodeDesc desc; + masm.GetCode(isolate(), &desc); + } + // We need an isolate here to execute in the simulator. + auto f = GeneratedCode::FromBuffer(isolate(), buffer->start()); + + f.Call(0); + f.Call(18); + ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, ERROR_MESSAGE("abort: no reason")); +} + +TEST_F(MacroAssemblerTest, CompareAndBranch) { + const int kTestCases[] = {-42, 0, 42}; + static_assert(Condition::eq == 0); + static_assert(Condition::le == 13); + TRACED_FORRANGE(int, cc, 0, 13) { // All conds except al and nv + Condition cond = static_cast(cc); + TRACED_FOREACH(int, imm, kTestCases) { + auto buffer = AllocateAssemblerBuffer(); + MacroAssembler masm(isolate(), AssemblerOptions{}, + CodeObjectRequired::kNo, buffer->CreateView()); + __ set_root_array_available(false); + __ set_abort_hard(true); + + { + AssemblerBufferWriteScope rw_scope(*buffer); + + __ CodeEntry(); + + Label start, lab; + __ Bind(&start); + __ CompareAndBranch(x0, Immediate(imm), cond, &lab); + if (imm == 0 && ((cond == eq) || (cond == ne) || (cond == hi) || + (cond == ls))) { // One instruction generated + ASSERT_EQ(kInstrSize, __ SizeOfCodeGeneratedSince(&start)); + } else { // Two instructions generated + ASSERT_EQ(static_cast(2 * kInstrSize), + __ SizeOfCodeGeneratedSince(&start)); + } + __ Cmp(x0, Immediate(imm)); + __ Check(NegateCondition(cond), + AbortReason::kNoReason); // cond must not hold + __ Ret(); + __ Bind(&lab); // Branch leads here + __ Cmp(x0, Immediate(imm)); + __ Check(cond, AbortReason::kNoReason); // cond must hold + __ Ret(); + + CodeDesc desc; + masm.GetCode(isolate(), &desc); + } + // We need an isolate here to execute in the simulator. + auto f = GeneratedCode::FromBuffer(isolate(), buffer->start()); + + TRACED_FOREACH(int, n, kTestCases) { f.Call(n); } } - masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit, - &before_exit); - CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit), - kind == DeoptimizeKind::kLazy ? Deoptimizer::kLazyDeoptExitSize - : Deoptimizer::kEagerDeoptExitSize); } } +struct MoveObjectAndSlotTestCase { + const char* comment; + Register dst_object; + Register dst_slot; + Register object; + Register offset_register = no_reg; +}; + +const MoveObjectAndSlotTestCase kMoveObjectAndSlotTestCases[] = { + {"no overlap", x0, x1, x2}, + {"no overlap", x0, x1, x2, x3}, + + {"object == dst_object", x2, x1, x2}, + {"object == dst_object", x2, x1, x2, x3}, + + {"object == dst_slot", x1, x2, x2}, + {"object == dst_slot", x1, x2, x2, x3}, + + {"offset == dst_object", x0, x1, x2, x0}, + + {"offset == dst_object && object == dst_slot", x0, x1, x1, x0}, + + {"offset == dst_slot", x0, x1, x2, x1}, + + {"offset == dst_slot && object == dst_object", x0, x1, x0, x1}}; + +// Make sure we include offsets that cannot be encoded in an add instruction. 
+const int kOffsets[] = {0, 42, kMaxRegularHeapObjectSize, 0x101001}; + +template +class MacroAssemblerTestWithParam : public MacroAssemblerTest, + public ::testing::WithParamInterface {}; + +using MacroAssemblerTestMoveObjectAndSlot = + MacroAssemblerTestWithParam; + +TEST_P(MacroAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) { + const MoveObjectAndSlotTestCase test_case = GetParam(); + TRACED_FOREACH(int32_t, offset, kOffsets) { + auto buffer = AllocateAssemblerBuffer(); + MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo, + buffer->CreateView()); + + { + AssemblerBufferWriteScope rw_buffer_scope(*buffer); + + __ CodeEntry(); + __ Push(x0, padreg); + __ Mov(test_case.object, x1); + + Register src_object = test_case.object; + Register dst_object = test_case.dst_object; + Register dst_slot = test_case.dst_slot; + + Operand offset_operand(0); + if (test_case.offset_register == no_reg) { + offset_operand = Operand(offset); + } else { + __ Mov(test_case.offset_register, Operand(offset)); + offset_operand = Operand(test_case.offset_register); + } + + std::stringstream comment; + comment << "-- " << test_case.comment << ": MoveObjectAndSlot(" + << dst_object << ", " << dst_slot << ", " << src_object << ", "; + if (test_case.offset_register == no_reg) { + comment << "#" << offset; + } else { + comment << test_case.offset_register; + } + comment << ") --"; + __ RecordComment(comment.str().c_str()); + __ MoveObjectAndSlot(dst_object, dst_slot, src_object, offset_operand); + __ RecordComment("--"); + + // The `result` pointer was saved on the stack. + UseScratchRegisterScope temps(&masm); + Register scratch = temps.AcquireX(); + __ Pop(padreg, scratch); + __ Str(dst_object, MemOperand(scratch)); + __ Str(dst_slot, MemOperand(scratch, kSystemPointerSize)); + + __ Ret(); + + CodeDesc desc; + masm.GetCode(nullptr, &desc); + if (v8_flags.print_code) { + Handle code = + Factory::CodeBuilder(isolate(), desc, CodeKind::FOR_TESTING) + .Build(); + StdoutStream os; + code->Print(os); + } + } + + // We need an isolate here to execute in the simulator. + auto f = GeneratedCode::FromBuffer(isolate(), + buffer->start()); + + byte* object = new byte[offset]; + byte* result[] = {nullptr, nullptr}; + + f.Call(result, object); + + // The first element must be the address of the object, and the second the + // slot addressed by `offset`. + EXPECT_EQ(result[0], &object[0]); + EXPECT_EQ(result[1], &object[offset]); + + delete[] object; + } +} + +INSTANTIATE_TEST_SUITE_P(MacroAssemblerTest, + MacroAssemblerTestMoveObjectAndSlot, + ::testing::ValuesIn(kMoveObjectAndSlotTestCases)); + #undef __ +#undef ERROR_MESSAGE -} // namespace test_macro_assembler_arm64 } // namespace internal } // namespace v8 diff --git a/test/unittests/assembler/turbo-assembler-ia32-unittest.cc b/test/unittests/assembler/macro-assembler-ia32-unittest.cc similarity index 82% rename from test/unittests/assembler/turbo-assembler-ia32-unittest.cc rename to test/unittests/assembler/macro-assembler-ia32-unittest.cc index f0cb96d47d..cbf628ba88 100644 --- a/test/unittests/assembler/turbo-assembler-ia32-unittest.cc +++ b/test/unittests/assembler/macro-assembler-ia32-unittest.cc @@ -11,17 +11,17 @@ namespace v8 { namespace internal { -#define __ tasm. +#define __ masm. // Test the x64 assembler by compiling some simple functions into // a buffer and executing them. These tests do not initialize the // V8 library, create a context, or use any V8 objects. 
-class TurboAssemblerTest : public TestWithIsolate {}; +class MacroAssemblerTest : public TestWithIsolate {}; -TEST_F(TurboAssemblerTest, TestHardAbort) { +TEST_F(MacroAssemblerTest, TestHardAbort) { auto buffer = AllocateAssemblerBuffer(); - TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, + MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, buffer->CreateView()); __ set_root_array_available(false); __ set_abort_hard(true); @@ -29,16 +29,16 @@ TEST_F(TurboAssemblerTest, TestHardAbort) { __ Abort(AbortReason::kNoReason); CodeDesc desc; - tasm.GetCode(isolate(), &desc); + masm.GetCode(isolate(), &desc); buffer->MakeExecutable(); auto f = GeneratedCode::FromBuffer(isolate(), buffer->start()); ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason"); } -TEST_F(TurboAssemblerTest, TestCheck) { +TEST_F(MacroAssemblerTest, TestCheck) { auto buffer = AllocateAssemblerBuffer(); - TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, + MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, buffer->CreateView()); __ set_root_array_available(false); __ set_abort_hard(true); @@ -50,7 +50,7 @@ TEST_F(TurboAssemblerTest, TestCheck) { __ ret(0); CodeDesc desc; - tasm.GetCode(isolate(), &desc); + masm.GetCode(isolate(), &desc); buffer->MakeExecutable(); auto f = GeneratedCode::FromBuffer(isolate(), buffer->start()); diff --git a/test/unittests/assembler/turbo-assembler-loong64-unittest.cc b/test/unittests/assembler/macro-assembler-loong64-unittest.cc similarity index 83% rename from test/unittests/assembler/turbo-assembler-loong64-unittest.cc rename to test/unittests/assembler/macro-assembler-loong64-unittest.cc index 5334fb4be3..a2cc213cae 100644 --- a/test/unittests/assembler/turbo-assembler-loong64-unittest.cc +++ b/test/unittests/assembler/macro-assembler-loong64-unittest.cc @@ -12,33 +12,33 @@ namespace v8 { namespace internal { -#define __ tasm. +#define __ masm. // Test the loong64 assembler by compiling some simple functions into // a buffer and executing them. These tests do not initialize the // V8 library, create a context, or use any V8 objects. -class TurboAssemblerTest : public TestWithIsolate {}; +class MacroAssemblerTest : public TestWithIsolate {}; -TEST_F(TurboAssemblerTest, TestHardAbort) { +TEST_F(MacroAssemblerTest, TestHardAbort) { auto buffer = AllocateAssemblerBuffer(); - TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, + MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, buffer->CreateView()); __ set_root_array_available(false); __ set_abort_hard(true); __ Abort(AbortReason::kNoReason); CodeDesc desc; - tasm.GetCode(isolate(), &desc); + masm.GetCode(isolate(), &desc); buffer->MakeExecutable(); // We need an isolate here to execute in the simulator. 
auto f = GeneratedCode::FromBuffer(isolate(), buffer->start()); ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason"); } -TEST_F(TurboAssemblerTest, TestCheck) { +TEST_F(MacroAssemblerTest, TestCheck) { auto buffer = AllocateAssemblerBuffer(); - TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, + MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, buffer->CreateView()); __ set_root_array_available(false); __ set_abort_hard(true); @@ -48,7 +48,7 @@ TEST_F(TurboAssemblerTest, TestCheck) { __ Ret(); CodeDesc desc; - tasm.GetCode(isolate(), &desc); + masm.GetCode(isolate(), &desc); buffer->MakeExecutable(); // We need an isolate here to execute in the simulator. auto f = GeneratedCode::FromBuffer(isolate(), buffer->start()); diff --git a/test/unittests/assembler/turbo-assembler-mips64-unittest.cc b/test/unittests/assembler/macro-assembler-mips64-unittest.cc similarity index 83% rename from test/unittests/assembler/turbo-assembler-mips64-unittest.cc rename to test/unittests/assembler/macro-assembler-mips64-unittest.cc index c954ffcc65..92e3b1d6f8 100644 --- a/test/unittests/assembler/turbo-assembler-mips64-unittest.cc +++ b/test/unittests/assembler/macro-assembler-mips64-unittest.cc @@ -12,17 +12,17 @@ namespace v8 { namespace internal { -#define __ tasm. +#define __ masm. // Test the x64 assembler by compiling some simple functions into // a buffer and executing them. These tests do not initialize the // V8 library, create a context, or use any V8 objects. -class TurboAssemblerTest : public TestWithIsolate {}; +class MacroAssemblerTest : public TestWithIsolate {}; -TEST_F(TurboAssemblerTest, TestHardAbort) { +TEST_F(MacroAssemblerTest, TestHardAbort) { auto buffer = AllocateAssemblerBuffer(); - TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, + MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, buffer->CreateView()); __ set_root_array_available(false); __ set_abort_hard(true); @@ -30,7 +30,7 @@ TEST_F(TurboAssemblerTest, TestHardAbort) { __ Abort(AbortReason::kNoReason); CodeDesc desc; - tasm.GetCode(isolate(), &desc); + masm.GetCode(isolate(), &desc); buffer->MakeExecutable(); // We need an isolate here to execute in the simulator. auto f = GeneratedCode::FromBuffer(isolate(), buffer->start()); @@ -38,9 +38,9 @@ TEST_F(TurboAssemblerTest, TestHardAbort) { ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason"); } -TEST_F(TurboAssemblerTest, TestCheck) { +TEST_F(MacroAssemblerTest, TestCheck) { auto buffer = AllocateAssemblerBuffer(); - TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, + MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, buffer->CreateView()); __ set_root_array_available(false); __ set_abort_hard(true); @@ -50,7 +50,7 @@ TEST_F(TurboAssemblerTest, TestCheck) { __ Ret(); CodeDesc desc; - tasm.GetCode(isolate(), &desc); + masm.GetCode(isolate(), &desc); buffer->MakeExecutable(); // We need an isolate here to execute in the simulator. 
auto f = GeneratedCode::FromBuffer(isolate(), buffer->start()); diff --git a/test/unittests/assembler/turbo-assembler-ppc-unittest.cc b/test/unittests/assembler/macro-assembler-ppc-unittest.cc similarity index 58% rename from test/unittests/assembler/turbo-assembler-ppc-unittest.cc rename to test/unittests/assembler/macro-assembler-ppc-unittest.cc index 93ae7abafc..aabb988b29 100644 --- a/test/unittests/assembler/turbo-assembler-ppc-unittest.cc +++ b/test/unittests/assembler/macro-assembler-ppc-unittest.cc @@ -12,17 +12,17 @@ namespace v8 { namespace internal { -#define __ tasm. +#define __ masm. // Test the ppc assembler by compiling some simple functions into // a buffer and executing them. These tests do not initialize the // V8 library, create a context, or use any V8 objects. -class TurboAssemblerTest : public TestWithIsolate {}; +class MacroAssemblerTest : public TestWithIsolate {}; -TEST_F(TurboAssemblerTest, TestHardAbort) { +TEST_F(MacroAssemblerTest, TestHardAbort) { auto buffer = AllocateAssemblerBuffer(); - TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, + MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, buffer->CreateView()); __ set_root_array_available(false); __ set_abort_hard(true); @@ -30,7 +30,7 @@ TEST_F(TurboAssemblerTest, TestHardAbort) { __ Abort(AbortReason::kNoReason); CodeDesc desc; - tasm.GetCode(isolate(), &desc); + masm.GetCode(isolate(), &desc); buffer->MakeExecutable(); // We need an isolate here to execute in the simulator. auto f = GeneratedCode::FromBuffer(isolate(), buffer->start()); @@ -38,9 +38,9 @@ TEST_F(TurboAssemblerTest, TestHardAbort) { ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason"); } -TEST_F(TurboAssemblerTest, TestCheck) { +TEST_F(MacroAssemblerTest, TestCheck) { auto buffer = AllocateAssemblerBuffer(); - TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, + MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, buffer->CreateView()); __ set_root_array_available(false); __ set_abort_hard(true); @@ -52,7 +52,7 @@ TEST_F(TurboAssemblerTest, TestCheck) { __ Ret(); CodeDesc desc; - tasm.GetCode(isolate(), &desc); + masm.GetCode(isolate(), &desc); buffer->MakeExecutable(); // We need an isolate here to execute in the simulator. 
auto f = GeneratedCode::FromBuffer(isolate(), buffer->start()); @@ -62,23 +62,24 @@ TEST_F(TurboAssemblerTest, TestCheck) { ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, "abort: no reason"); } -TEST_F(TurboAssemblerTest, ReverseBitsU64) { +TEST_F(MacroAssemblerTest, ReverseBitsU64) { struct { - uint64_t expected; uint64_t input; + uint64_t expected; + uint64_t input; } values[] = { - {0x0000000000000000, 0x0000000000000000}, - {0xffffffffffffffff, 0xffffffffffffffff}, - {0x8000000000000000, 0x0000000000000001}, - {0x0000000000000001, 0x8000000000000000}, - {0x800066aa22cc4488, 0x1122334455660001}, - {0x1122334455660001, 0x800066aa22cc4488}, - {0xffffffff00000000, 0x00000000ffffffff}, - {0x00000000ffffffff, 0xffffffff00000000}, - {0xff01020304050607, 0xe060a020c04080ff}, - {0xe060a020c04080ff, 0xff01020304050607}, + {0x0000000000000000, 0x0000000000000000}, + {0xffffffffffffffff, 0xffffffffffffffff}, + {0x8000000000000000, 0x0000000000000001}, + {0x0000000000000001, 0x8000000000000000}, + {0x800066aa22cc4488, 0x1122334455660001}, + {0x1122334455660001, 0x800066aa22cc4488}, + {0xffffffff00000000, 0x00000000ffffffff}, + {0x00000000ffffffff, 0xffffffff00000000}, + {0xff01020304050607, 0xe060a020c04080ff}, + {0xe060a020c04080ff, 0xff01020304050607}, }; auto buffer = AllocateAssemblerBuffer(); - TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, + MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, buffer->CreateView()); __ set_root_array_available(false); __ set_abort_hard(true); @@ -87,28 +88,26 @@ TEST_F(TurboAssemblerTest, ReverseBitsU64) { __ Pop(r4, r5); __ Ret(); CodeDesc desc; - tasm.GetCode(isolate(), &desc); + masm.GetCode(isolate(), &desc); buffer->MakeExecutable(); - auto f = GeneratedCode::FromBuffer(isolate(), - buffer->start()); - for (unsigned int i=0; i < (sizeof(values) / sizeof(values[0])); i++) { + auto f = + GeneratedCode::FromBuffer(isolate(), buffer->start()); + for (unsigned int i = 0; i < (sizeof(values) / sizeof(values[0])); i++) { CHECK_EQ(values[i].expected, f.Call(values[i].input)); } } -TEST_F(TurboAssemblerTest, ReverseBitsU32) { +TEST_F(MacroAssemblerTest, ReverseBitsU32) { struct { - uint64_t expected; uint64_t input; + uint64_t expected; + uint64_t input; } values[] = { - {0x00000000, 0x00000000}, - {0xffffffff, 0xffffffff}, - {0x00000001, 0x80000000}, - {0x80000000, 0x00000001}, - {0x22334455, 0xaa22cc44}, - {0xaa22cc44, 0x22334455}, + {0x00000000, 0x00000000}, {0xffffffff, 0xffffffff}, + {0x00000001, 0x80000000}, {0x80000000, 0x00000001}, + {0x22334455, 0xaa22cc44}, {0xaa22cc44, 0x22334455}, }; auto buffer = AllocateAssemblerBuffer(); - TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, + MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, buffer->CreateView()); __ set_root_array_available(false); __ set_abort_hard(true); @@ -117,11 +116,11 @@ TEST_F(TurboAssemblerTest, ReverseBitsU32) { __ Pop(r4, r5); __ Ret(); CodeDesc desc; - tasm.GetCode(isolate(), &desc); + masm.GetCode(isolate(), &desc); buffer->MakeExecutable(); - auto f = GeneratedCode::FromBuffer(isolate(), - buffer->start()); - for (unsigned int i=0; i < (sizeof(values) / sizeof(values[0])); i++) { + auto f = + GeneratedCode::FromBuffer(isolate(), buffer->start()); + for (unsigned int i = 0; i < (sizeof(values) / sizeof(values[0])); i++) { CHECK_EQ(values[i].expected, f.Call(values[i].input)); } } diff --git a/test/unittests/assembler/turbo-assembler-riscv-unittest.cc 
b/test/unittests/assembler/macro-assembler-riscv-unittest.cc similarity index 83% rename from test/unittests/assembler/turbo-assembler-riscv-unittest.cc rename to test/unittests/assembler/macro-assembler-riscv-unittest.cc index afda8d3603..8e74ae692c 100644 --- a/test/unittests/assembler/turbo-assembler-riscv-unittest.cc +++ b/test/unittests/assembler/macro-assembler-riscv-unittest.cc @@ -12,33 +12,33 @@ namespace v8 { namespace internal { -#define __ tasm. +#define __ masm. // Test the x64 assembler by compiling some simple functions into // a buffer and executing them. These tests do not initialize the // V8 library, create a context, or use any V8 objects. -class TurboAssemblerTest : public TestWithIsolate {}; +class MacroAssemblerTest : public TestWithIsolate {}; -TEST_F(TurboAssemblerTest, TestHardAbort) { +TEST_F(MacroAssemblerTest, TestHardAbort) { auto buffer = AllocateAssemblerBuffer(); - TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, + MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, buffer->CreateView()); __ set_root_array_available(false); __ set_abort_hard(true); __ Abort(AbortReason::kNoReason); CodeDesc desc; - tasm.GetCode(nullptr, &desc); + masm.GetCode(nullptr, &desc); buffer->MakeExecutable(); // We need an isolate here to execute in the simulator. auto f = GeneratedCode::FromBuffer(isolate(), buffer->start()); ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason"); } -TEST_F(TurboAssemblerTest, TestCheck) { +TEST_F(MacroAssemblerTest, TestCheck) { auto buffer = AllocateAssemblerBuffer(); - TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, + MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, buffer->CreateView()); __ set_root_array_available(false); __ set_abort_hard(true); @@ -48,7 +48,7 @@ TEST_F(TurboAssemblerTest, TestCheck) { __ Ret(); CodeDesc desc; - tasm.GetCode(nullptr, &desc); + masm.GetCode(nullptr, &desc); buffer->MakeExecutable(); // We need an isolate here to execute in the simulator. auto f = GeneratedCode::FromBuffer(isolate(), buffer->start()); diff --git a/test/unittests/assembler/turbo-assembler-s390-unittest.cc b/test/unittests/assembler/macro-assembler-s390-unittest.cc similarity index 83% rename from test/unittests/assembler/turbo-assembler-s390-unittest.cc rename to test/unittests/assembler/macro-assembler-s390-unittest.cc index d86a09f67c..b371c841c5 100644 --- a/test/unittests/assembler/turbo-assembler-s390-unittest.cc +++ b/test/unittests/assembler/macro-assembler-s390-unittest.cc @@ -12,17 +12,17 @@ namespace v8 { namespace internal { -#define __ tasm. +#define __ masm. // Test the s390 assembler by compiling some simple functions into // a buffer and executing them. These tests do not initialize the // V8 library, create a context, or use any V8 objects. 
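Editorial aside: the ReverseBitsU64/ReverseBitsU32 expectation tables a few hunks up are plain input/output pairs. A small reference implementation (my own sketch, not part of this change) makes them easy to verify or extend:

#include <cstdint>

// Reference bit reversal, used only to cross-check the test tables above.
constexpr uint64_t ReverseBits64(uint64_t v) {
  uint64_t r = 0;
  for (int i = 0; i < 64; ++i) {
    r = (r << 1) | (v & 1);
    v >>= 1;
  }
  return r;
}

// Matches the table entry {0x8000000000000000, 0x0000000000000001}.
static_assert(ReverseBits64(0x0000000000000001) == 0x8000000000000000);
// Matches the table entry {0xffffffff00000000, 0x00000000ffffffff}.
static_assert(ReverseBits64(0x00000000ffffffff) == 0xffffffff00000000);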
-class TurboAssemblerTest : public TestWithIsolate {}; +class MacroAssemblerTest : public TestWithIsolate {}; -TEST_F(TurboAssemblerTest, TestHardAbort) { +TEST_F(MacroAssemblerTest, TestHardAbort) { auto buffer = AllocateAssemblerBuffer(); - TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, + MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, buffer->CreateView()); __ set_root_array_available(false); __ set_abort_hard(true); @@ -30,7 +30,7 @@ TEST_F(TurboAssemblerTest, TestHardAbort) { __ Abort(AbortReason::kNoReason); CodeDesc desc; - tasm.GetCode(isolate(), &desc); + masm.GetCode(isolate(), &desc); buffer->MakeExecutable(); // We need an isolate here to execute in the simulator. auto f = GeneratedCode::FromBuffer(isolate(), buffer->start()); @@ -38,9 +38,9 @@ TEST_F(TurboAssemblerTest, TestHardAbort) { ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason"); } -TEST_F(TurboAssemblerTest, TestCheck) { +TEST_F(MacroAssemblerTest, TestCheck) { auto buffer = AllocateAssemblerBuffer(); - TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, + MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, buffer->CreateView()); __ set_root_array_available(false); __ set_abort_hard(true); @@ -52,7 +52,7 @@ TEST_F(TurboAssemblerTest, TestCheck) { __ Ret(); CodeDesc desc; - tasm.GetCode(isolate(), &desc); + masm.GetCode(isolate(), &desc); buffer->MakeExecutable(); // We need an isolate here to execute in the simulator. auto f = GeneratedCode::FromBuffer(isolate(), buffer->start()); diff --git a/test/unittests/assembler/macro-assembler-x64-unittest.cc b/test/unittests/assembler/macro-assembler-x64-unittest.cc index e2fdafb580..cf7e277080 100644 --- a/test/unittests/assembler/macro-assembler-x64-unittest.cc +++ b/test/unittests/assembler/macro-assembler-x64-unittest.cc @@ -40,6 +40,57 @@ namespace v8 { namespace internal { + +#define __ masm. + +// Test the x64 assembler by compiling some simple functions into +// a buffer and executing them. These tests do not initialize the +// V8 library, create a context, or use any V8 objects. + +using MacroAssemblerX64Test = TestWithIsolate; + +TEST_F(MacroAssemblerX64Test, TestHardAbort) { + auto buffer = AllocateAssemblerBuffer(); + MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, + buffer->CreateView()); + __ set_root_array_available(false); + __ set_abort_hard(true); + + __ Abort(AbortReason::kNoReason); + + CodeDesc desc; + masm.GetCode(isolate(), &desc); + buffer->MakeExecutable(); + auto f = GeneratedCode::FromBuffer(isolate(), buffer->start()); + + ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason"); +} + +TEST_F(MacroAssemblerX64Test, TestCheck) { + auto buffer = AllocateAssemblerBuffer(); + MacroAssembler masm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, + buffer->CreateView()); + __ set_root_array_available(false); + __ set_abort_hard(true); + + // Fail if the first parameter is 17. 
+ __ movl(rax, Immediate(17)); + __ cmpl(rax, arg_reg_1); + __ Check(Condition::not_equal, AbortReason::kNoReason); + __ ret(0); + + CodeDesc desc; + masm.GetCode(isolate(), &desc); + buffer->MakeExecutable(); + auto f = GeneratedCode::FromBuffer(isolate(), buffer->start()); + + f.Call(0); + f.Call(18); + ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, "abort: no reason"); +} + +#undef __ + namespace test_macro_assembler_x64 { // Test the x64 assembler by compiling some simple functions into @@ -51,8 +102,6 @@ namespace test_macro_assembler_x64 { // This calling convention is used on Linux, with GCC, and on Mac OS, // with GCC. A different convention is used on 64-bit windows. -using MacroAssemblerX64Test = TestWithIsolate; - using F0 = int(); #define __ masm-> diff --git a/test/unittests/assembler/turbo-assembler-arm64-unittest.cc b/test/unittests/assembler/turbo-assembler-arm64-unittest.cc deleted file mode 100644 index 77123ef565..0000000000 --- a/test/unittests/assembler/turbo-assembler-arm64-unittest.cc +++ /dev/null @@ -1,254 +0,0 @@ -// Copyright 2018 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/codegen/arm64/macro-assembler-arm64-inl.h" -#include "src/codegen/macro-assembler.h" -#include "src/execution/simulator.h" -#include "src/utils/ostreams.h" -#include "test/common/assembler-tester.h" -#include "test/unittests/test-utils.h" -#include "testing/gtest-support.h" - -namespace v8 { -namespace internal { - -#define __ tasm. - -// If we are running on android and the output is not redirected (i.e. ends up -// in the android log) then we cannot find the error message in the output. This -// macro just returns the empty string in that case. -#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT) -#define ERROR_MESSAGE(msg) "" -#else -#define ERROR_MESSAGE(msg) msg -#endif - -// Test the x64 assembler by compiling some simple functions into -// a buffer and executing them. These tests do not initialize the -// V8 library, create a context, or use any V8 objects. - -class TurboAssemblerTest : public TestWithIsolate {}; - -TEST_F(TurboAssemblerTest, TestHardAbort) { - auto buffer = AllocateAssemblerBuffer(); - TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, - buffer->CreateView()); - __ set_root_array_available(false); - __ set_abort_hard(true); - - { - AssemblerBufferWriteScope rw_scope(*buffer); - - __ CodeEntry(); - - __ Abort(AbortReason::kNoReason); - - CodeDesc desc; - tasm.GetCode(isolate(), &desc); - } - // We need an isolate here to execute in the simulator. - auto f = GeneratedCode::FromBuffer(isolate(), buffer->start()); - - ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, ERROR_MESSAGE("abort: no reason")); -} - -TEST_F(TurboAssemblerTest, TestCheck) { - auto buffer = AllocateAssemblerBuffer(); - TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, - buffer->CreateView()); - __ set_root_array_available(false); - __ set_abort_hard(true); - - { - AssemblerBufferWriteScope rw_scope(*buffer); - - __ CodeEntry(); - - // Fail if the first parameter is 17. - __ Mov(w1, Immediate(17)); - __ Cmp(w0, w1); // 1st parameter is in {w0}. - __ Check(Condition::ne, AbortReason::kNoReason); - __ Ret(); - - CodeDesc desc; - tasm.GetCode(isolate(), &desc); - } - // We need an isolate here to execute in the simulator. 
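Editorial aside for readers scanning these TestCheck variants: Check(cond, reason) emits a conditional hard abort, so the x64 stub assembled above behaves roughly like the following C++ (a sketch of the observable behaviour only, not of the emitted machine code):

#include <cstdlib>

// Rough behavioural equivalent of the x64 TestCheck stub: the generated code
// compares the first argument against 17, and Check(not_equal, ...) aborts
// when that condition fails to hold, i.e. exactly when the argument is 17.
extern "C" void GeneratedTestCheckEquivalent(int first_arg) {
  if (first_arg == 17) {
    std::abort();  // surfaces as "abort: no reason" in the death test
  }
}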
- auto f = GeneratedCode::FromBuffer(isolate(), buffer->start()); - - f.Call(0); - f.Call(18); - ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, ERROR_MESSAGE("abort: no reason")); -} - -TEST_F(TurboAssemblerTest, CompareAndBranch) { - const int kTestCases[] = {-42, 0, 42}; - static_assert(Condition::eq == 0); - static_assert(Condition::le == 13); - TRACED_FORRANGE(int, cc, 0, 13) { // All conds except al and nv - Condition cond = static_cast(cc); - TRACED_FOREACH(int, imm, kTestCases) { - auto buffer = AllocateAssemblerBuffer(); - TurboAssembler tasm(isolate(), AssemblerOptions{}, - CodeObjectRequired::kNo, buffer->CreateView()); - __ set_root_array_available(false); - __ set_abort_hard(true); - - { - AssemblerBufferWriteScope rw_scope(*buffer); - - __ CodeEntry(); - - Label start, lab; - __ Bind(&start); - __ CompareAndBranch(x0, Immediate(imm), cond, &lab); - if (imm == 0 && ((cond == eq) || (cond == ne) || (cond == hi) || - (cond == ls))) { // One instruction generated - ASSERT_EQ(kInstrSize, __ SizeOfCodeGeneratedSince(&start)); - } else { // Two instructions generated - ASSERT_EQ(static_cast(2 * kInstrSize), - __ SizeOfCodeGeneratedSince(&start)); - } - __ Cmp(x0, Immediate(imm)); - __ Check(NegateCondition(cond), - AbortReason::kNoReason); // cond must not hold - __ Ret(); - __ Bind(&lab); // Branch leads here - __ Cmp(x0, Immediate(imm)); - __ Check(cond, AbortReason::kNoReason); // cond must hold - __ Ret(); - - CodeDesc desc; - tasm.GetCode(isolate(), &desc); - } - // We need an isolate here to execute in the simulator. - auto f = GeneratedCode::FromBuffer(isolate(), buffer->start()); - - TRACED_FOREACH(int, n, kTestCases) { f.Call(n); } - } - } -} - -struct MoveObjectAndSlotTestCase { - const char* comment; - Register dst_object; - Register dst_slot; - Register object; - Register offset_register = no_reg; -}; - -const MoveObjectAndSlotTestCase kMoveObjectAndSlotTestCases[] = { - {"no overlap", x0, x1, x2}, - {"no overlap", x0, x1, x2, x3}, - - {"object == dst_object", x2, x1, x2}, - {"object == dst_object", x2, x1, x2, x3}, - - {"object == dst_slot", x1, x2, x2}, - {"object == dst_slot", x1, x2, x2, x3}, - - {"offset == dst_object", x0, x1, x2, x0}, - - {"offset == dst_object && object == dst_slot", x0, x1, x1, x0}, - - {"offset == dst_slot", x0, x1, x2, x1}, - - {"offset == dst_slot && object == dst_object", x0, x1, x0, x1}}; - -// Make sure we include offsets that cannot be encoded in an add instruction. 
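Editorial aside: the deleted comment above ("offsets that cannot be encoded in an add instruction") refers to the AArch64 ADD-immediate encoding, which takes a 12-bit immediate optionally shifted left by 12. A sketch of that constraint (my own illustration, not code from the CL):

#include <cstdint>

// AArch64 ADD (immediate) accepts a 12-bit value, optionally shifted by 12.
constexpr bool IsEncodableAddImmediate(uint64_t imm) {
  return imm <= 0xFFF || ((imm & 0xFFF) == 0 && imm <= (0xFFFull << 12));
}

static_assert(IsEncodableAddImmediate(42));         // fits directly
static_assert(!IsEncodableAddImmediate(0x101001));  // needs a scratch register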
-const int kOffsets[] = {0, 42, kMaxRegularHeapObjectSize, 0x101001}; - -template -class TurboAssemblerTestWithParam : public TurboAssemblerTest, - public ::testing::WithParamInterface {}; - -using TurboAssemblerTestMoveObjectAndSlot = - TurboAssemblerTestWithParam; - -TEST_P(TurboAssemblerTestMoveObjectAndSlot, MoveObjectAndSlot) { - const MoveObjectAndSlotTestCase test_case = GetParam(); - TRACED_FOREACH(int32_t, offset, kOffsets) { - auto buffer = AllocateAssemblerBuffer(); - TurboAssembler tasm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo, - buffer->CreateView()); - - { - AssemblerBufferWriteScope rw_buffer_scope(*buffer); - - __ CodeEntry(); - __ Push(x0, padreg); - __ Mov(test_case.object, x1); - - Register src_object = test_case.object; - Register dst_object = test_case.dst_object; - Register dst_slot = test_case.dst_slot; - - Operand offset_operand(0); - if (test_case.offset_register == no_reg) { - offset_operand = Operand(offset); - } else { - __ Mov(test_case.offset_register, Operand(offset)); - offset_operand = Operand(test_case.offset_register); - } - - std::stringstream comment; - comment << "-- " << test_case.comment << ": MoveObjectAndSlot(" - << dst_object << ", " << dst_slot << ", " << src_object << ", "; - if (test_case.offset_register == no_reg) { - comment << "#" << offset; - } else { - comment << test_case.offset_register; - } - comment << ") --"; - __ RecordComment(comment.str().c_str()); - __ MoveObjectAndSlot(dst_object, dst_slot, src_object, offset_operand); - __ RecordComment("--"); - - // The `result` pointer was saved on the stack. - UseScratchRegisterScope temps(&tasm); - Register scratch = temps.AcquireX(); - __ Pop(padreg, scratch); - __ Str(dst_object, MemOperand(scratch)); - __ Str(dst_slot, MemOperand(scratch, kSystemPointerSize)); - - __ Ret(); - - CodeDesc desc; - tasm.GetCode(nullptr, &desc); - if (v8_flags.print_code) { - Handle code = - Factory::CodeBuilder(isolate(), desc, CodeKind::FOR_TESTING) - .Build(); - StdoutStream os; - code->Print(os); - } - } - - // We need an isolate here to execute in the simulator. - auto f = GeneratedCode::FromBuffer(isolate(), - buffer->start()); - - byte* object = new byte[offset]; - byte* result[] = {nullptr, nullptr}; - - f.Call(result, object); - - // The first element must be the address of the object, and the second the - // slot addressed by `offset`. - EXPECT_EQ(result[0], &object[0]); - EXPECT_EQ(result[1], &object[offset]); - - delete[] object; - } -} - -INSTANTIATE_TEST_SUITE_P(TurboAssemblerTest, - TurboAssemblerTestMoveObjectAndSlot, - ::testing::ValuesIn(kMoveObjectAndSlotTestCases)); - -#undef __ -#undef ERROR_MESSAGE - -} // namespace internal -} // namespace v8 diff --git a/test/unittests/assembler/turbo-assembler-x64-unittest.cc b/test/unittests/assembler/turbo-assembler-x64-unittest.cc deleted file mode 100644 index 43dd6b79d6..0000000000 --- a/test/unittests/assembler/turbo-assembler-x64-unittest.cc +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2018 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/codegen/macro-assembler.h" -#include "src/execution/simulator.h" -#include "test/common/assembler-tester.h" -#include "test/unittests/test-utils.h" -#include "testing/gtest-support.h" - -namespace v8 { -namespace internal { - -#define __ tasm. - -// Test the x64 assembler by compiling some simple functions into -// a buffer and executing them. 
These tests do not initialize the -// V8 library, create a context, or use any V8 objects. - -class TurboAssemblerTest : public TestWithIsolate {}; - -TEST_F(TurboAssemblerTest, TestHardAbort) { - auto buffer = AllocateAssemblerBuffer(); - TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, - buffer->CreateView()); - __ set_root_array_available(false); - __ set_abort_hard(true); - - __ Abort(AbortReason::kNoReason); - - CodeDesc desc; - tasm.GetCode(isolate(), &desc); - buffer->MakeExecutable(); - auto f = GeneratedCode::FromBuffer(isolate(), buffer->start()); - - ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason"); -} - -TEST_F(TurboAssemblerTest, TestCheck) { - auto buffer = AllocateAssemblerBuffer(); - TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, - buffer->CreateView()); - __ set_root_array_available(false); - __ set_abort_hard(true); - - // Fail if the first parameter is 17. - __ movl(rax, Immediate(17)); - __ cmpl(rax, arg_reg_1); - __ Check(Condition::not_equal, AbortReason::kNoReason); - __ ret(0); - - CodeDesc desc; - tasm.GetCode(isolate(), &desc); - buffer->MakeExecutable(); - auto f = GeneratedCode::FromBuffer(isolate(), buffer->start()); - - f.Call(0); - f.Call(18); - ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, "abort: no reason"); -} - -#undef __ - -} // namespace internal -} // namespace v8 diff --git a/test/unittests/heap/cppgc/member-unittest.cc b/test/unittests/heap/cppgc/member-unittest.cc index 78c0bffc8d..03cf1383fa 100644 --- a/test/unittests/heap/cppgc/member-unittest.cc +++ b/test/unittests/heap/cppgc/member-unittest.cc @@ -65,9 +65,11 @@ struct CustomWriteBarrierPolicy { static void InitializingBarrier(const void* slot, const void* value) { ++InitializingWriteBarriersTriggered; } + template static void AssigningBarrier(const void* slot, const void* value) { ++AssigningWriteBarriersTriggered; } + template static void AssigningBarrier(const void* slot, DefaultMemberStorage) { ++AssigningWriteBarriersTriggered; } diff --git a/test/unittests/libplatform/tracing-unittest.cc b/test/unittests/libplatform/tracing-unittest.cc index f90b811d0d..1dc349c91a 100644 --- a/test/unittests/libplatform/tracing-unittest.cc +++ b/test/unittests/libplatform/tracing-unittest.cc @@ -11,8 +11,8 @@ #include "testing/gtest/include/gtest/gtest.h" #ifdef V8_USE_PERFETTO -#include "perfetto/tracing/track_event.h" -#include "perfetto/tracing/track_event_legacy.h" +#include "perfetto/tracing/track_event.h" // nogncheck +#include "perfetto/tracing/track_event_legacy.h" // nogncheck #include "protos/perfetto/trace/trace.pb.h" // nogncheck #include "src/libplatform/tracing/trace-event-listener.h" #include "src/tracing/traced-value.h" diff --git a/test/unittests/logging/log-unittest.cc b/test/unittests/logging/log-unittest.cc index ca48d0629c..fdeb11dda7 100644 --- a/test/unittests/logging/log-unittest.cc +++ b/test/unittests/logging/log-unittest.cc @@ -453,7 +453,7 @@ TEST_F(LogTest, Issue539892) { } private: - void LogRecordedBuffer(i::Handle code, + void LogRecordedBuffer(i::AbstractCode code, i::MaybeHandle maybe_shared, const char* name, int length) override {} #if V8_ENABLE_WEBASSEMBLY diff --git a/test/unittests/regexp/regexp-unittest.cc b/test/unittests/regexp/regexp-unittest.cc index dcdee75927..f42b455b39 100644 --- a/test/unittests/regexp/regexp-unittest.cc +++ b/test/unittests/regexp/regexp-unittest.cc @@ -1654,6 +1654,42 @@ void MockUseCounterCallback(v8::Isolate* isolate, v8::Isolate::UseCounterFeature feature) { 
   ++global_use_counts[feature];
 }
+
+void CheckRegExpUnicodeSetIncompatibilitiesUseCounter(
+    v8::Isolate* isolate, const char* check_pattern) {
+  int* use_counts = global_use_counts;
+  int old_count = use_counts
+      [v8::Isolate::kRegExpUnicodeSetIncompatibilitiesWithUnicodeMode];
+  Local<Context> context = isolate->GetCurrentContext();
+  {
+    v8_flags.harmony_regexp_unicode_sets = true;
+    std::ostringstream os;
+    os << "/[" << check_pattern << "]/v";
+    Local<String> v8_source =
+        v8::String::NewFromUtf8(isolate, os.str().c_str()).ToLocalChecked();
+    MaybeLocal<Script> script = v8::Script::Compile(context, v8_source);
+    CHECK(script.IsEmpty());
+    CHECK_EQ(
+        old_count,
+        use_counts
+            [v8::Isolate::kRegExpUnicodeSetIncompatibilitiesWithUnicodeMode]);
+  }
+  {
+    std::ostringstream os;
+    os << "/[" << check_pattern << "]/u";
+    Local<String> v8_source =
+        v8::String::NewFromUtf8(isolate, os.str().c_str()).ToLocalChecked();
+    MaybeLocal<Script> script = v8::Script::Compile(context, v8_source);
+    Local<Value> result =
+        script.ToLocalChecked()->Run(context).ToLocalChecked();
+    CHECK(result->IsRegExp());
+    CHECK_EQ(
+        old_count + 1,
+        use_counts
+            [v8::Isolate::kRegExpUnicodeSetIncompatibilitiesWithUnicodeMode]);
+  }
+}
+
 }  // namespace
 
 using RegExpTestWithContext = TestWithContext;
@@ -1720,6 +1756,14 @@ TEST_F(RegExpTestWithContext, UseCountRegExp) {
   CHECK_EQ(2, use_counts[v8::Isolate::kRegExpPrototypeStickyGetter]);
   CHECK_EQ(1, use_counts[v8::Isolate::kRegExpPrototypeToString]);
   CHECK(resultToStringError->IsObject());
+
+  const char* incompatible_patterns[] = {
+      "(",  ")",  "[",  "{",  "}",  "/",  "-",  "|",  "&&",
+      "!!", "##", "$$", "%%", "**", "++", ",,", "..", "::",
+      ";;", "<<", "==", ">>", "??", "@@", "^^^", "``", "~~"};
+  for (auto pattern : incompatible_patterns) {
+    CheckRegExpUnicodeSetIncompatibilitiesUseCounter(v8_isolate(), pattern);
+  }
 }
 
 class UncachedExternalString
diff --git a/tools/debug_helper/debug-helper-internal.cc b/tools/debug_helper/debug-helper-internal.cc
index 01e86ed5a5..113a54ab2b 100644
--- a/tools/debug_helper/debug-helper-internal.cc
+++ b/tools/debug_helper/debug-helper-internal.cc
@@ -28,7 +28,7 @@ uintptr_t EnsureDecompressed(uintptr_t address,
   if (!COMPRESS_POINTERS_BOOL || !IsPointerCompressed(address)) return address;
   // TODO(v8:11880): ExternalCodeCompressionScheme might be needed here for
   // decompressing Code pointers from external code space.
- return i::V8HeapCompressionScheme::DecompressTaggedAny( + return i::V8HeapCompressionScheme::DecompressTagged( any_uncompressed_ptr, static_cast(address)); } diff --git a/tools/debug_helper/gen-heap-constants.py b/tools/debug_helper/gen-heap-constants.py index bc7fb335f4..c50ba3bd48 100644 --- a/tools/debug_helper/gen-heap-constants.py +++ b/tools/debug_helper/gen-heap-constants.py @@ -75,7 +75,7 @@ if (hasattr(v8heapconst, 'HEAP_FIRST_PAGES')): # Only exists in ptr-compr build if (space_name in expected_spaces): out = out + ' if (heap_addresses->' + space_name + '_first_page == 0) {\n' out = out + ' heap_addresses->' + space_name + \ - '_first_page = i::V8HeapCompressionScheme::DecompressTaggedPointer(' + \ + '_first_page = i::V8HeapCompressionScheme::DecompressTagged(' + \ 'any_uncompressed_ptr, ' + str(offset) + ');\n' out = out + ' }\n' out = out + '}\n' diff --git a/tools/dev/update-compile-commands.py b/tools/dev/update-compile-commands.py index 26e6be2c25..cf24ad6261 100755 --- a/tools/dev/update-compile-commands.py +++ b/tools/dev/update-compile-commands.py @@ -13,6 +13,11 @@ import json import os import subprocess import sys +import platform + +DEFAULT_ARCH = "x64" +if platform.machine() == "arm64": + DEFAULT_ARCH = "arm64" PYLIB_PATH = 'tools/clang/pylib' GM_PATH = 'tools/dev' @@ -73,9 +78,11 @@ def UpdateCompileCommands(): print(">>> Updating compile_commands.json...") combined = {} AddTargetsForArch("x64", combined) - AddTargetsForArch("ia32", combined) - AddTargetsForArch("arm", combined) AddTargetsForArch("arm64", combined) + if DEFAULT_ARCH != "arm64": + # Mac arm64 doesn't like 32bit platforms: + AddTargetsForArch("ia32", combined) + AddTargetsForArch("arm", combined) commands = [] for key in combined: commands.append(combined[key]) @@ -83,14 +90,16 @@ def UpdateCompileCommands(): def CompileLanguageServer(): print(">>> Compiling Torque Language Server...") - PrepareBuildDir("x64", "release") - _Call("autoninja -C out/x64.release torque-language-server") + PrepareBuildDir(DEFAULT_ARCH, "release") + _Call(f"autoninja -C out/{DEFAULT_ARCH}.release torque-language-server") + def GenerateCCFiles(): print(">>> Generating generated C++ source files...") # This must be called after UpdateCompileCommands(). - assert os.path.exists("out/x64.debug/build.ninja") - _Call("autoninja -C out/x64.debug v8_generated_cc_files") + assert os.path.exists(f"out/{DEFAULT_ARCH}.debug/build.ninja") + _Call(f"autoninja -C out/{DEFAULT_ARCH}.debug v8_generated_cc_files") + def StartGoma(): gomadir = gm.DetectGoma() diff --git a/tools/gcmole/test-expectations.txt b/tools/gcmole/test-expectations.txt index 4b92d78ee6..549fb949f0 100644 --- a/tools/gcmole/test-expectations.txt +++ b/tools/gcmole/test-expectations.txt @@ -5,7 +5,7 @@ tools/gcmole/gcmole-test.cc:28:20: note: Call might cause unexpected GC. isolate->heap()->CollectGarbage(OLD_SPACE, GarbageCollectionReason::kTesting); ^ ./src/heap/heap.h::: note: GC call here. - V8_EXPORT_PRIVATE bool CollectGarbage( + V8_EXPORT_PRIVATE void CollectGarbage( ^ tools/gcmole/gcmole-test.cc:48:3: warning: Possible problem with evaluation order with interleaved GCs. 
TwoArgumentsFunction(*CauseGC(obj1, isolate), *CauseGC(obj2, isolate)); diff --git a/tools/v8heapconst.py b/tools/v8heapconst.py index fcc0762d07..4572bd2658 100644 --- a/tools/v8heapconst.py +++ b/tools/v8heapconst.py @@ -356,19 +356,19 @@ KNOWN_MAPS = { ("read_only_space", 0x02b05): (187, "ClosureFeedbackCellArrayMap"), ("read_only_space", 0x02b2d): (249, "FeedbackVectorMap"), ("read_only_space", 0x02b55): (130, "HeapNumberMap"), - ("read_only_space", 0x02b7d): (128, "SymbolMap"), - ("read_only_space", 0x02ba5): (204, "ForeignMap"), - ("read_only_space", 0x02bcd): (256, "MegaDomHandlerMap"), - ("read_only_space", 0x02bf5): (131, "BooleanMap"), - ("read_only_space", 0x02c1d): (131, "UninitializedMap"), - ("read_only_space", 0x02c45): (131, "ArgumentsMarkerMap"), - ("read_only_space", 0x02c6d): (131, "ExceptionMap"), - ("read_only_space", 0x02c95): (131, "TerminationExceptionMap"), - ("read_only_space", 0x02cbd): (131, "OptimizedOutMap"), - ("read_only_space", 0x02ce5): (131, "StaleRegisterMap"), - ("read_only_space", 0x02d0d): (131, "SelfReferenceMarkerMap"), - ("read_only_space", 0x02d35): (131, "BasicBlockCountersMarkerMap"), - ("read_only_space", 0x02d5d): (129, "BigIntMap"), + ("read_only_space", 0x02b7d): (204, "ForeignMap"), + ("read_only_space", 0x02ba5): (256, "MegaDomHandlerMap"), + ("read_only_space", 0x02bcd): (131, "BooleanMap"), + ("read_only_space", 0x02bf5): (131, "UninitializedMap"), + ("read_only_space", 0x02c1d): (131, "ArgumentsMarkerMap"), + ("read_only_space", 0x02c45): (131, "ExceptionMap"), + ("read_only_space", 0x02c6d): (131, "TerminationExceptionMap"), + ("read_only_space", 0x02c95): (131, "OptimizedOutMap"), + ("read_only_space", 0x02cbd): (131, "StaleRegisterMap"), + ("read_only_space", 0x02ce5): (131, "SelfReferenceMarkerMap"), + ("read_only_space", 0x02d0d): (131, "BasicBlockCountersMarkerMap"), + ("read_only_space", 0x02d35): (129, "BigIntMap"), + ("read_only_space", 0x02d5d): (128, "SymbolMap"), ("read_only_space", 0x02d85): (32, "StringMap"), ("read_only_space", 0x02dad): (40, "OneByteStringMap"), ("read_only_space", 0x02dd5): (33, "ConsStringMap"), diff --git a/tools/v8windbg/src/cur-isolate.cc b/tools/v8windbg/src/cur-isolate.cc index 3a5228f1fa..dd3355efd2 100644 --- a/tools/v8windbg/src/cur-isolate.cc +++ b/tools/v8windbg/src/cur-isolate.cc @@ -4,8 +4,8 @@ #include "tools/v8windbg/src/cur-isolate.h" -HRESULT GetIsolateOffset(WRL::ComPtr& sp_ctx, - ptrdiff_t* isolate_offset) { +HRESULT GetIsolateLocation(WRL::ComPtr& sp_ctx, + Location* location) { auto sp_v8_module = Extension::Current()->GetV8Module(sp_ctx); if (sp_v8_module == nullptr) return E_FAIL; @@ -17,9 +17,7 @@ HRESULT GetIsolateOffset(WRL::ComPtr& sp_ctx, if (kind != SymbolData) return E_FAIL; WRL::ComPtr sp_isolate_key_data; RETURN_IF_FAIL(sp_isolate_sym.As(&sp_isolate_key_data)); - Location location; - RETURN_IF_FAIL(sp_isolate_key_data->GetLocation(&location)); - *isolate_offset = location.Offset; + RETURN_IF_FAIL(sp_isolate_key_data->GetLocation(location)); return S_OK; } @@ -30,34 +28,8 @@ HRESULT GetCurrentIsolate(WRL::ComPtr& sp_result) { WRL::ComPtr sp_host_context; RETURN_IF_FAIL(sp_debug_host->GetCurrentContext(&sp_host_context)); - WRL::ComPtr sp_curr_thread; - RETURN_IF_FAIL(GetCurrentThread(sp_host_context, &sp_curr_thread)); - - WRL::ComPtr sp_environment, sp_environment_block; - WRL::ComPtr sp_tls_pointer, sp_isolate_offset; - RETURN_IF_FAIL( - sp_curr_thread->GetKeyValue(L"Environment", &sp_environment, nullptr)); - - 
RETURN_IF_FAIL(sp_environment->GetKeyValue(L"EnvironmentBlock", - &sp_environment_block, nullptr)); - - // EnvironmentBlock and TlsSlots are native types (TypeUDT) and thus - // GetRawValue rather than GetKeyValue should be used to get field (member) - // values. - ModelObjectKind kind; - RETURN_IF_FAIL(sp_environment_block->GetKind(&kind)); - if (kind != ModelObjectKind::ObjectTargetObject) return E_FAIL; - - RETURN_IF_FAIL(sp_environment_block->GetRawValue( - SymbolField, L"ThreadLocalStoragePointer", 0, &sp_tls_pointer)); - - ptrdiff_t isolate_offset = -1; - RETURN_IF_FAIL(GetIsolateOffset(sp_host_context, &isolate_offset)); - - uint64_t isolate_ptr; - RETURN_IF_FAIL(UnboxULong64(sp_tls_pointer.Get(), &isolate_ptr)); - isolate_ptr += isolate_offset; - Location isolate_addr{isolate_ptr}; + Location isolate_addr; + RETURN_IF_FAIL(GetIsolateLocation(sp_host_context, &isolate_addr)); // If we got the isolate_key OK, then must have the V8 module loaded // Get the internal Isolate type from it diff --git a/tools/v8windbg/src/cur-isolate.h b/tools/v8windbg/src/cur-isolate.h index 3559256980..65ecba459b 100644 --- a/tools/v8windbg/src/cur-isolate.h +++ b/tools/v8windbg/src/cur-isolate.h @@ -18,7 +18,7 @@ HRESULT GetCurrentIsolate(WRL::ComPtr& sp_result); constexpr wchar_t kIsolateOffset[] = L"v8::internal::g_current_isolate_"; -constexpr wchar_t kIsolate[] = L"v8::internal::Isolate"; +constexpr wchar_t kIsolate[] = L"v8::internal::Isolate *"; class CurrIsolateAlias : public WRL::RuntimeClass< diff --git a/tools/v8windbg/test/v8windbg-test.cc b/tools/v8windbg/test/v8windbg-test.cc index 9eefb044d2..ecbe04a329 100644 --- a/tools/v8windbg/test/v8windbg-test.cc +++ b/tools/v8windbg/test/v8windbg-test.cc @@ -253,6 +253,21 @@ void RunTests() { // {"empty_string \"\"", "SeqOneByteString"}, &output, // p_debug_control.Get()); + // Test for @$curisolate(). This should have the same output with + // `dx v8::internal::g_current_isolate_`. + output.ClearLog(); + CHECK(SUCCEEDED(p_debug_control->Execute( + DEBUG_OUTCTL_ALL_CLIENTS, "dx v8::internal::g_current_isolate_", + DEBUG_EXECUTE_ECHO))); + size_t addr_pos = output.GetLog().find("0x"); + CHECK(addr_pos != std::string::npos); + std::string expected_output = output.GetLog().substr(addr_pos); + + output.ClearLog(); + CHECK(SUCCEEDED(p_debug_control->Execute( + DEBUG_OUTCTL_ALL_CLIENTS, "dx @$curisolate()", DEBUG_EXECUTE_ECHO))); + CHECK_EQ(output.GetLog().substr(output.GetLog().find("0x")), expected_output); + // Detach before exiting hr = p_client->DetachProcesses(); CHECK(SUCCEEDED(hr));
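Editorial aside on the @$curisolate() test added above: it compares only the pointer portion of the two debugger outputs. The substring logic it relies on can be expressed as a standalone helper (illustrative sketch; the sample strings below are made up, not real debugger output):

#include <cassert>
#include <string>

// Everything from the first "0x" onwards is the printed isolate pointer; the
// new test expects `dx v8::internal::g_current_isolate_` and
// `dx @$curisolate()` to agree on that tail.
std::string AddressTail(const std::string& log) {
  size_t pos = log.find("0x");
  return pos == std::string::npos ? std::string() : log.substr(pos);
}

int main() {
  assert(AddressTail("g_current_isolate_ : 0x23a800041000") ==
         AddressTail("@$curisolate()     : 0x23a800041000"));
  return 0;
}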