commit 87131415c9
@@ -1243,8 +1243,8 @@ filegroup(
         "src/codegen/tick-counter.h",
         "src/codegen/tnode.cc",
         "src/codegen/tnode.h",
-        "src/codegen/turbo-assembler.cc",
-        "src/codegen/turbo-assembler.h",
+        "src/codegen/macro-assembler-base.cc",
+        "src/codegen/macro-assembler-base.h",
         "src/codegen/unoptimized-compilation-info.cc",
         "src/codegen/unoptimized-compilation-info.h",
         "src/common/assert-scope.cc",
BUILD.gn
@@ -2833,6 +2833,7 @@ v8_header_set("v8_internal_headers") {
     "src/codegen/interface-descriptors.h",
     "src/codegen/label.h",
     "src/codegen/machine-type.h",
+    "src/codegen/macro-assembler-base.h",
     "src/codegen/macro-assembler-inl.h",
     "src/codegen/macro-assembler.h",
     "src/codegen/maglev-safepoint-table.h",
@@ -2853,7 +2854,6 @@ v8_header_set("v8_internal_headers") {
     "src/codegen/source-position.h",
     "src/codegen/tick-counter.h",
     "src/codegen/tnode.h",
-    "src/codegen/turbo-assembler.h",
     "src/codegen/unoptimized-compilation-info.h",
     "src/common/assert-scope.h",
     "src/common/checks.h",
@@ -4581,6 +4581,7 @@ v8_source_set("v8_base_without_compiler") {
     "src/codegen/handler-table.cc",
     "src/codegen/interface-descriptors.cc",
     "src/codegen/machine-type.cc",
+    "src/codegen/macro-assembler-base.cc",
     "src/codegen/maglev-safepoint-table.cc",
     "src/codegen/optimized-compilation-info.cc",
     "src/codegen/pending-optimization-table.cc",
@@ -4591,7 +4592,6 @@ v8_source_set("v8_base_without_compiler") {
     "src/codegen/source-position.cc",
     "src/codegen/tick-counter.cc",
     "src/codegen/tnode.cc",
-    "src/codegen/turbo-assembler.cc",
     "src/codegen/unoptimized-compilation-info.cc",
     "src/common/assert-scope.cc",
     "src/common/code-memory-access.cc",
@@ -5163,7 +5163,7 @@ v8_source_set("v8_base_without_compiler") {
   if (v8_enable_webassembly) {
     # Trap handling is enabled on arm64 Mac and in simulators on x64 on Linux,
     # Mac, and Windows.
-    if ((current_cpu == "arm64" && is_mac) ||
+    if ((current_cpu == "arm64" && is_apple) ||
        (current_cpu == "x64" && (is_linux || is_chromeos || is_mac))) {
      sources += [
        "src/trap-handler/handler-inside-posix.cc",
DEPS
@@ -63,12 +63,12 @@ vars = {
   'ninja_version': 'version:2@1.11.1.chromium.6',

   # luci-go CIPD package version.
-  'luci_go': 'git_revision:c41d94e382727fc5276cd2771741990543fce337',
+  'luci_go': 'git_revision:46eca1e3a280c340bf58f967aaded13c87ca3859',

   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling Fuchsia sdk
   # and whatever else without interference from each other.
-  'fuchsia_version': 'version:11.20230131.1.1',
+  'fuchsia_version': 'version:11.20230202.3.1',

   # Three lines of non-changing comments so that
   # the commit queue can handle CLs rolling android_sdk_build-tools_version
@@ -106,11 +106,11 @@ vars = {

 deps = {
   'base/trace_event/common':
-    Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '68e6038b5350cba18c341cc7c572170af5c5b20c',
+    Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '05a225a3e0bbd6fb6a9cac02d482ab784194411d',
   'build':
-    Var('chromium_url') + '/chromium/src/build.git' + '@' + 'e0df145ecb560e48381b6dccf3b9c8b31aa95bcd',
+    Var('chromium_url') + '/chromium/src/build.git' + '@' + 'd0fad164969ab7f41f163f9ee738ea692f43df53',
   'buildtools':
-    Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '295c6e5037e358904aef73a21409896d58547ba6',
+    Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '5408fe0e010a7d36bb2684d5f38df67dcdfe31de',
   'buildtools/clang_format/script':
     Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + 'f97059df7f8b205064625cdb5f97b56668a125ef',
   'buildtools/linux64': {
@@ -134,7 +134,7 @@ deps = {
     'condition': 'host_os == "mac"',
   },
   'buildtools/third_party/libc++/trunk':
-    Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '59bae40d835ae4eabaddbef781f5e3b778dd7907',
+    Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '549781a48cef7a038cadbe8ae9034c2d63685d9a',
   'buildtools/third_party/libc++abi/trunk':
     Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + 'b74d7716111d7eda5c03cb8f5dfc940e1c2c0030',
   'buildtools/third_party/libunwind/trunk':
@@ -164,7 +164,7 @@ deps = {
   'test/mozilla/data':
     Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
   'test/test262/data':
-    Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'f00d4118dba5d266d1611ba2cd4e995d3e4b523a',
+    Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'e7364ea7dc36a466edb2db5ef0a8e66da8dabb7d',
   'third_party/android_ndk': {
     'url': Var('chromium_url') + '/android_ndk.git' + '@' + '8388a2be5421311dc75c5f937aae13d821a27f3d',
     'condition': 'checkout_android',
@@ -212,7 +212,7 @@ deps = {
     'dep_type': 'cipd',
   },
   'third_party/catapult': {
-    'url': Var('chromium_url') + '/catapult.git' + '@' + '5a468ccd919e16a29bb3121e3c90f27bf8745942',
+    'url': Var('chromium_url') + '/catapult.git' + '@' + 'd0d703ea303c91f3afe39ebf8d2d4c9342accedc',
     'condition': 'checkout_android',
   },
   'third_party/colorama/src': {
@@ -220,7 +220,7 @@ deps = {
     'condition': 'checkout_android',
   },
   'third_party/depot_tools':
-    Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '3d072ab6fb49fd3d2116a41cee66d47c3d409299',
+    Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'ef2d011ad3041801565aa8c6d1418cc82c0ddb2e',
   'third_party/fuchsia-sdk/sdk': {
     'packages': [
       {
@@ -237,9 +237,9 @@ deps = {
   'third_party/googletest/src':
     Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'af29db7ec28d6df1c7f0f745186884091e602e07',
   'third_party/icu':
-    Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '2c51e5cc7e0a06cd4cd7cb2ddbac445af9b475ba',
+    Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '266a46937f05303da1ac4c68f2c94f9a1caa3f76',
   'third_party/instrumented_libraries':
-    Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '09ba70cfb2c0d01c60684660e357ae200caf2968',
+    Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '63d81e44712888bf70d574d5a96fa591994b9acc',
   'third_party/ittapi': {
     # Force checkout ittapi libraries to pass v8 header includes check on
     # bots that has check_v8_header_includes enabled.
@@ -104,6 +104,9 @@
     'trap-handler': {
       'filepath': 'src/trap-handler/',
     },
+    'tests': {
+      'filepath': 'test/',
+    },
   },

   'WATCHLISTS': {
@@ -124,6 +127,7 @@
     ],
     'feature_shipping_status': [
       'hablich@chromium.org',
+      'saelo+watch@chromium.org',
     ],
     'heap_changes': [
       'hpayer@chromium.org',
@@ -176,5 +180,8 @@
       'mark@chromium.org',
       'mseaborn@chromium.org',
     ],
+    'tests': [
+      'almuthanna+watch@chromium.org',
+    ],
   },
 }
@@ -17,6 +17,11 @@
 namespace cppgc {
 namespace internal {

+enum class WriteBarrierSlotType {
+  kCompressed,
+  kUncompressed,
+};
+
 #if defined(CPPGC_POINTER_COMPRESSION)

 #if defined(__clang__)
@@ -64,6 +69,8 @@ class CageBaseGlobal final {
 class V8_TRIVIAL_ABI CompressedPointer final {
  public:
   using IntegralType = uint32_t;
+  static constexpr auto kWriteBarrierSlotType =
+      WriteBarrierSlotType::kCompressed;

   V8_INLINE CompressedPointer() : value_(0u) {}
   V8_INLINE explicit CompressedPointer(const void* ptr)
@@ -173,6 +180,8 @@ class V8_TRIVIAL_ABI CompressedPointer final {
 class V8_TRIVIAL_ABI RawPointer final {
  public:
   using IntegralType = uintptr_t;
+  static constexpr auto kWriteBarrierSlotType =
+      WriteBarrierSlotType::kUncompressed;

   V8_INLINE RawPointer() : ptr_(nullptr) {}
   V8_INLINE explicit RawPointer(const void* ptr) : ptr_(ptr) {}
@@ -33,10 +33,11 @@ struct DijkstraWriteBarrierPolicy {
     // barrier doesn't break the tri-color invariant.
   }

+  template <WriteBarrierSlotType SlotType>
   V8_INLINE static void AssigningBarrier(const void* slot, const void* value) {
 #ifdef CPPGC_SLIM_WRITE_BARRIER
     if (V8_UNLIKELY(WriteBarrier::IsEnabled()))
-      WriteBarrier::CombinedWriteBarrierSlow(slot);
+      WriteBarrier::CombinedWriteBarrierSlow<SlotType>(slot);
 #else  // !CPPGC_SLIM_WRITE_BARRIER
     WriteBarrier::Params params;
     const WriteBarrier::Type type =
@@ -45,12 +46,14 @@ struct DijkstraWriteBarrierPolicy {
 #endif  // !CPPGC_SLIM_WRITE_BARRIER
   }

-  template <typename MemberStorage>
-  V8_INLINE static void AssigningBarrier(const void* slot,
-                                         MemberStorage storage) {
+  template <WriteBarrierSlotType SlotType>
+  V8_INLINE static void AssigningBarrier(const void* slot, RawPointer storage) {
+    static_assert(
+        SlotType == WriteBarrierSlotType::kUncompressed,
+        "Assigning storages of Member and UncompressedMember is not supported");
 #ifdef CPPGC_SLIM_WRITE_BARRIER
     if (V8_UNLIKELY(WriteBarrier::IsEnabled()))
-      WriteBarrier::CombinedWriteBarrierSlow(slot);
+      WriteBarrier::CombinedWriteBarrierSlow<SlotType>(slot);
 #else  // !CPPGC_SLIM_WRITE_BARRIER
     WriteBarrier::Params params;
     const WriteBarrier::Type type =
@@ -59,6 +62,25 @@ struct DijkstraWriteBarrierPolicy {
 #endif  // !CPPGC_SLIM_WRITE_BARRIER
   }

+#if defined(CPPGC_POINTER_COMPRESSION)
+  template <WriteBarrierSlotType SlotType>
+  V8_INLINE static void AssigningBarrier(const void* slot,
+                                         CompressedPointer storage) {
+    static_assert(
+        SlotType == WriteBarrierSlotType::kCompressed,
+        "Assigning storages of Member and UncompressedMember is not supported");
+#ifdef CPPGC_SLIM_WRITE_BARRIER
+    if (V8_UNLIKELY(WriteBarrier::IsEnabled()))
+      WriteBarrier::CombinedWriteBarrierSlow<SlotType>(slot);
+#else  // !CPPGC_SLIM_WRITE_BARRIER
+    WriteBarrier::Params params;
+    const WriteBarrier::Type type =
+        WriteBarrier::GetWriteBarrierType(slot, storage, params);
+    WriteBarrier(type, params, slot, storage.Load());
+#endif  // !CPPGC_SLIM_WRITE_BARRIER
+  }
+#endif  // defined(CPPGC_POINTER_COMPRESSION)
+
  private:
   V8_INLINE static void WriteBarrier(WriteBarrier::Type type,
                                      const WriteBarrier::Params& params,
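The overloads above form a compile-time dispatch: each storage representation advertises its WriteBarrierSlotType, overload resolution picks the barrier that matches the argument type, and the static_assert turns a storage/slot mismatch into a compile error rather than a silently wrong barrier. A minimal self-contained sketch of the same pattern, with shortened names that are not part of the commit:

#include <cstdio>

enum class SlotType { kCompressed, kUncompressed };

struct Raw {
  static constexpr auto kSlotType = SlotType::kUncompressed;
};
struct Compressed {
  static constexpr auto kSlotType = SlotType::kCompressed;
};

// Overload resolution selects on the storage type; the static_assert
// rejects a slot-type tag that disagrees with it.
template <SlotType kType>
void AssigningBarrier(const void* /*slot*/, Raw) {
  static_assert(kType == SlotType::kUncompressed,
                "slot type and storage must agree");
  std::puts("uncompressed barrier");
}

template <SlotType kType>
void AssigningBarrier(const void* /*slot*/, Compressed) {
  static_assert(kType == SlotType::kCompressed,
                "slot type and storage must agree");
  std::puts("compressed barrier");
}

int main() {
  int slot = 0;
  AssigningBarrier<Raw::kSlotType>(&slot, Raw{});
  AssigningBarrier<Compressed::kSlotType>(&slot, Compressed{});
  // AssigningBarrier<SlotType::kCompressed>(&slot, Raw{}) would not compile.
}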
@@ -79,8 +101,9 @@ struct DijkstraWriteBarrierPolicy {

 struct NoWriteBarrierPolicy {
   V8_INLINE static void InitializingBarrier(const void*, const void*) {}
+  template <WriteBarrierSlotType>
   V8_INLINE static void AssigningBarrier(const void*, const void*) {}
-  template <typename MemberStorage>
+  template <WriteBarrierSlotType, typename MemberStorage>
   V8_INLINE static void AssigningBarrier(const void*, MemberStorage) {}
 };

@@ -84,6 +84,7 @@ class V8_EXPORT WriteBarrier final {
   // A write barrier that combines `GenerationalBarrier()` and
   // `DijkstraMarkingBarrier()`. We only pass a single parameter here to clobber
   // as few registers as possible.
+  template <WriteBarrierSlotType>
   static V8_NOINLINE void V8_PRESERVE_MOST
   CombinedWriteBarrierSlow(const void* slot);
 #endif  // CPPGC_SLIM_WRITE_BARRIER
@@ -309,11 +309,13 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase<StorageType>,
     WriteBarrierPolicy::InitializingBarrier(Base::GetRawSlot(), value);
   }
   V8_INLINE void AssigningWriteBarrier(T* value) const {
-    WriteBarrierPolicy::AssigningBarrier(Base::GetRawSlot(), value);
+    WriteBarrierPolicy::template AssigningBarrier<
+        StorageType::kWriteBarrierSlotType>(Base::GetRawSlot(), value);
   }
   V8_INLINE void AssigningWriteBarrier() const {
-    WriteBarrierPolicy::AssigningBarrier(Base::GetRawSlot(),
-                                         Base::GetRawStorage());
+    WriteBarrierPolicy::template AssigningBarrier<
+        StorageType::kWriteBarrierSlotType>(Base::GetRawSlot(),
+                                            Base::GetRawStorage());
   }

   V8_INLINE void ClearFromGC() const { Base::ClearFromGC(); }
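On the caller side, BasicMember forwards its storage's slot type, so the correct barrier instantiation is chosen statically at every assignment. A hedged usage sketch (the Node type is illustrative, not from the commit); under CPPGC_POINTER_COMPRESSION the assignment below instantiates the kCompressed overload, while uncompressed builds take the RawPointer one:

#include "cppgc/garbage-collected.h"
#include "cppgc/member.h"
#include "cppgc/visitor.h"

class Node final : public cppgc::GarbageCollected<Node> {
 public:
  // Routes through AssigningBarrier<StorageType::kWriteBarrierSlotType>.
  void set_next(Node* n) { next_ = n; }
  void Trace(cppgc::Visitor* visitor) const { visitor->Trace(next_); }

 private:
  cppgc::Member<Node> next_;  // compressed slot when pointer compression is on
};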
@@ -365,8 +365,7 @@ Local<Value> Context::GetEmbedderData(int index) {
 #ifdef V8_COMPRESS_POINTERS
   // We read the full pointer value and then decompress it in order to avoid
   // dealing with potential endiannes issues.
-  value =
-      I::DecompressTaggedAnyField(embedder_data, static_cast<uint32_t>(value));
+  value = I::DecompressTaggedField(embedder_data, static_cast<uint32_t>(value));
 #endif
   internal::Isolate* isolate = internal::IsolateFromNeverReadOnlySpaceObject(
       *reinterpret_cast<A*>(this));
@@ -880,7 +880,7 @@ class Internals {
     return addr & -static_cast<intptr_t>(kPtrComprCageBaseAlignment);
   }

-  V8_INLINE static internal::Address DecompressTaggedAnyField(
+  V8_INLINE static internal::Address DecompressTaggedField(
       internal::Address heap_object_ptr, uint32_t value) {
     internal::Address base =
         GetPtrComprCageBaseFromOnHeapAddress(heap_object_ptr);
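Behind the rename, decompression is plain arithmetic: the 32-bit field value is an offset into the pointer-compression cage, whose base is recovered by masking any on-heap address. A hedged standalone sketch of that arithmetic, 64-bit only (the constant mirrors kPtrComprCageBaseAlignment; this is not the actual V8 code):

#include <cstdint>

using Address = uintptr_t;

// 4 GB cage: every compressed pointer is a 32-bit offset from the base.
constexpr Address kCageBaseAlignment = Address{1} << 32;

Address CageBaseFromOnHeapAddress(Address on_heap) {
  // Round down to the cage base, as in
  // GetPtrComprCageBaseFromOnHeapAddress() above.
  return on_heap & -static_cast<intptr_t>(kCageBaseAlignment);
}

Address DecompressTagged(Address any_on_heap_ptr, uint32_t compressed) {
  // Upper 32 bits come from the cage base, lower 32 from the field value.
  return CageBaseFromOnHeapAddress(any_on_heap_ptr) + compressed;
}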
@@ -542,6 +542,7 @@ class V8_EXPORT Isolate {
     kAsyncStackTaggingCreateTaskCall = 116,
     kDurationFormat = 117,
     kInvalidatedNumberStringPrototypeNoReplaceProtector = 118,
+    kRegExpUnicodeSetIncompatibilitiesWithUnicodeMode = 119,

     // If you add new values here, you'll also need to update Chromium's:
     // web_feature.mojom, use_counter_callback.cc, and enums.xml. V8 changes to
@@ -717,7 +717,7 @@ Local<Value> Object::GetInternalField(int index) {
 #ifdef V8_COMPRESS_POINTERS
   // We read the full pointer value and then decompress it in order to avoid
   // dealing with potential endiannes issues.
-  value = I::DecompressTaggedAnyField(obj, static_cast<uint32_t>(value));
+  value = I::DecompressTaggedField(obj, static_cast<uint32_t>(value));
 #endif
   internal::Isolate* isolate =
       internal::IsolateFromNeverReadOnlySpaceObject(obj);
@ -346,12 +346,15 @@ path. Add it with -I<path> to the command line
|
|||||||
# define V8_HAS_ATTRIBUTE_NONNULL (__has_attribute(nonnull))
|
# define V8_HAS_ATTRIBUTE_NONNULL (__has_attribute(nonnull))
|
||||||
# define V8_HAS_ATTRIBUTE_NOINLINE (__has_attribute(noinline))
|
# define V8_HAS_ATTRIBUTE_NOINLINE (__has_attribute(noinline))
|
||||||
# define V8_HAS_ATTRIBUTE_UNUSED (__has_attribute(unused))
|
# define V8_HAS_ATTRIBUTE_UNUSED (__has_attribute(unused))
|
||||||
// Support for the "preserve_most" attribute is incomplete on 32-bit, and we see
|
// Support for the "preserve_most" attribute is limited:
|
||||||
// failures in component builds. Thus only use it in 64-bit non-component builds
|
// - 32-bit platforms do not implement it,
|
||||||
// for now.
|
// - component builds fail because _dl_runtime_resolve clobbers registers,
|
||||||
#if (defined(_M_X64) || defined(__x86_64__) || defined(__AARCH64EL__) || \
|
// - we see crashes on arm64 on Windows (https://crbug.com/1409934), which can
|
||||||
defined(_M_ARM64)) /* x64 or arm64 */ \
|
// hopefully be fixed in the future.
|
||||||
&& !defined(COMPONENT_BUILD)
|
#if (defined(_M_X64) || defined(__x86_64__) /* x64 (everywhere) */ \
|
||||||
|
|| ((defined(__AARCH64EL__) || defined(_M_ARM64)) /* arm64, but ... */ \
|
||||||
|
&& !defined(_WIN32))) /* not on windows */ \
|
||||||
|
&& !defined(COMPONENT_BUILD) /* no component build */
|
||||||
# define V8_HAS_ATTRIBUTE_PRESERVE_MOST (__has_attribute(preserve_most))
|
# define V8_HAS_ATTRIBUTE_PRESERVE_MOST (__has_attribute(preserve_most))
|
||||||
#endif
|
#endif
|
||||||
# define V8_HAS_ATTRIBUTE_VISIBILITY (__has_attribute(visibility))
|
# define V8_HAS_ATTRIBUTE_VISIBILITY (__has_attribute(visibility))
|
||||||
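preserve_most makes the annotated callee save and restore nearly all caller-saved registers itself, so a call from a hot inlined fast path (such as the slim write barrier above) barely perturbs register allocation at the call site. A hedged sketch of how such a macro is typically wired up (PRESERVE_MOST here is illustrative, not V8's actual V8_PRESERVE_MOST definition):

#if defined(__clang__) && defined(__has_attribute)
#if __has_attribute(preserve_most)
#define PRESERVE_MOST __attribute__((preserve_most))
#endif
#endif
#ifndef PRESERVE_MOST
#define PRESERVE_MOST  // no-op fallback on compilers without the attribute
#endif

// The callee preserves almost all registers, keeping the surrounding
// fast path cheap even when the slow call is taken.
PRESERVE_MOST void WriteBarrierSlowPath(const void* slot);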
@@ -48,14 +48,13 @@ std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
   for (unsigned int i = 0; i < images_count; ++i) {
     const mach_header* header = _dyld_get_image_header(i);
     if (header == nullptr) continue;
+    unsigned long size;
 #if V8_HOST_ARCH_I32
-    unsigned int size;
-    char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
+    uint8_t* code_ptr = getsectiondata(header, SEG_TEXT, SECT_TEXT, &size);
 #else
-    uint64_t size;
-    char* code_ptr = getsectdatafromheader_64(
-        reinterpret_cast<const mach_header_64*>(header), SEG_TEXT, SECT_TEXT,
-        &size);
+    const mach_header_64* header64 =
+        reinterpret_cast<const mach_header_64*>(header);
+    uint8_t* code_ptr = getsectiondata(header64, SEG_TEXT, SECT_TEXT, &size);
 #endif
     if (code_ptr == nullptr) continue;
     const intptr_t slide = _dyld_get_image_vmaddr_slide(i);
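getsectiondata() (declared in <mach-o/getsect.h>) reports the section size through an unsigned long out-parameter in both its 32- and 64-bit flavors, which is what lets the size declaration move out of the #if above. A hedged standalone sketch of the 64-bit call shape (macOS only, written under the assumption that the process's images carry 64-bit headers):

#include <mach-o/dyld.h>
#include <mach-o/getsect.h>

#include <cstdint>
#include <cstdio>

int main() {
  for (uint32_t i = 0; i < _dyld_image_count(); ++i) {
    const mach_header* header = _dyld_get_image_header(i);
    if (header == nullptr) continue;
    unsigned long size = 0;
    // Same call shape as the #else branch in the patched code.
    uint8_t* code = getsectiondata(
        reinterpret_cast<const mach_header_64*>(header), SEG_TEXT, SECT_TEXT,
        &size);
    if (code != nullptr) {
      std::printf("%s: __TEXT,__text is %lu bytes\n", _dyld_get_image_name(i),
                  size);
    }
  }
  return 0;
}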
@@ -81,13 +81,13 @@ class SmallVector {
       begin_ = other.begin_;
       end_ = other.end_;
       end_of_storage_ = other.end_of_storage_;
-      other.reset_to_inline_storage();
     } else {
       DCHECK_GE(capacity(), other.size());  // Sanity check.
       size_t other_size = other.size();
       memcpy(begin_, other.begin_, sizeof(T) * other_size);
       end_ = begin_ + other_size;
     }
+    other.reset_to_inline_storage();
     return *this;
   }

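The fix hoists other.reset_to_inline_storage() out of the heap-storage branch: previously a moved-from SmallVector whose contents fit in inline storage kept its old size. A hedged regression-style sketch of the behavior being fixed (the assert and internal include path are illustrative, not an actual V8 unittest):

#include <cassert>
#include <utility>

#include "src/base/small-vector.h"  // V8-internal header.

void MovedFromSmallVectorIsEmpty() {
  v8::base::SmallVector<int, 8> a;  // contents fit in inline storage
  a.emplace_back(1);
  v8::base::SmallVector<int, 8> b;
  b = std::move(a);
  // Before the fix, the inline-storage branch never reset `a`, so it
  // still reported size() == 1 here; now both branches reset it.
  assert(a.empty());
  assert(b.size() == 1);
}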
@@ -309,8 +309,8 @@ void BaselineAssembler::Pop(T... registers) {
   detail::PopAllHelper<T...>::Pop(this, registers...);
 }

-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
                                         int offset) {
   __ ldr(output, FieldMemOperand(source, offset));
 }

@@ -326,11 +326,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
   SmiUntag(output);
 }

-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
-                                           int offset) {
-  __ ldr(output, FieldMemOperand(source, offset));
-}
-
 void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
                                                   Register source, int offset) {
   __ ldrh(output, FieldMemOperand(source, offset));
@@ -372,8 +367,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                                 Label* on_result,
                                                 Label::Distance) {
   Label fallthrough;
-  LoadTaggedPointerField(scratch_and_result, feedback_vector,
+  LoadTaggedField(scratch_and_result, feedback_vector,
                   FeedbackVector::OffsetOfElementAt(slot.ToInt()));
   __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);

   // Is it marked_for_deoptimization? If yes, clear the slot.
@@ -398,8 +393,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                   JSFunction::kFeedbackCellOffset);

   Register interrupt_budget = scratch_scope.AcquireScratch();
   __ ldr(interrupt_budget,
@@ -421,8 +416,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                   JSFunction::kFeedbackCellOffset);

   Register interrupt_budget = scratch_scope.AcquireScratch();
   __ ldr(interrupt_budget,
@@ -437,16 +432,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
 void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                                        uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
+  LoadTaggedField(kInterpreterAccumulatorRegister, context,
                   Context::OffsetOfElementAt(index));
 }

 void BaselineAssembler::StaContextSlot(Register context, Register value,
                                        uint32_t index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
   StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
                                    value);
@@ -455,33 +450,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
 void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
                                           uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
   if (cell_index > 0) {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularExportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
     // The actual array index is (cell_index - 1).
     cell_index -= 1;
   } else {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularImportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
     // The actual array index is (-cell_index - 1).
     cell_index = -cell_index - 1;
   }
   LoadFixedArrayElement(context, context, cell_index);
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
-                     Cell::kValueOffset);
+  LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
 }

 void BaselineAssembler::StaModuleVariable(Register context, Register value,
                                           int cell_index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
-  LoadTaggedPointerField(context, context,
-                         SourceTextModule::kRegularExportsOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);

   // The actual array index is (cell_index - 1).
   cell_index -= 1;
@@ -570,8 +561,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
   __ masm()->LeaveFrame(StackFrame::BASELINE);

   // Drop receiver + arguments.
-  __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger,
-                           TurboAssembler::kCountIncludesReceiver);
+  __ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger,
+                           MacroAssembler::kCountIncludesReceiver);
   __ masm()->Ret();
 }

@@ -369,9 +369,9 @@ void BaselineAssembler::Pop(T... registers) {
   detail::PopAllHelper<T...>::Pop(this, registers...);
 }

-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
                                         int offset) {
-  __ LoadTaggedPointerField(output, FieldMemOperand(source, offset));
+  __ LoadTaggedField(output, FieldMemOperand(source, offset));
 }

 void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
@@ -386,11 +386,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
   SmiUntag(output);
 }

-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
-                                           int offset) {
-  __ LoadAnyTaggedField(output, FieldMemOperand(source, offset));
-}
-
 void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
                                                   Register source, int offset) {
   __ Ldrh(output, FieldMemOperand(source, offset));
@@ -440,8 +435,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                   JSFunction::kFeedbackCellOffset);

   Register interrupt_budget = scratch_scope.AcquireScratch().W();
   __ Ldr(interrupt_budget,
@@ -463,8 +458,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                   JSFunction::kFeedbackCellOffset);

   Register interrupt_budget = scratch_scope.AcquireScratch().W();
   __ Ldr(interrupt_budget,
@@ -479,16 +474,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
 void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                                        uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
+  LoadTaggedField(kInterpreterAccumulatorRegister, context,
                   Context::OffsetOfElementAt(index));
 }

 void BaselineAssembler::StaContextSlot(Register context, Register value,
                                        uint32_t index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
   StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
                                    value);
@@ -497,33 +492,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
 void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
                                           uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
   if (cell_index > 0) {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularExportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
     // The actual array index is (cell_index - 1).
     cell_index -= 1;
   } else {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularImportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
     // The actual array index is (-cell_index - 1).
     cell_index = -cell_index - 1;
   }
   LoadFixedArrayElement(context, context, cell_index);
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
-                     Cell::kValueOffset);
+  LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
 }

 void BaselineAssembler::StaModuleVariable(Register context, Register value,
                                           int cell_index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
-  LoadTaggedPointerField(context, context,
-                         SourceTextModule::kRegularExportsOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);

   // The actual array index is (cell_index - 1).
   cell_index -= 1;
@@ -571,7 +562,7 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
   {
     const int instruction_count =
         num_labels * instructions_per_label + instructions_per_jump_target;
-    TurboAssembler::BlockPoolsScope block_pools(masm_,
+    MacroAssembler::BlockPoolsScope block_pools(masm_,
                                                 instruction_count * kInstrSize);
     __ Bind(&table);
     for (int i = 0; i < num_labels; ++i) {
@@ -630,7 +621,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
   __ masm()->LeaveFrame(StackFrame::BASELINE);

   // Drop receiver + arguments.
-  __ masm()->DropArguments(params_size, TurboAssembler::kCountIncludesReceiver);
+  __ masm()->DropArguments(params_size, MacroAssembler::kCountIncludesReceiver);
   __ masm()->Ret();
 }

@@ -114,13 +114,12 @@ void BaselineAssembler::SmiUntag(Register output, Register value) {

 void BaselineAssembler::LoadFixedArrayElement(Register output, Register array,
                                               int32_t index) {
-  LoadTaggedAnyField(output, array,
-                     FixedArray::kHeaderSize + index * kTaggedSize);
+  LoadTaggedField(output, array, FixedArray::kHeaderSize + index * kTaggedSize);
 }

 void BaselineAssembler::LoadPrototype(Register prototype, Register object) {
   __ LoadMap(prototype, object);
-  LoadTaggedPointerField(prototype, prototype, Map::kPrototypeOffset);
+  LoadTaggedField(prototype, prototype, Map::kPrototypeOffset);
 }
 void BaselineAssembler::LoadContext(Register output) {
   LoadRegister(output, interpreter::Register::current_context());
@@ -147,13 +147,11 @@ class BaselineAssembler {
   inline void TailCallBuiltin(Builtin builtin);
   inline void CallRuntime(Runtime::FunctionId function, int nargs);

-  inline void LoadTaggedPointerField(Register output, Register source,
-                                     int offset);
+  inline void LoadTaggedField(Register output, Register source, int offset);
   inline void LoadTaggedSignedField(Register output, Register source,
                                     int offset);
   inline void LoadTaggedSignedFieldAndUntag(Register output, Register source,
                                             int offset);
-  inline void LoadTaggedAnyField(Register output, Register source, int offset);
   inline void LoadWord16FieldZeroExtend(Register output, Register source,
                                         int offset);
   inline void LoadWord8Field(Register output, Register source, int offset);
@@ -170,16 +168,12 @@ class BaselineAssembler {
   // X64 supports complex addressing mode, pointer decompression can be done by
   // [%compressed_base + %r1 + K].
 #if V8_TARGET_ARCH_X64
-  inline void LoadTaggedPointerField(TaggedRegister output, Register source,
+  inline void LoadTaggedField(TaggedRegister output, Register source,
                               int offset);
-  inline void LoadTaggedPointerField(TaggedRegister output,
-                                     TaggedRegister source, int offset);
-  inline void LoadTaggedPointerField(Register output, TaggedRegister source,
+  inline void LoadTaggedField(TaggedRegister output, TaggedRegister source,
+                              int offset);
+  inline void LoadTaggedField(Register output, TaggedRegister source,
                               int offset);
-  inline void LoadTaggedAnyField(Register output, TaggedRegister source,
-                                 int offset);
-  inline void LoadTaggedAnyField(TaggedRegister output, TaggedRegister source,
-                                 int offset);
   inline void LoadFixedArrayElement(Register output, TaggedRegister array,
                                     int32_t index);
   inline void LoadFixedArrayElement(TaggedRegister output, TaggedRegister array,
@@ -439,8 +439,8 @@ void BaselineCompiler::LoadFeedbackVector(Register output) {

 void BaselineCompiler::LoadClosureFeedbackArray(Register output) {
   LoadFeedbackVector(output);
-  __ LoadTaggedPointerField(output, output,
+  __ LoadTaggedField(output, output,
                      FeedbackVector::kClosureFeedbackCellArrayOffset);
 }

 void BaselineCompiler::SelectBooleanConstant(
@@ -754,8 +754,8 @@ void BaselineCompiler::VisitLdaCurrentContextSlot() {
   BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
   Register context = scratch_scope.AcquireScratch();
   __ LoadContext(context);
-  __ LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
+  __ LoadTaggedField(kInterpreterAccumulatorRegister, context,
                      Context::OffsetOfElementAt(Index(0)));
 }

 void BaselineCompiler::VisitLdaImmutableCurrentContextSlot() {
@@ -1350,9 +1350,9 @@ void BaselineCompiler::VisitIntrinsicCreateJSGeneratorObject(
 void BaselineCompiler::VisitIntrinsicGeneratorGetResumeMode(
     interpreter::RegisterList args) {
   __ LoadRegister(kInterpreterAccumulatorRegister, args[0]);
-  __ LoadTaggedAnyField(kInterpreterAccumulatorRegister,
+  __ LoadTaggedField(kInterpreterAccumulatorRegister,
                      kInterpreterAccumulatorRegister,
                      JSGeneratorObject::kResumeModeOffset);
 }

 void BaselineCompiler::VisitIntrinsicGeneratorClose(
@@ -2211,8 +2211,8 @@ void BaselineCompiler::VisitSwitchOnGeneratorState() {
                       Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));

   Register context = scratch_scope.AcquireScratch();
-  __ LoadTaggedAnyField(context, generator_object,
+  __ LoadTaggedField(context, generator_object,
                      JSGeneratorObject::kContextOffset);
   __ StoreContext(context);

   interpreter::JumpTableTargetOffsets offsets =
@@ -293,8 +293,8 @@ void BaselineAssembler::Pop(T... registers) {
   (__ Pop(registers), ...);
 }

-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
                                         int offset) {
   __ mov(output, FieldOperand(source, offset));
 }

@@ -310,11 +310,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
   SmiUntag(output);
 }

-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
-                                           int offset) {
-  __ mov(output, FieldOperand(source, offset));
-}
-
 void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
                                                   Register source, int offset) {
   __ movzx_w(output, FieldOperand(source, offset));
@@ -354,8 +349,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                                 Label* on_result,
                                                 Label::Distance distance) {
   Label fallthrough;
-  LoadTaggedPointerField(scratch_and_result, feedback_vector,
+  LoadTaggedField(scratch_and_result, feedback_vector,
                   FeedbackVector::OffsetOfElementAt(slot.ToInt()));
   __ LoadWeakValue(scratch_and_result, &fallthrough);

   // Is it marked_for_deoptimization? If yes, clear the slot.
@@ -378,8 +373,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                   JSFunction::kFeedbackCellOffset);
   __ add(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
          Immediate(weight));
   if (skip_interrupt_label) {
@@ -395,8 +390,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   Register feedback_cell = scratch_scope.AcquireScratch();
   DCHECK(!AreAliased(feedback_cell, weight));
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
+  LoadTaggedField(feedback_cell, feedback_cell,
                   JSFunction::kFeedbackCellOffset);
   __ add(FieldOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset),
          weight);
   if (skip_interrupt_label) __ j(greater_equal, skip_interrupt_label);
@@ -405,16 +400,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
 void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                                        uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
+  LoadTaggedField(kInterpreterAccumulatorRegister, context,
                   Context::OffsetOfElementAt(index));
 }

 void BaselineAssembler::StaContextSlot(Register context, Register value,
                                        uint32_t index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
   StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
                                    value);
@@ -423,33 +418,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
 void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
                                           uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
   if (cell_index > 0) {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularExportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
     // The actual array index is (cell_index - 1).
     cell_index -= 1;
   } else {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularImportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
     // The actual array index is (-cell_index - 1).
     cell_index = -cell_index - 1;
   }
   LoadFixedArrayElement(context, context, cell_index);
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
-                     Cell::kValueOffset);
+  LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
 }

 void BaselineAssembler::StaModuleVariable(Register context, Register value,
                                           int cell_index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
-  LoadTaggedPointerField(context, context,
-                         SourceTextModule::kRegularExportsOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);

   // The actual array index is (cell_index - 1).
   cell_index -= 1;
@@ -539,8 +530,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {

   // Drop receiver + arguments.
   __ masm()->DropArguments(params_size, scratch,
-                           TurboAssembler::kCountIsInteger,
-                           TurboAssembler::kCountIncludesReceiver);
+                           MacroAssembler::kCountIsInteger,
+                           MacroAssembler::kCountIncludesReceiver);
   __ masm()->Ret();
 }

|
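Note: the hunks above are call-site fallout from collapsing LoadTaggedPointerField and LoadTaggedAnyField into a single LoadTaggedField. A minimal sketch of why one entry point suffices under pointer compression follows; every name and constant in it is an illustrative stand-in, not V8 source:

  #include <cstdint>
  #include <cstdio>

  using Tagged_t = uint32_t;  // compressed on-heap representation (assumption)
  using Address = uint64_t;   // full machine word

  constexpr Address kCageBase = 0x0000100000000000ULL;  // hypothetical heap base

  // One decompression path works for Smis and heap pointers alike once both
  // are rebased the same way, so the pointer/any split buys nothing.
  Address LoadTaggedField(const Tagged_t* slot) { return kCageBase + *slot; }

  // Only Smi-only fields keep a dedicated path (sign-extend, no base).
  int64_t LoadTaggedSignedField(const Tagged_t* slot) {
    return static_cast<int32_t>(*slot);
  }

  int main() {
    Tagged_t field = 0x42;
    std::printf("%llx %lld\n",
                static_cast<unsigned long long>(LoadTaggedField(&field)),
                static_cast<long long>(LoadTaggedSignedField(&field)));
  }
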
@@ -296,8 +296,8 @@ void BaselineAssembler::Pop(T... registers) {
   detail::PopAllHelper<T...>::Pop(this, registers...);
 }
 
-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
-                                               int offset) {
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
+                                        int offset) {
   __ Ld_d(output, FieldMemOperand(source, offset));
 }
 void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
@@ -310,10 +310,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
   LoadTaggedSignedField(output, source, offset);
   SmiUntag(output);
 }
-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
-                                           int offset) {
-  __ Ld_d(output, FieldMemOperand(source, offset));
-}
 void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
                                                   Register source, int offset) {
   __ Ld_hu(output, FieldMemOperand(source, offset));
@@ -350,8 +346,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                                 Label* on_result,
                                                 Label::Distance) {
   Label fallthrough;
-  LoadTaggedPointerField(scratch_and_result, feedback_vector,
-                         FeedbackVector::OffsetOfElementAt(slot.ToInt()));
+  LoadTaggedField(scratch_and_result, feedback_vector,
+                  FeedbackVector::OffsetOfElementAt(slot.ToInt()));
   __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
   // Is it marked_for_deoptimization? If yes, clear the slot.
   {
@@ -374,8 +370,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
-                         JSFunction::kFeedbackCellOffset);
+  LoadTaggedField(feedback_cell, feedback_cell,
+                  JSFunction::kFeedbackCellOffset);
 
   Register interrupt_budget = scratch_scope.AcquireScratch();
   __ Ld_w(interrupt_budget,
@@ -394,8 +390,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
-                         JSFunction::kFeedbackCellOffset);
+  LoadTaggedField(feedback_cell, feedback_cell,
+                  JSFunction::kFeedbackCellOffset);
 
   Register interrupt_budget = scratch_scope.AcquireScratch();
   __ Ld_w(interrupt_budget,
@@ -410,16 +406,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
 void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                                        uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
-                     Context::OffsetOfElementAt(index));
+  LoadTaggedField(kInterpreterAccumulatorRegister, context,
+                  Context::OffsetOfElementAt(index));
 }
 
 void BaselineAssembler::StaContextSlot(Register context, Register value,
                                        uint32_t index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
   StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
                                    value);
@@ -428,33 +424,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
 void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
                                           uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
   if (cell_index > 0) {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularExportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
     // The actual array index is (cell_index - 1).
     cell_index -= 1;
   } else {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularImportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
     // The actual array index is (-cell_index - 1).
     cell_index = -cell_index - 1;
   }
   LoadFixedArrayElement(context, context, cell_index);
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
-                     Cell::kValueOffset);
+  LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
 }
 
 void BaselineAssembler::StaModuleVariable(Register context, Register value,
                                           int cell_index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
-  LoadTaggedPointerField(context, context,
-                         SourceTextModule::kRegularExportsOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
 
   // The actual array index is (cell_index - 1).
   cell_index -= 1;
@@ -533,8 +525,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
   __ masm()->LeaveFrame(StackFrame::BASELINE);
 
   // Drop receiver + arguments.
-  __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger,
-                           TurboAssembler::kCountIncludesReceiver);
+  __ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger,
+                           MacroAssembler::kCountIncludesReceiver);
   __ masm()->Ret();
 }
 
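Note: on ports without pointer compression, such as the two MIPS-family sections here, the deleted LoadTaggedAnyField was already a byte-for-byte duplicate of the pointer flavour, since both emitted the same full-width load; the merge simply removes the copy. A stand-alone sketch of that collapse (Register, MemOperand and the emit call are compile-time stubs, not V8 API):

  #include <cstdio>

  // Minimal stand-ins so the sketch compiles outside V8.
  struct Register { int code; };
  struct MemOperand { int base; int offset; };
  inline MemOperand FieldMemOperand(Register object, int offset) {
    return MemOperand{object.code, offset - 1};  // untag the HeapObject pointer
  }

  struct BaselineAssemblerSketch {
    void Ld_d(Register out, MemOperand op) {  // 64-bit load, loong64-style name
      std::printf("ld.d r%d, [r%d+%d]\n", out.code, op.base, op.offset);
    }
    // After the merge: one body. Before it, LoadTaggedAnyField repeated
    // exactly this body under a second name.
    void LoadTaggedField(Register out, Register src, int offset) {
      Ld_d(out, FieldMemOperand(src, offset));
    }
  };

  int main() {
    BaselineAssemblerSketch masm;
    masm.LoadTaggedField(Register{2}, Register{3}, 8);
  }
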
@@ -304,8 +304,8 @@ void BaselineAssembler::Pop(T... registers) {
   detail::PopAllHelper<T...>::Pop(this, registers...);
 }
 
-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
-                                               int offset) {
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
+                                        int offset) {
   __ Ld(output, FieldMemOperand(source, offset));
 }
 void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
@@ -318,10 +318,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
   LoadTaggedSignedField(output, source, offset);
   SmiUntag(output);
 }
-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
-                                           int offset) {
-  __ Ld(output, FieldMemOperand(source, offset));
-}
 void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
                                                   Register source, int offset) {
   __ Lhu(output, FieldMemOperand(source, offset));
@@ -360,8 +356,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                                 Label* on_result,
                                                 Label::Distance) {
   Label fallthrough;
-  LoadTaggedPointerField(scratch_and_result, feedback_vector,
-                         FeedbackVector::OffsetOfElementAt(slot.ToInt()));
+  LoadTaggedField(scratch_and_result, feedback_vector,
+                  FeedbackVector::OffsetOfElementAt(slot.ToInt()));
   __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
   // Is it marked_for_deoptimization? If yes, clear the slot.
   {
@@ -384,8 +380,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
-                         JSFunction::kFeedbackCellOffset);
+  LoadTaggedField(feedback_cell, feedback_cell,
+                  JSFunction::kFeedbackCellOffset);
 
   Register interrupt_budget = scratch_scope.AcquireScratch();
   __ Lw(interrupt_budget,
@@ -404,8 +400,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
-                         JSFunction::kFeedbackCellOffset);
+  LoadTaggedField(feedback_cell, feedback_cell,
+                  JSFunction::kFeedbackCellOffset);
 
   Register interrupt_budget = scratch_scope.AcquireScratch();
   __ Lw(interrupt_budget,
@@ -420,16 +416,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
 void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                                        uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
-                     Context::OffsetOfElementAt(index));
+  LoadTaggedField(kInterpreterAccumulatorRegister, context,
+                  Context::OffsetOfElementAt(index));
 }
 
 void BaselineAssembler::StaContextSlot(Register context, Register value,
                                        uint32_t index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
   StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
                                    value);
@@ -438,33 +434,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
 void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
                                           uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
   if (cell_index > 0) {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularExportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
     // The actual array index is (cell_index - 1).
     cell_index -= 1;
   } else {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularImportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
     // The actual array index is (-cell_index - 1).
     cell_index = -cell_index - 1;
   }
   LoadFixedArrayElement(context, context, cell_index);
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
-                     Cell::kValueOffset);
+  LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
 }
 
 void BaselineAssembler::StaModuleVariable(Register context, Register value,
                                           int cell_index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
-  LoadTaggedPointerField(context, context,
-                         SourceTextModule::kRegularExportsOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
 
   // The actual array index is (cell_index - 1).
   cell_index -= 1;
@@ -544,8 +536,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
   __ masm()->LeaveFrame(StackFrame::BASELINE);
 
   // Drop receiver + arguments.
-  __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger,
-                           TurboAssembler::kCountIncludesReceiver);
+  __ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger,
+                           MacroAssembler::kCountIncludesReceiver);
 
   __ masm()->Ret();
 }
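Note: the DropArguments rewrites in these EmitReturn hunks are pure re-qualification: with TurboAssembler folded into MacroAssembler, the enum constants keep their values and only the owning scope changes. A compilable stand-in showing just that (the class is a stub carrying only the constants the diff touches, not V8's real MacroAssembler):

  #include <cstdio>

  struct MacroAssembler {  // stub; only the constants the diff touches
    enum ArgumentsCountType { kCountIsInteger };
    enum ArgumentsCountMode { kCountIncludesReceiver };
  };

  int main() {
    // Call sites that wrote TurboAssembler::kCountIsInteger now write this;
    // nothing about the emitted code changes.
    auto type = MacroAssembler::kCountIsInteger;
    auto mode = MacroAssembler::kCountIncludesReceiver;
    std::printf("%d %d\n", static_cast<int>(type), static_cast<int>(mode));
  }
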
@@ -49,31 +49,6 @@ class BaselineAssembler::ScratchRegisterScope {
   int registers_used_;
 };
 
-inline bool IsSignedCondition(Condition cond) {
-  switch (cond) {
-    case kEqual:
-    case kNotEqual:
-    case kLessThan:
-    case kGreaterThan:
-    case kLessThanEqual:
-    case kGreaterThanEqual:
-    case kOverflow:
-    case kNoOverflow:
-    case kZero:
-    case kNotZero:
-      return true;
-
-    case kUnsignedLessThan:
-    case kUnsignedGreaterThan:
-    case kUnsignedLessThanEqual:
-    case kUnsignedGreaterThanEqual:
-      return false;
-
-    default:
-      UNREACHABLE();
-  }
-}
-
 #define __ assm->
 // ppc helper
 template <int width = 64>
@@ -82,19 +57,19 @@ static void JumpIfHelper(MacroAssembler* assm, Condition cc, Register lhs,
   static_assert(width == 64 || width == 32,
                 "only support 64 and 32 bit compare");
   if (width == 64) {
-    if (IsSignedCondition(cc)) {
+    if (is_signed(cc)) {
       __ CmpS64(lhs, rhs);
     } else {
       __ CmpU64(lhs, rhs);
     }
   } else {
-    if (IsSignedCondition(cc)) {
+    if (is_signed(cc)) {
      __ CmpS32(lhs, rhs);
     } else {
       __ CmpU32(lhs, rhs);
     }
   }
-  __ b(check_condition(cc), target);
+  __ b(to_condition(cc), target);
 }
 #undef __
 
@@ -160,18 +135,18 @@ void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
                                       Label* target, Label::Distance) {
   ASM_CODE_COMMENT(masm_);
   __ AndU64(r0, value, Operand(mask), ip, SetRC);
-  __ b(check_condition(cc), target, cr0);
+  __ b(to_condition(cc), target, cr0);
 }
 
 void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
                                Label* target, Label::Distance) {
   ASM_CODE_COMMENT(masm_);
-  if (IsSignedCondition(cc)) {
+  if (is_signed(cc)) {
     __ CmpS64(lhs, rhs, r0);
   } else {
     __ CmpU64(lhs, rhs, r0);
   }
-  __ b(check_condition(cc), target);
+  __ b(to_condition(cc), target);
 }
 
 void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
@@ -231,7 +206,7 @@ void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
                                      MemOperand operand, Label* target,
                                      Label::Distance) {
   ASM_CODE_COMMENT(masm_);
-  __ LoadTaggedPointerField(ip, operand, r0);
+  __ LoadTaggedField(ip, operand, r0);
   JumpIfHelper<COMPRESS_POINTERS_BOOL ? 32 : 64>(masm_, cc, value, ip, target);
 }
 
@@ -239,7 +214,7 @@ void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
                                      Register value, Label* target,
                                      Label::Distance) {
   ASM_CODE_COMMENT(masm_);
-  __ LoadTaggedPointerField(ip, operand, r0);
+  __ LoadTaggedField(ip, operand, r0);
   JumpIfHelper<COMPRESS_POINTERS_BOOL ? 32 : 64>(masm_, cc, value, ip, target);
 }
 
@@ -399,10 +374,10 @@ void BaselineAssembler::Pop(T... registers) {
   detail::PopAllHelper<T...>::Pop(this, registers...);
 }
 
-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
-                                               int offset) {
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
+                                        int offset) {
   ASM_CODE_COMMENT(masm_);
-  __ LoadTaggedPointerField(output, FieldMemOperand(source, offset), r0);
+  __ LoadTaggedField(output, FieldMemOperand(source, offset), r0);
 }
 
 void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
@@ -418,12 +393,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
   SmiUntag(output);
 }
 
-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
-                                           int offset) {
-  ASM_CODE_COMMENT(masm_);
-  __ LoadAnyTaggedField(output, FieldMemOperand(source, offset), r0);
-}
-
 void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
                                                   Register source, int offset) {
   ASM_CODE_COMMENT(masm_);
@@ -468,8 +437,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                                 Label* on_result,
                                                 Label::Distance) {
   Label fallthrough;
-  LoadTaggedPointerField(scratch_and_result, feedback_vector,
-                         FeedbackVector::OffsetOfElementAt(slot.ToInt()));
+  LoadTaggedField(scratch_and_result, feedback_vector,
+                  FeedbackVector::OffsetOfElementAt(slot.ToInt()));
   __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
 
   // Is it marked_for_deoptimization? If yes, clear the slot.
@@ -494,8 +463,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
-                         JSFunction::kFeedbackCellOffset);
+  LoadTaggedField(feedback_cell, feedback_cell,
+                  JSFunction::kFeedbackCellOffset);
 
   Register interrupt_budget = scratch_scope.AcquireScratch();
   __ LoadU32(
@@ -519,8 +488,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
-                         JSFunction::kFeedbackCellOffset);
+  LoadTaggedField(feedback_cell, feedback_cell,
+                  JSFunction::kFeedbackCellOffset);
 
   Register interrupt_budget = scratch_scope.AcquireScratch();
   __ LoadU32(
@@ -538,17 +507,17 @@ void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                                        uint32_t depth) {
   ASM_CODE_COMMENT(masm_);
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
-                     Context::OffsetOfElementAt(index));
+  LoadTaggedField(kInterpreterAccumulatorRegister, context,
+                  Context::OffsetOfElementAt(index));
 }
 
 void BaselineAssembler::StaContextSlot(Register context, Register value,
                                        uint32_t index, uint32_t depth) {
   ASM_CODE_COMMENT(masm_);
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
   StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
                                    value);
@@ -558,34 +527,30 @@ void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
                                           uint32_t depth) {
   ASM_CODE_COMMENT(masm_);
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
   if (cell_index > 0) {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularExportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
     // The actual array index is (cell_index - 1).
     cell_index -= 1;
   } else {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularImportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
     // The actual array index is (-cell_index - 1).
     cell_index = -cell_index - 1;
   }
   LoadFixedArrayElement(context, context, cell_index);
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
-                     Cell::kValueOffset);
+  LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
 }
 
 void BaselineAssembler::StaModuleVariable(Register context, Register value,
                                           int cell_index, uint32_t depth) {
   ASM_CODE_COMMENT(masm_);
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
-  LoadTaggedPointerField(context, context,
-                         SourceTextModule::kRegularExportsOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
 
   // The actual array index is (cell_index - 1).
   cell_index -= 1;
@@ -684,8 +649,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
   __ masm()->LeaveFrame(StackFrame::BASELINE);
 
   // Drop receiver + arguments.
-  __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger,
-                           TurboAssembler::kCountIncludesReceiver);
+  __ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger,
+                           MacroAssembler::kCountIncludesReceiver);
   __ masm()->Ret();
 }
 
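Note: the ppc hunks above (and the s390 hunks below) drop a local IsSignedCondition switch in favor of a shared is_signed predicate, and check_condition becomes to_condition at the branch sites. A self-contained sketch of the predicate over a stand-in Condition enum; the enum layout is an assumption chosen so the one-liner matches the deleted switch, not V8's real definition:

  #include <cstdio>

  // Stand-in enum: the ten signed cases first, then the four unsigned ones,
  // exactly the partition the deleted switch spelled out case by case.
  enum Condition {
    kEqual, kNotEqual, kLessThan, kGreaterThan, kLessThanEqual,
    kGreaterThanEqual, kOverflow, kNoOverflow, kZero, kNotZero,
    kUnsignedLessThan, kUnsignedGreaterThan,
    kUnsignedLessThanEqual, kUnsignedGreaterThanEqual
  };

  inline bool is_signed(Condition cond) { return cond < kUnsignedLessThan; }

  int main() {
    std::printf("%d %d\n", is_signed(kLessThan), is_signed(kUnsignedLessThan));
  }
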
@@ -297,9 +297,9 @@ void BaselineAssembler::Pop(T... registers) {
   detail::PopAllHelper<T...>::Pop(this, registers...);
 }
 
-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
-                                               int offset) {
-  __ LoadTaggedPointerField(output, FieldMemOperand(source, offset));
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
+                                        int offset) {
+  __ LoadTaggedField(output, FieldMemOperand(source, offset));
 }
 void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
                                               int offset) {
@@ -311,10 +311,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
   LoadTaggedSignedField(output, source, offset);
   SmiUntag(output);
 }
-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
-                                           int offset) {
-  __ LoadAnyTaggedField(output, FieldMemOperand(source, offset));
-}
 void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
                                                   Register source, int offset) {
   __ Lhu(output, FieldMemOperand(source, offset));
@@ -351,8 +347,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                                 Label* on_result,
                                                 Label::Distance) {
   Label fallthrough, clear_slot;
-  LoadTaggedPointerField(scratch_and_result, feedback_vector,
-                         FeedbackVector::OffsetOfElementAt(slot.ToInt()));
+  LoadTaggedField(scratch_and_result, feedback_vector,
+                  FeedbackVector::OffsetOfElementAt(slot.ToInt()));
   __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
 
   // Is it marked_for_deoptimization? If yes, clear the slot.
@@ -379,8 +375,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
-                         JSFunction::kFeedbackCellOffset);
+  LoadTaggedField(feedback_cell, feedback_cell,
+                  JSFunction::kFeedbackCellOffset);
 
   Register interrupt_budget = scratch_scope.AcquireScratch();
   __ Lw(interrupt_budget,
@@ -401,8 +397,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
-                         JSFunction::kFeedbackCellOffset);
+  LoadTaggedField(feedback_cell, feedback_cell,
+                  JSFunction::kFeedbackCellOffset);
 
   Register interrupt_budget = scratch_scope.AcquireScratch();
   __ Lw(interrupt_budget,
@@ -419,16 +415,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
 void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                                        uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
-                     Context::OffsetOfElementAt(index));
+  LoadTaggedField(kInterpreterAccumulatorRegister, context,
+                  Context::OffsetOfElementAt(index));
 }
 
 void BaselineAssembler::StaContextSlot(Register context, Register value,
                                        uint32_t index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
   StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
                                    value);
@@ -437,33 +433,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
 void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
                                           uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
   if (cell_index > 0) {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularExportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
     // The actual array index is (cell_index - 1).
     cell_index -= 1;
   } else {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularImportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
     // The actual array index is (-cell_index - 1).
     cell_index = -cell_index - 1;
  }
   LoadFixedArrayElement(context, context, cell_index);
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
-                     Cell::kValueOffset);
+  LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
 }
 
 void BaselineAssembler::StaModuleVariable(Register context, Register value,
                                           int cell_index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
-  LoadTaggedPointerField(context, context,
-                         SourceTextModule::kRegularExportsOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
 
   // The actual array index is (cell_index - 1).
   cell_index -= 1;
@@ -508,7 +500,7 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
   __ CalcScaledAddress(t6, t6, reg, entry_size_log2);
   __ Jump(t6);
   {
-    TurboAssembler::BlockTrampolinePoolScope(masm());
+    MacroAssembler::BlockTrampolinePoolScope(masm());
     __ BlockTrampolinePoolFor(num_labels * kInstrSize * 2);
     __ bind(&table);
     for (int i = 0; i < num_labels; ++i) {
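Note: in the riscv Switch hunk just above, only the scope's owning class changes; the pattern itself, an RAII guard that keeps the assembler from emitting a trampoline pool in the middle of the jump table, is untouched. A stub sketch of that pattern (not V8's implementation):

  #include <cassert>
  #include <cstdio>

  struct MacroAssembler {  // stub assembler with a block counter
    int trampoline_block_depth = 0;
    class BlockTrampolinePoolScope {
     public:
      explicit BlockTrampolinePoolScope(MacroAssembler* masm) : masm_(masm) {
        ++masm_->trampoline_block_depth;  // pool emission now forbidden
      }
      ~BlockTrampolinePoolScope() { --masm_->trampoline_block_depth; }
     private:
      MacroAssembler* masm_;
    };
  };

  int main() {
    MacroAssembler masm;
    {
      MacroAssembler::BlockTrampolinePoolScope scope(&masm);
      assert(masm.trampoline_block_depth == 1);  // jump table emitted here
    }
    assert(masm.trampoline_block_depth == 0);    // pool allowed again
    std::printf("ok\n");
  }
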
@@ -48,31 +48,6 @@ class BaselineAssembler::ScratchRegisterScope {
   int registers_used_;
 };
 
-inline bool IsSignedCondition(Condition cond) {
-  switch (cond) {
-    case kEqual:
-    case kNotEqual:
-    case kLessThan:
-    case kGreaterThan:
-    case kLessThanEqual:
-    case kGreaterThanEqual:
-    case kOverflow:
-    case kNoOverflow:
-    case kZero:
-    case kNotZero:
-      return true;
-
-    case kUnsignedLessThan:
-    case kUnsignedGreaterThan:
-    case kUnsignedLessThanEqual:
-    case kUnsignedGreaterThanEqual:
-      return false;
-
-    default:
-      UNREACHABLE();
-  }
-}
-
 #define __ assm->
 // s390x helper
 template <int width = 64>
@@ -81,19 +56,19 @@ static void JumpIfHelper(MacroAssembler* assm, Condition cc, Register lhs,
   static_assert(width == 64 || width == 32,
                 "only support 64 and 32 bit compare");
   if (width == 64) {
-    if (IsSignedCondition(cc)) {
+    if (is_signed(cc)) {
      __ CmpS64(lhs, rhs);
     } else {
       __ CmpU64(lhs, rhs);
     }
   } else {
-    if (IsSignedCondition(cc)) {
+    if (is_signed(cc)) {
      __ CmpS32(lhs, rhs);
     } else {
       __ CmpU32(lhs, rhs);
     }
   }
-  __ b(check_condition(cc), target);
+  __ b(to_condition(cc), target);
 }
 
 #undef __
@@ -159,18 +134,18 @@ void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
                                       Label* target, Label::Distance) {
   ASM_CODE_COMMENT(masm_);
   __ AndP(r0, value, Operand(mask));
-  __ b(check_condition(cc), target);
+  __ b(to_condition(cc), target);
 }
 
 void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
                                Label* target, Label::Distance) {
   ASM_CODE_COMMENT(masm_);
-  if (IsSignedCondition(cc)) {
+  if (is_signed(cc)) {
     __ CmpS64(lhs, rhs);
   } else {
     __ CmpU64(lhs, rhs);
   }
-  __ b(check_condition(cc), target);
+  __ b(to_condition(cc), target);
 }
 
 void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
@@ -236,9 +211,9 @@ void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
   if (COMPRESS_POINTERS_BOOL) {
     MemOperand addr =
         MemOperand(operand.rx(), operand.rb(), operand.offset() + stack_bias);
-    __ LoadTaggedPointerField(ip, addr, r0);
+    __ LoadTaggedField(ip, addr, r0);
   } else {
-    __ LoadTaggedPointerField(ip, operand, r0);
+    __ LoadTaggedField(ip, operand, r0);
   }
   JumpIfHelper<COMPRESS_POINTERS_BOOL ? 32 : 64>(masm_, cc, value, ip, target);
 }
@@ -251,9 +226,9 @@ void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
   if (COMPRESS_POINTERS_BOOL) {
     MemOperand addr =
         MemOperand(operand.rx(), operand.rb(), operand.offset() + stack_bias);
-    __ LoadTaggedPointerField(ip, addr, r0);
+    __ LoadTaggedField(ip, addr, r0);
   } else {
-    __ LoadTaggedPointerField(ip, operand, r0);
+    __ LoadTaggedField(ip, operand, r0);
   }
   JumpIfHelper<COMPRESS_POINTERS_BOOL ? 32 : 64>(masm_, cc, ip, value, target);
 }
@@ -412,10 +387,10 @@ void BaselineAssembler::Pop(T... registers) {
   detail::PopAllHelper<T...>::Pop(this, registers...);
 }
 
-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
-                                               int offset) {
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
+                                        int offset) {
   ASM_CODE_COMMENT(masm_);
-  __ LoadTaggedPointerField(output, FieldMemOperand(source, offset), r0);
+  __ LoadTaggedField(output, FieldMemOperand(source, offset), r0);
 }
 
 void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
@@ -431,12 +406,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
   SmiUntag(output);
 }
 
-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
-                                           int offset) {
-  ASM_CODE_COMMENT(masm_);
-  __ LoadAnyTaggedField(output, FieldMemOperand(source, offset), r0);
-}
-
 void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
                                                   Register source, int offset) {
   ASM_CODE_COMMENT(masm_);
@@ -481,8 +450,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
                                                 Label* on_result,
                                                 Label::Distance) {
   Label fallthrough;
-  LoadTaggedPointerField(scratch_and_result, feedback_vector,
-                         FeedbackVector::OffsetOfElementAt(slot.ToInt()));
+  LoadTaggedField(scratch_and_result, feedback_vector,
+                  FeedbackVector::OffsetOfElementAt(slot.ToInt()));
   __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
 
   // Is it marked_for_deoptimization? If yes, clear the slot.
@@ -507,8 +476,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
-                         JSFunction::kFeedbackCellOffset);
+  LoadTaggedField(feedback_cell, feedback_cell,
+                  JSFunction::kFeedbackCellOffset);
 
   Register interrupt_budget = scratch_scope.AcquireScratch();
   __ LoadU32(
@@ -532,8 +501,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   ScratchRegisterScope scratch_scope(this);
   Register feedback_cell = scratch_scope.AcquireScratch();
   LoadFunction(feedback_cell);
-  LoadTaggedPointerField(feedback_cell, feedback_cell,
-                         JSFunction::kFeedbackCellOffset);
+  LoadTaggedField(feedback_cell, feedback_cell,
+                  JSFunction::kFeedbackCellOffset);
 
   Register interrupt_budget = scratch_scope.AcquireScratch();
   __ LoadU32(
@@ -550,16 +519,16 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
 void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
                                        uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
-                     Context::OffsetOfElementAt(index));
+  LoadTaggedField(kInterpreterAccumulatorRegister, context,
+                  Context::OffsetOfElementAt(index));
 }
 
 void BaselineAssembler::StaContextSlot(Register context, Register value,
                                        uint32_t index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
   StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index),
                                    value);
@@ -568,33 +537,29 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
 void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
                                           uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
   if (cell_index > 0) {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularExportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
     // The actual array index is (cell_index - 1).
     cell_index -= 1;
   } else {
-    LoadTaggedPointerField(context, context,
-                           SourceTextModule::kRegularImportsOffset);
+    LoadTaggedField(context, context, SourceTextModule::kRegularImportsOffset);
     // The actual array index is (-cell_index - 1).
     cell_index = -cell_index - 1;
   }
   LoadFixedArrayElement(context, context, cell_index);
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
-                     Cell::kValueOffset);
+  LoadTaggedField(kInterpreterAccumulatorRegister, context, Cell::kValueOffset);
 }
 
 void BaselineAssembler::StaModuleVariable(Register context, Register value,
                                           int cell_index, uint32_t depth) {
   for (; depth > 0; --depth) {
-    LoadTaggedPointerField(context, context, Context::kPreviousOffset);
+    LoadTaggedField(context, context, Context::kPreviousOffset);
   }
-  LoadTaggedPointerField(context, context, Context::kExtensionOffset);
-  LoadTaggedPointerField(context, context,
-                         SourceTextModule::kRegularExportsOffset);
+  LoadTaggedField(context, context, Context::kExtensionOffset);
+  LoadTaggedField(context, context, SourceTextModule::kRegularExportsOffset);
 
   // The actual array index is (cell_index - 1).
   cell_index -= 1;
@@ -692,8 +657,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
   __ masm()->LeaveFrame(StackFrame::BASELINE);
 
   // Drop receiver + arguments.
-  __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger,
-                           TurboAssembler::kCountIncludesReceiver);
+  __ masm()->DropArguments(params_size, MacroAssembler::kCountIsInteger,
+                           MacroAssembler::kCountIncludesReceiver);
   __ masm()->Ret();
 }
 
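Note: in the ppc and s390 JumpIfTagged hunks above, the tagged slot is loaded with LoadTaggedField and then compared at COMPRESS_POINTERS_BOOL ? 32 : 64 bits: with compression on, a tagged slot holds a 32-bit value, so the comparison can stay narrow. A sketch of that width selection in plain C++ with stand-in types (not the V8 JumpIfHelper template):

  #include <cstdint>
  #include <cstdio>

  constexpr bool kCompressPointers = true;  // assumption for the sketch

  template <int width>
  bool EqualAtWidth(uint64_t lhs, uint64_t rhs) {
    static_assert(width == 64 || width == 32,
                  "only support 64 and 32 bit compare");
    if (width == 32) {
      return static_cast<uint32_t>(lhs) == static_cast<uint32_t>(rhs);
    }
    return lhs == rhs;
  }

  int main() {
    uint64_t a = 0x111100000000AAULL, b = 0x222200000000AAULL;
    // Mirrors JumpIfHelper<COMPRESS_POINTERS_BOOL ? 32 : 64>(...): compressed
    // values agree in their low 32 bits even when the upper halves differ.
    bool eq = EqualAtWidth<kCompressPointers ? 32 : 64>(a, b);
    std::printf("%d\n", eq);
  }
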
@@ -287,9 +287,9 @@ void BaselineAssembler::Pop(T... registers) {
   (__ Pop(registers), ...);
 }
 
-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
-                                               int offset) {
-  __ LoadTaggedPointerField(output, FieldOperand(source, offset));
+void BaselineAssembler::LoadTaggedField(Register output, Register source,
+                                        int offset) {
+  __ LoadTaggedField(output, FieldOperand(source, offset));
 }
 void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
                                               int offset) {
@@ -300,10 +300,6 @@ void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
                                                       int offset) {
   __ SmiUntagField(output, FieldOperand(source, offset));
 }
-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
-                                           int offset) {
-  __ LoadAnyTaggedField(output, FieldOperand(source, offset));
-}
 void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
                                                   Register source, int offset) {
   __ movzxwq(output, FieldOperand(source, offset));
@@ -331,45 +327,31 @@ void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
   __ StoreTaggedField(FieldOperand(target, offset), value);
 }
 
-void BaselineAssembler::LoadTaggedPointerField(TaggedRegister output,
-                                               Register source, int offset) {
-  __ LoadTaggedPointerField(output, FieldOperand(source, offset));
+void BaselineAssembler::LoadTaggedField(TaggedRegister output, Register source,
+                                        int offset) {
+  __ LoadTaggedField(output, FieldOperand(source, offset));
 }
 
-void BaselineAssembler::LoadTaggedPointerField(TaggedRegister output,
-                                               TaggedRegister source,
-                                               int offset) {
-  __ LoadTaggedPointerField(output, FieldOperand(source, offset));
+void BaselineAssembler::LoadTaggedField(TaggedRegister output,
+                                        TaggedRegister source, int offset) {
+  __ LoadTaggedField(output, FieldOperand(source, offset));
 }
 
-void BaselineAssembler::LoadTaggedPointerField(Register output,
-                                               TaggedRegister source,
-                                               int offset) {
-  __ LoadTaggedPointerField(output, FieldOperand(source, offset));
-}
-
-void BaselineAssembler::LoadTaggedAnyField(Register output,
-                                           TaggedRegister source, int offset) {
-  __ LoadAnyTaggedField(output, FieldOperand(source, offset));
-}
-
-void BaselineAssembler::LoadTaggedAnyField(TaggedRegister output,
-                                           TaggedRegister source, int offset) {
-  __ LoadAnyTaggedField(output, FieldOperand(source, offset));
+void BaselineAssembler::LoadTaggedField(Register output, TaggedRegister source,
+                                        int offset) {
+  __ LoadTaggedField(output, FieldOperand(source, offset));
 }
 
 void BaselineAssembler::LoadFixedArrayElement(Register output,
                                               TaggedRegister array,
                                               int32_t index) {
-  LoadTaggedAnyField(output, array,
-                     FixedArray::kHeaderSize + index * kTaggedSize);
+  LoadTaggedField(output, array, FixedArray::kHeaderSize + index * kTaggedSize);
 }
 
 void BaselineAssembler::LoadFixedArrayElement(TaggedRegister output,
                                               TaggedRegister array,
                                               int32_t index) {
-  LoadTaggedAnyField(output, array,
-                     FixedArray::kHeaderSize + index * kTaggedSize);
+  LoadTaggedField(output, array, FixedArray::kHeaderSize + index * kTaggedSize);
 }
 
 void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
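Note on the two LoadFixedArrayElement overloads above: the element offset is
computed inline as FixedArray::kHeaderSize + index * kTaggedSize. A worked
example, under the assumption that pointer compression is on (kTaggedSize of 4)
and that the FixedArray header is a map slot plus a length slot:

    // Illustrative constants only, not copied from the V8 headers.
    constexpr int kTaggedSize = 4;                          // compressed slot
    constexpr int kFixedArrayHeaderSize = 2 * kTaggedSize;  // map + length
    // Element 3 then lives at 8 + 3 * 4 = 20 bytes into the object;
    // FieldOperand folds in the -kHeapObjectTag adjustment.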
@@ -389,8 +371,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   LoadFunction(feedback_cell);
   // Decompresses pointer by complex addressing mode when necessary.
   TaggedRegister tagged(feedback_cell);
-  LoadTaggedPointerField(tagged, feedback_cell,
-                         JSFunction::kFeedbackCellOffset);
+  LoadTaggedField(tagged, feedback_cell, JSFunction::kFeedbackCellOffset);
   __ addl(FieldOperand(tagged, FeedbackCell::kInterruptBudgetOffset),
           Immediate(weight));
   if (skip_interrupt_label) {
@@ -407,8 +388,7 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   LoadFunction(feedback_cell);
   // Decompresses pointer by complex addressing mode when necessary.
   TaggedRegister tagged(feedback_cell);
-  LoadTaggedPointerField(tagged, feedback_cell,
-                         JSFunction::kFeedbackCellOffset);
+  LoadTaggedField(tagged, feedback_cell, JSFunction::kFeedbackCellOffset);
   __ addl(FieldOperand(tagged, FeedbackCell::kInterruptBudgetOffset), weight);
   if (skip_interrupt_label) __ j(greater_equal, skip_interrupt_label);
 }
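Both AddToInterruptBudgetAndJumpIfNotExceeded variants above share one shape,
roughly this pseudo-code (a sketch, not the V8 source):

    // budget = closure->feedback_cell()->interrupt_budget() + weight;
    // the addl writes the new budget straight back to the field and sets flags;
    // if (budget >= 0) goto skip_interrupt_label;   // j(greater_equal, ...)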
@@ -420,17 +400,17 @@ void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
   // addressing mode, any intermediate context pointer is loaded in compressed
   // form.
   if (depth == 0) {
-    LoadTaggedAnyField(kInterpreterAccumulatorRegister, context,
-                       Context::OffsetOfElementAt(index));
+    LoadTaggedField(kInterpreterAccumulatorRegister, context,
+                    Context::OffsetOfElementAt(index));
   } else {
     TaggedRegister tagged(context);
-    LoadTaggedPointerField(tagged, context, Context::kPreviousOffset);
+    LoadTaggedField(tagged, context, Context::kPreviousOffset);
     --depth;
     for (; depth > 0; --depth) {
-      LoadTaggedPointerField(tagged, tagged, Context::kPreviousOffset);
+      LoadTaggedField(tagged, tagged, Context::kPreviousOffset);
     }
-    LoadTaggedAnyField(kInterpreterAccumulatorRegister, tagged,
-                       Context::OffsetOfElementAt(index));
+    LoadTaggedField(kInterpreterAccumulatorRegister, tagged,
+                    Context::OffsetOfElementAt(index));
   }
 }
 
@@ -442,10 +422,10 @@ void BaselineAssembler::StaContextSlot(Register context, Register value,
   // form.
   if (depth > 0) {
     TaggedRegister tagged(context);
-    LoadTaggedPointerField(tagged, context, Context::kPreviousOffset);
+    LoadTaggedField(tagged, context, Context::kPreviousOffset);
     --depth;
     for (; depth > 0; --depth) {
-      LoadTaggedPointerField(tagged, tagged, Context::kPreviousOffset);
+      LoadTaggedField(tagged, tagged, Context::kPreviousOffset);
     }
     if (COMPRESS_POINTERS_BOOL) {
       // Decompress tagged pointer.
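The context-slot helpers above walk the context chain in compressed form and
only decompress at the end. The computation, as a sketch:

    // Context* c = context;
    // for (uint32_t d = depth; d > 0; --d) c = c->previous();
    // accumulator = c->get(index);   // LdaContextSlot
    // c->set(index, value);          // StaContextSlot, plus a write barrier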
@@ -463,29 +443,26 @@ void BaselineAssembler::LdaModuleVariable(Register context, int cell_index,
   // enabled, any intermediate context pointer is loaded in compressed form.
   TaggedRegister tagged(context);
   if (depth == 0) {
-    LoadTaggedPointerField(tagged, context, Context::kExtensionOffset);
+    LoadTaggedField(tagged, context, Context::kExtensionOffset);
   } else {
-    LoadTaggedPointerField(tagged, context, Context::kPreviousOffset);
+    LoadTaggedField(tagged, context, Context::kPreviousOffset);
     --depth;
     for (; depth > 0; --depth) {
-      LoadTaggedPointerField(tagged, tagged, Context::kPreviousOffset);
+      LoadTaggedField(tagged, tagged, Context::kPreviousOffset);
     }
-    LoadTaggedPointerField(tagged, tagged, Context::kExtensionOffset);
+    LoadTaggedField(tagged, tagged, Context::kExtensionOffset);
   }
   if (cell_index > 0) {
-    LoadTaggedPointerField(tagged, tagged,
-                           SourceTextModule::kRegularExportsOffset);
+    LoadTaggedField(tagged, tagged, SourceTextModule::kRegularExportsOffset);
     // The actual array index is (cell_index - 1).
     cell_index -= 1;
   } else {
-    LoadTaggedPointerField(tagged, tagged,
-                           SourceTextModule::kRegularImportsOffset);
+    LoadTaggedField(tagged, tagged, SourceTextModule::kRegularImportsOffset);
     // The actual array index is (-cell_index - 1).
     cell_index = -cell_index - 1;
   }
   LoadFixedArrayElement(tagged, tagged, cell_index);
-  LoadTaggedAnyField(kInterpreterAccumulatorRegister, tagged,
-                     Cell::kValueOffset);
+  LoadTaggedField(kInterpreterAccumulatorRegister, tagged, Cell::kValueOffset);
 }
 
 void BaselineAssembler::StaModuleVariable(Register context, Register value,
@@ -495,17 +472,16 @@ void BaselineAssembler::StaModuleVariable(Register context, Register value,
   // enabled, any intermediate context pointer is loaded in compressed form.
   TaggedRegister tagged(context);
   if (depth == 0) {
-    LoadTaggedPointerField(tagged, context, Context::kExtensionOffset);
+    LoadTaggedField(tagged, context, Context::kExtensionOffset);
   } else {
-    LoadTaggedPointerField(tagged, context, Context::kPreviousOffset);
+    LoadTaggedField(tagged, context, Context::kPreviousOffset);
     --depth;
     for (; depth > 0; --depth) {
-      LoadTaggedPointerField(tagged, tagged, Context::kPreviousOffset);
+      LoadTaggedField(tagged, tagged, Context::kPreviousOffset);
     }
-    LoadTaggedPointerField(tagged, tagged, Context::kExtensionOffset);
+    LoadTaggedField(tagged, tagged, Context::kExtensionOffset);
   }
-  LoadTaggedPointerField(tagged, tagged,
-                         SourceTextModule::kRegularExportsOffset);
+  LoadTaggedField(tagged, tagged, SourceTextModule::kRegularExportsOffset);
 
   // The actual array index is (cell_index - 1).
   cell_index -= 1;
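The cell_index sign convention used by both module-variable helpers above:

    // cell_index > 0  ->  regular_exports[cell_index - 1]
    // cell_index <= 0 ->  regular_imports[-cell_index - 1]
    // Either table is a FixedArray of Cells; the payload sits at
    // Cell::kValueOffset.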
@@ -587,8 +563,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
 
   // Drop receiver + arguments.
   __ masm()->DropArguments(params_size, scratch,
-                           TurboAssembler::kCountIsInteger,
-                           TurboAssembler::kCountIncludesReceiver);
+                           MacroAssembler::kCountIsInteger,
+                           MacroAssembler::kCountIncludesReceiver);
   __ masm()->Ret();
 }
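The hunks that follow are the mechanical side of folding TurboAssembler into
MacroAssembler: constants that used to be scoped as TurboAssembler::k... are
now spelled MacroAssembler::k..., with no change in behavior. A typical
call-site update, taken from the pattern above:

    // Before:
    __ masm()->DropArguments(params_size, scratch,
                             TurboAssembler::kCountIsInteger,
                             TurboAssembler::kCountIncludesReceiver);
    // After (same semantics, new scope):
    __ masm()->DropArguments(params_size, scratch,
                             MacroAssembler::kCountIsInteger,
                             MacroAssembler::kCountIncludesReceiver);

Judging by the names, kCountIsSmi / kCountIsInteger / kCountIsBytes describe
how the count register encodes the argument count, while
kCountIncludesReceiver / kCountExcludesReceiver say whether the receiver slot
is part of that count.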
@@ -130,8 +130,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
   }
 
   // Remove caller arguments from the stack and return.
-  __ DropArguments(scratch, TurboAssembler::kCountIsSmi,
-                   TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(scratch, MacroAssembler::kCountIsSmi,
+                   MacroAssembler::kCountIncludesReceiver);
   __ Jump(lr);
 
   __ bind(&stack_overflow);
@@ -278,8 +278,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   __ LeaveFrame(StackFrame::CONSTRUCT);
 
   // Remove caller arguments from the stack and return.
-  __ DropArguments(r1, TurboAssembler::kCountIsSmi,
-                   TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(r1, MacroAssembler::kCountIsSmi,
+                   MacroAssembler::kCountIncludesReceiver);
   __ Jump(lr);
 
   __ bind(&check_receiver);
@@ -826,8 +826,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
   __ LeaveFrame(StackFrame::INTERPRETED);
 
   // Drop receiver + arguments.
-  __ DropArguments(params_size, TurboAssembler::kCountIsBytes,
-                   TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(params_size, MacroAssembler::kCountIsBytes,
+                   MacroAssembler::kCountIncludesReceiver);
 }
 
 // Advance the current bytecode offset. This simulates what all bytecode
@@ -1352,7 +1352,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
   __ sub(start_address, start_address, scratch);
   // Push the arguments.
   __ PushArray(start_address, num_args, scratch,
-               TurboAssembler::PushArrayOrder::kReverse);
+               MacroAssembler::PushArrayOrder::kReverse);
 }
 
 // static
@@ -1820,8 +1820,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
     __ ldr(r5, MemOperand(sp, kSystemPointerSize), ge);  // thisArg
     __ cmp(r0, Operand(JSParameterCount(2)), ge);
     __ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge);  // argArray
-    __ DropArgumentsAndPushNewReceiver(r0, r5, TurboAssembler::kCountIsInteger,
-                                       TurboAssembler::kCountIncludesReceiver);
+    __ DropArgumentsAndPushNewReceiver(r0, r5, MacroAssembler::kCountIsInteger,
+                                       MacroAssembler::kCountIncludesReceiver);
   }
 
   // ----------- S t a t e -------------
@@ -1897,8 +1897,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
     __ ldr(r5, MemOperand(sp, 2 * kSystemPointerSize), ge);  // thisArgument
    __ cmp(r0, Operand(JSParameterCount(3)), ge);
     __ ldr(r2, MemOperand(sp, 3 * kSystemPointerSize), ge);  // argumentsList
-    __ DropArgumentsAndPushNewReceiver(r0, r5, TurboAssembler::kCountIsInteger,
-                                       TurboAssembler::kCountIncludesReceiver);
+    __ DropArgumentsAndPushNewReceiver(r0, r5, MacroAssembler::kCountIsInteger,
+                                       MacroAssembler::kCountIncludesReceiver);
   }
 
   // ----------- S t a t e -------------
@@ -1940,8 +1940,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
     __ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge);  // argumentsList
     __ cmp(r0, Operand(JSParameterCount(3)), ge);
     __ ldr(r3, MemOperand(sp, 3 * kSystemPointerSize), ge);  // new.target
-    __ DropArgumentsAndPushNewReceiver(r0, r4, TurboAssembler::kCountIsInteger,
-                                       TurboAssembler::kCountIncludesReceiver);
+    __ DropArgumentsAndPushNewReceiver(r0, r4, MacroAssembler::kCountIsInteger,
+                                       MacroAssembler::kCountIncludesReceiver);
   }
 
   // ----------- S t a t e -------------
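The ge-suffixed cmp/ldr sequences in the Apply/Reflect hunks above are ARM
conditional execution: each load only takes effect when the preceding count
check passed. Roughly, as a sketch:

    // if (argc >= JSParameterCount(1)) this_arg  = sp[1 * kSystemPointerSize];
    // if (argc >= JSParameterCount(2)) arg_array = sp[2 * kSystemPointerSize];
    // DropArgumentsAndPushNewReceiver(argc, this_arg, ...);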
@@ -163,7 +163,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
   }
 
   // Remove caller arguments from the stack and return.
-  __ DropArguments(x1, TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(x1, MacroAssembler::kCountIncludesReceiver);
   __ Ret();
 
   __ Bind(&stack_overflow);
@@ -213,7 +213,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   //  -- sp[4*kSystemPointerSize]: context (pushed by FrameScope)
   // -----------------------------------
 
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
   __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
   __ DecodeField<SharedFunctionInfo::FunctionKindBits>(w4);
@@ -348,7 +348,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   // Leave construct frame.
   __ LeaveFrame(StackFrame::CONSTRUCT);
   // Remove caller arguments from the stack and return.
-  __ DropArguments(x1, TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(x1, MacroAssembler::kCountIncludesReceiver);
   __ Ret();
 
   // Otherwise we do a smi check and fall through to check if the return value
@@ -423,7 +423,7 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
   }
   __ Cmp(scratch1, INTERPRETER_DATA_TYPE);
   __ B(ne, &done);
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       sfi_data,
       FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
   __ Bind(&done);
@@ -446,10 +446,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   __ AssertGeneratorObject(x1);
 
   // Load suspended function and context.
-  __ LoadTaggedPointerField(
-      x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
-  __ LoadTaggedPointerField(cp,
-                            FieldMemOperand(x4, JSFunction::kContextOffset));
+  __ LoadTaggedField(x4,
+                     FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
+  __ LoadTaggedField(cp, FieldMemOperand(x4, JSFunction::kContextOffset));
 
   // Flood function if we are stepping.
   Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
@@ -477,7 +476,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   __ B(lo, &stack_overflow);
 
   // Get number of arguments for generator function.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       x10, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
   __ Ldrh(w10, FieldMemOperand(
                    x10, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -493,8 +492,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   __ Poke(padreg, Operand(x11, LSL, kSystemPointerSizeLog2));
 
   // Poke receiver into highest claimed slot.
-  __ LoadTaggedPointerField(
-      x5, FieldMemOperand(x1, JSGeneratorObject::kReceiverOffset));
+  __ LoadTaggedField(x5,
+                     FieldMemOperand(x1, JSGeneratorObject::kReceiverOffset));
   __ Poke(x5, __ ReceiverOperand(x10));
 
   // ----------- S t a t e -------------
@@ -507,7 +506,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // -----------------------------------
 
   // Copy the function arguments from the generator object's register file.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       x5,
       FieldMemOperand(x1, JSGeneratorObject::kParametersAndRegistersOffset));
   {
@@ -518,7 +517,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ Add(x5, x5, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
     __ Bind(&loop);
     __ Sub(x10, x10, 1);
-    __ LoadAnyTaggedField(x11, MemOperand(x5, -kTaggedSize, PreIndex));
+    __ LoadTaggedField(x11, MemOperand(x5, -kTaggedSize, PreIndex));
     __ Str(x11, MemOperand(x12, -kSystemPointerSize, PostIndex));
     __ Cbnz(x10, &loop);
     __ Bind(&done);
@@ -527,9 +526,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // Underlying function needs to have bytecode available.
   if (v8_flags.debug_code) {
     Label is_baseline;
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
        x3, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
        x3, FieldMemOperand(x3, SharedFunctionInfo::kFunctionDataOffset));
     GetSharedFunctionInfoBytecodeOrBaseline(masm, x3, x0, &is_baseline);
     __ CompareObjectType(x3, x3, x3, BYTECODE_ARRAY_TYPE);
@@ -539,7 +538,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
 
   // Resume (Ignition/TurboFan) generator object.
   {
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
        x0, FieldMemOperand(x4, JSFunction::kSharedFunctionInfoOffset));
     __ Ldrh(w0, FieldMemOperand(
                     x0, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -549,7 +548,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ Mov(x3, x1);
     __ Mov(x1, x4);
     static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
-    __ LoadTaggedPointerField(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
+    __ LoadTaggedField(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
     __ JumpCodeObject(x2);
   }
 
@@ -561,8 +560,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ Push(x1, padreg, x4, x5);
     __ CallRuntime(Runtime::kDebugOnFunctionCall);
     __ Pop(padreg, x1);
-    __ LoadTaggedPointerField(
-        x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
+    __ LoadTaggedField(x4,
+                       FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
   }
   __ B(&stepping_prepared);
 
@@ -572,8 +571,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ Push(x1, padreg);
     __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
     __ Pop(padreg, x1);
-    __ LoadTaggedPointerField(
-        x4, FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
+    __ LoadTaggedField(x4,
+                       FieldMemOperand(x1, JSGeneratorObject::kFunctionOffset));
   }
   __ B(&stepping_prepared);
 
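The register-file copy loop in Generate_ResumeGeneratorTrampoline pairs a
pre-indexed load with a post-indexed store, so both pointers advance as a side
effect of the memory access itself. The two addressing modes, spelled out:

    // LoadTaggedField(x11, MemOperand(x5, -kTaggedSize, PreIndex)):
    //   x5 -= kTaggedSize; x11 = *x5;              // adjust first, then load
    // Str(x11, MemOperand(x12, -kSystemPointerSize, PostIndex)):
    //   *x12 = x11; x12 -= kSystemPointerSize;     // store first, then adjust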
@@ -1108,11 +1107,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
           BaselineOutOfLinePrologueDescriptor::kClosure);
   // Load the feedback vector from the closure.
   Register feedback_vector = temps.AcquireX();
-  __ LoadTaggedPointerField(
-      feedback_vector,
-      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
-  __ LoadTaggedPointerField(
-      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(feedback_vector, Cell::kValueOffset));
   __ AssertFeedbackVector(feedback_vector, x4);
 
   // Check the tiering state.
@@ -1205,7 +1203,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
   {
     ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
     // Drop the frame created by the baseline call.
-    __ Pop<TurboAssembler::kAuthLR>(fp, lr);
+    __ Pop<MacroAssembler::kAuthLR>(fp, lr);
     __ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector);
     __ Trap();
   }
@@ -1270,9 +1268,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(
 
   // Get the bytecode array from the function object and load it into
   // kInterpreterBytecodeArrayRegister.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       x4, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       kInterpreterBytecodeArrayRegister,
       FieldMemOperand(x4, SharedFunctionInfo::kFunctionDataOffset));
 
@@ -1288,17 +1286,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   __ B(ne, &compile_lazy);
 
   // Load the feedback vector from the closure.
-  __ LoadTaggedPointerField(
-      feedback_vector,
-      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
-  __ LoadTaggedPointerField(
-      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(feedback_vector, Cell::kValueOffset));
 
   Label push_stack_frame;
   // Check if feedback vector is valid. If valid, check for optimized code
   // and update invocation count. Otherwise, setup the stack frame.
-  __ LoadTaggedPointerField(
-      x7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+  __ LoadTaggedField(x7,
+                     FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
   __ Ldrh(x7, FieldMemOperand(x7, Map::kInstanceTypeOffset));
   __ Cmp(x7, FEEDBACK_VECTOR_TYPE);
   __ B(ne, &push_stack_frame);
@@ -1330,7 +1327,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   // the frame (that is done below).
   __ Bind(&push_stack_frame);
   FrameScope frame_scope(masm, StackFrame::MANUAL);
-  __ Push<TurboAssembler::kSignLR>(lr, fp);
+  __ Push<MacroAssembler::kSignLR>(lr, fp);
   __ mov(fp, sp);
   __ Push(cp, closure);
 
@@ -1342,7 +1339,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
 
   // Push actual argument count, bytecode array, Smi tagged bytecode array
   // offset and an undefined (to properly align the stack pointer).
-  static_assert(TurboAssembler::kExtraSlotClaimedByPrologue == 1);
+  static_assert(MacroAssembler::kExtraSlotClaimedByPrologue == 1);
   __ SmiTag(x6, kInterpreterBytecodeOffsetRegister);
   __ Push(kJavaScriptCallArgCountRegister, kInterpreterBytecodeArrayRegister);
   __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
@@ -1480,16 +1477,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   __ bind(&is_baseline);
   {
     // Load the feedback vector from the closure.
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         feedback_vector,
         FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
-    __ LoadTaggedPointerField(
-        feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+    __ LoadTaggedField(feedback_vector,
+                       FieldMemOperand(feedback_vector, Cell::kValueOffset));
 
     Label install_baseline_code;
     // Check if feedback vector is valid. If not, call prepare for baseline to
     // allocate it.
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
        x7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
     __ Ldrh(x7, FieldMemOperand(x7, Map::kInstanceTypeOffset));
     __ Cmp(x7, FEEDBACK_VECTOR_TYPE);
@@ -1582,7 +1579,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
   }
 
   __ CopyDoubleWords(stack_addr, last_arg_addr, slots_to_copy,
-                     TurboAssembler::kDstLessThanSrcAndReverse);
+                     MacroAssembler::kDstLessThanSrcAndReverse);
 
   if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
     // Store "undefined" as the receiver arg if we need to.
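Several hunks above repeat the same two-step feedback-vector lookup followed by
a map check. As pseudo-code (a sketch, not the V8 source):

    // vector = closure->feedback_cell()->value();
    // if (vector->map()->instance_type() != FEEDBACK_VECTOR_TYPE)
    //   goto push_stack_frame;   // vector not allocated yet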
@@ -1732,16 +1729,16 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
   // get the custom trampoline, otherwise grab the entry address of the global
   // trampoline.
   __ Ldr(x1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       x1, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
      x1, FieldMemOperand(x1, SharedFunctionInfo::kFunctionDataOffset));
   __ CompareObjectType(x1, kInterpreterDispatchTableRegister,
                        kInterpreterDispatchTableRegister,
                        INTERPRETER_DATA_TYPE);
   __ B(ne, &builtin_trampoline);
 
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
      x1, FieldMemOperand(x1, InterpreterData::kInterpreterTrampolineOffset));
   __ LoadCodeEntry(x1, x1);
   __ B(&trampoline_loaded);
@@ -1882,7 +1879,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
 
   // Restore fp, lr.
   __ Mov(sp, fp);
-  __ Pop<TurboAssembler::kAuthLR>(fp, lr);
+  __ Pop<MacroAssembler::kAuthLR>(fp, lr);
 
   __ LoadEntryFromBuiltinIndex(builtin);
   __ Jump(builtin);
@@ -1997,7 +1994,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
 
   // Load deoptimization data from the code object.
   // <deopt_data> = <code>[#deoptimization_data_offset]
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       x1,
       FieldMemOperand(
           x0, InstructionStream::kDeoptimizationDataOrInterpreterDataOffset));
@@ -2069,7 +2066,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
     __ Peek(arg_array, 2 * kSystemPointerSize);
     __ bind(&done);
   }
-  __ DropArguments(argc, TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(argc, MacroAssembler::kCountIncludesReceiver);
   __ PushArgument(this_arg);
 
   // ----------- S t a t e -------------
@@ -2158,7 +2155,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
     __ SlotAddress(copy_from, count);
     __ Add(copy_to, copy_from, kSystemPointerSize);
     __ CopyDoubleWords(copy_to, copy_from, count,
-                       TurboAssembler::kSrcLessThanDst);
+                       MacroAssembler::kSrcLessThanDst);
     __ Drop(2);
   }
 
@@ -2206,7 +2203,7 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
     __ Peek(arguments_list, 3 * kSystemPointerSize);
     __ bind(&done);
   }
-  __ DropArguments(argc, TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(argc, MacroAssembler::kCountIncludesReceiver);
   __ PushArgument(this_argument);
 
   // ----------- S t a t e -------------
@@ -2264,7 +2261,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
     __ bind(&done);
   }
 
-  __ DropArguments(argc, TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(argc, MacroAssembler::kCountIncludesReceiver);
 
   // Push receiver (undefined).
   __ PushArgument(undefined_value);
@@ -2348,7 +2345,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
     // Allow x2 to be a FixedArray, or a FixedDoubleArray if x4 == 0.
     Label ok, fail;
     __ AssertNotSmi(x2, AbortReason::kOperandIsNotAFixedArray);
-    __ LoadTaggedPointerField(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
+    __ LoadTaggedField(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
     __ Ldrh(x13, FieldMemOperand(x10, Map::kInstanceTypeOffset));
     __ Cmp(x13, FIXED_ARRAY_TYPE);
     __ B(eq, &ok);
@@ -2394,7 +2391,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
     __ Add(argc, argc, len);  // Update new argc.
     __ Bind(&loop);
     __ Sub(len, len, 1);
-    __ LoadAnyTaggedField(scratch, MemOperand(src, kTaggedSize, PostIndex));
+    __ LoadTaggedField(scratch, MemOperand(src, kTaggedSize, PostIndex));
     __ CmpTagged(scratch, the_hole_value);
     __ Csel(scratch, scratch, undefined_value, ne);
     __ Str(scratch, MemOperand(dst, kSystemPointerSize, PostIndex));
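The CmpTagged/Csel pair in the varargs copy loop above converts array holes to
undefined without branching:

    // scratch = args[i++];                                    // post-indexed load
    // scratch = (scratch == the_hole) ? undefined : scratch;  // Csel ..., ne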
@@ -2426,7 +2423,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
   if (mode == CallOrConstructMode::kConstruct) {
     Label new_target_constructor, new_target_not_constructor;
     __ JumpIfSmi(x3, &new_target_not_constructor);
-    __ LoadTaggedPointerField(x5, FieldMemOperand(x3, HeapObject::kMapOffset));
+    __ LoadTaggedField(x5, FieldMemOperand(x3, HeapObject::kMapOffset));
     __ Ldrb(x5, FieldMemOperand(x5, Map::kBitFieldOffset));
     __ TestAndBranchIfAnySet(x5, Map::Bits1::IsConstructorBit::kMask,
                              &new_target_constructor);
@@ -2486,14 +2483,13 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
   // -----------------------------------
   __ AssertCallableFunction(x1);
 
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
      x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
 
   // Enter the context of the function; ToObject has to run in the function
   // context, and we also need to take the global proxy from the function
   // context in case of conversion.
-  __ LoadTaggedPointerField(cp,
-                            FieldMemOperand(x1, JSFunction::kContextOffset));
+  __ LoadTaggedField(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
   // We need to convert the receiver for non-native sloppy mode functions.
   Label done_convert;
   __ Ldr(w3, FieldMemOperand(x2, SharedFunctionInfo::kFlagsOffset));
@@ -2545,7 +2541,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
         __ Pop(cp, x1, x0, padreg);
         __ SmiUntag(x0);
       }
-      __ LoadTaggedPointerField(
+      __ LoadTaggedField(
           x2, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
       __ Bind(&convert_receiver);
     }
@@ -2579,7 +2575,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
 
     // Load [[BoundArguments]] into x2 and length of that into x4.
     Label no_bound_arguments;
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
        bound_argv, FieldMemOperand(x1, JSBoundFunction::kBoundArgumentsOffset));
     __ SmiUntagField(bound_argc,
                      FieldMemOperand(bound_argv, FixedArray::kLengthOffset));
@@ -2662,7 +2658,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
       __ SlotAddress(copy_to, total_argc);
       __ Sub(copy_from, copy_to, kSystemPointerSize);
       __ CopyDoubleWords(copy_to, copy_from, argc,
-                         TurboAssembler::kSrcLessThanDst);
+                         MacroAssembler::kSrcLessThanDst);
     }
   }
 
@@ -2681,8 +2677,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
       __ SlotAddress(copy_to, 1);
       __ Bind(&loop);
      __ Sub(counter, counter, 1);
-      __ LoadAnyTaggedField(scratch,
-                            MemOperand(bound_argv, kTaggedSize, PostIndex));
+      __ LoadTaggedField(scratch,
+                         MemOperand(bound_argv, kTaggedSize, PostIndex));
       __ Str(scratch, MemOperand(copy_to, kSystemPointerSize, PostIndex));
       __ Cbnz(counter, &loop);
     }
@@ -2703,15 +2699,15 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
   __ AssertBoundFunction(x1);
 
   // Patch the receiver to [[BoundThis]].
-  __ LoadAnyTaggedField(x10,
-                        FieldMemOperand(x1, JSBoundFunction::kBoundThisOffset));
+  __ LoadTaggedField(x10,
+                     FieldMemOperand(x1, JSBoundFunction::kBoundThisOffset));
   __ Poke(x10, __ ReceiverOperand(x0));
 
   // Push the [[BoundArguments]] onto the stack.
   Generate_PushBoundArguments(masm);
 
   // Call the [[BoundTargetFunction]] via the Call builtin.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
      x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
   __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
           RelocInfo::CODE_TARGET);
@@ -2812,7 +2808,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
   Label call_generic_stub;
 
   // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
      x4, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
   __ Ldr(w4, FieldMemOperand(x4, SharedFunctionInfo::kFlagsOffset));
   __ TestAndBranchIfAllClear(
@@ -2844,13 +2840,13 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
     Label done;
     __ CmpTagged(x1, x3);
     __ B(ne, &done);
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
        x3, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
     __ Bind(&done);
   }
 
   // Construct the [[BoundTargetFunction]] via the Construct builtin.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
      x1, FieldMemOperand(x1, JSBoundFunction::kBoundTargetFunctionOffset));
   __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
 }
@@ -2874,8 +2870,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
   __ JumpIfSmi(target, &non_constructor);
 
   // Check if target has a [[Construct]] internal method.
-  __ LoadTaggedPointerField(map,
-                            FieldMemOperand(target, HeapObject::kMapOffset));
+  __ LoadTaggedField(map, FieldMemOperand(target, HeapObject::kMapOffset));
   {
     Register flags = x2;
     DCHECK(!AreAliased(argc, target, map, instance_type, flags));
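Taken together, the JSBoundFunction hunks above implement the standard bound
call sequence. As a sketch:

    // receiver = bound->bound_this();           // patch the receiver slot
    // push bound->bound_arguments();            // Generate_PushBoundArguments
    // target = bound->bound_target_function();
    // tail-call Call_ReceiverIsAny (or Construct) on target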
@@ -2976,12 +2971,11 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
   Register scratch = x10;
   Label allocate_vector, done;
 
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       vector, FieldMemOperand(kWasmInstanceRegister,
                               WasmInstanceObject::kFeedbackVectorsOffset));
   __ Add(vector, vector, Operand(func_index, LSL, kTaggedSizeLog2));
-  __ LoadTaggedPointerField(vector,
-                            FieldMemOperand(vector, FixedArray::kHeaderSize));
+  __ LoadTaggedField(vector, FieldMemOperand(vector, FixedArray::kHeaderSize));
   __ JumpIfSmi(vector, &allocate_vector);
   __ bind(&done);
   __ Push(vector, xzr);
@@ -2996,7 +2990,7 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
   // Save registers.
   __ PushXRegList(kSavedGpRegs);
   __ PushQRegList(kSavedFpRegs);
-  __ Push<TurboAssembler::kSignLR>(lr, xzr);  // xzr is for alignment.
+  __ Push<MacroAssembler::kSignLR>(lr, xzr);  // xzr is for alignment.
 
   // Arguments to the runtime function: instance, func_index, and an
   // additional stack slot for the NativeModule. The first pushed register
@@ -3008,7 +3002,7 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
   __ Mov(vector, kReturnRegister0);
 
   // Restore registers and frame type.
-  __ Pop<TurboAssembler::kAuthLR>(xzr, lr);
+  __ Pop<MacroAssembler::kAuthLR>(xzr, lr);
   __ PopQRegList(kSavedFpRegs);
   __ PopXRegList(kSavedGpRegs);
   // Restore the instance from the frame.
@@ -3121,8 +3115,8 @@ void PrepareForBuiltinCall(MacroAssembler* masm, MemOperand GCScanSlotPlace,
          MemOperand(sp, -2 * kSystemPointerSize, PreIndex));
   // We had to prepare the parameters for the Call: we have to put the context
   // into kContextRegister.
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       kContextRegister,  // cp(x27)
       MemOperand(wasm_instance, wasm::ObjectAccess::ToTagged(
                                     WasmInstanceObject::kNativeContextOffset)));
 }
@@ -3210,7 +3204,7 @@ void AllocateSuspender(MacroAssembler* masm, Register function_data,
          MemOperand(fp, BuiltinWasmWrapperConstants::kGCScanSlotCountOffset));
   __ Stp(wasm_instance, function_data,
          MemOperand(sp, -2 * kSystemPointerSize, PreIndex));
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       kContextRegister,
      MemOperand(wasm_instance, wasm::ObjectAccess::ToTagged(
                                     WasmInstanceObject::kNativeContextOffset)));
@@ -3256,15 +3250,14 @@ void ReloadParentContinuation(MacroAssembler* masm, Register wasm_instance,
                      wasm::JumpBuffer::Retired);
   }
   Register parent = tmp2;
-  __ LoadAnyTaggedField(
-      parent,
-      FieldMemOperand(active_continuation,
-                      WasmContinuationObject::kParentOffset));
+  __ LoadTaggedField(parent,
+                     FieldMemOperand(active_continuation,
+                                     WasmContinuationObject::kParentOffset));
 
   // Update active continuation root.
   int32_t active_continuation_offset =
-      TurboAssembler::RootRegisterOffsetForRootIndex(
+      MacroAssembler::RootRegisterOffsetForRootIndex(
           RootIndex::kActiveContinuation);
   __ Str(parent, MemOperand(kRootRegister, active_continuation_offset));
   jmpbuf = parent;
   __ LoadExternalPointerField(
@@ -3293,7 +3286,7 @@ void RestoreParentSuspender(MacroAssembler* masm, Register tmp1,
       FieldMemOperand(suspender, WasmSuspenderObject::kStateOffset);
   __ Move(tmp2, Smi::FromInt(WasmSuspenderObject::kInactive));
   __ StoreTaggedField(tmp2, state_loc);
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       suspender,
       FieldMemOperand(suspender, WasmSuspenderObject::kParentOffset));
   __ CompareRoot(suspender, RootIndex::kUndefinedValue);
@@ -3313,8 +3306,8 @@ void RestoreParentSuspender(MacroAssembler* masm, Register tmp1,
   __ StoreTaggedField(tmp2, state_loc);
   __ bind(&undefined);
   int32_t active_suspender_offset =
-      TurboAssembler::RootRegisterOffsetForRootIndex(
+      MacroAssembler::RootRegisterOffsetForRootIndex(
          RootIndex::kActiveSuspender);
   __ Str(suspender, MemOperand(kRootRegister, active_suspender_offset));
 }
 
@@ -3322,17 +3315,16 @@ void LoadFunctionDataAndWasmInstance(MacroAssembler* masm,
                                      Register function_data,
                                      Register wasm_instance) {
   Register closure = function_data;
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       function_data,
       MemOperand(
          closure,
          wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction()));
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       function_data,
-      FieldMemOperand(function_data,
-                      SharedFunctionInfo::kFunctionDataOffset));
+      FieldMemOperand(function_data, SharedFunctionInfo::kFunctionDataOffset));
 
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       wasm_instance,
       FieldMemOperand(function_data,
                       WasmExportedFunctionData::kInstanceOffset));
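LoadFunctionDataAndWasmInstance chases tagged fields in a row; the shape, as a
sketch:

    // function_data = closure->shared_function_info()->function_data();
    // wasm_instance = function_data->instance();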
@ -3573,7 +3565,7 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
|
|||||||
// A result of AllocateSuspender is in the return register.
|
// A result of AllocateSuspender is in the return register.
|
||||||
__ Str(suspender, MemOperand(fp, kSuspenderOffset));
|
__ Str(suspender, MemOperand(fp, kSuspenderOffset));
|
||||||
DEFINE_SCOPED(target_continuation);
|
DEFINE_SCOPED(target_continuation);
|
||||||
__ LoadAnyTaggedField(
|
__ LoadTaggedField(
|
||||||
target_continuation,
|
target_continuation,
|
||||||
FieldMemOperand(suspender, WasmSuspenderObject::kContinuationOffset));
|
FieldMemOperand(suspender, WasmSuspenderObject::kContinuationOffset));
|
||||||
FREE_REG(suspender);
|
FREE_REG(suspender);
|
||||||
@ -4229,7 +4221,7 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
|
|||||||
__ Mov(scratch, 1);
|
__ Mov(scratch, 1);
|
||||||
__ Str(scratch, MemOperand(thread_in_wasm_flag_addr, 0));
|
__ Str(scratch, MemOperand(thread_in_wasm_flag_addr, 0));
|
||||||
|
|
||||||
__ LoadAnyTaggedField(
|
__ LoadTaggedField(
|
||||||
function_entry,
|
function_entry,
|
||||||
FieldMemOperand(function_data,
|
FieldMemOperand(function_data,
|
||||||
WasmExportedFunctionData::kInternalOffset));
|
WasmExportedFunctionData::kInternalOffset));
|
||||||
@ -4317,7 +4309,7 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
|
|||||||
// expected to be on the top of the stack).
|
// expected to be on the top of the stack).
|
||||||
// We cannot use just the ret instruction for this, because we cannot pass
|
// We cannot use just the ret instruction for this, because we cannot pass
|
||||||
// the number of slots to remove in a Register as an argument.
|
// the number of slots to remove in a Register as an argument.
|
||||||
__ DropArguments(param_count, TurboAssembler::kCountExcludesReceiver);
|
__ DropArguments(param_count, MacroAssembler::kCountExcludesReceiver);
|
||||||
__ Ret(lr);
|
__ Ret(lr);
|
||||||
|
|
||||||
// -------------------------------------------
|
// -------------------------------------------
|
||||||
@ -4497,7 +4489,7 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
|
|||||||
regs.ResetExcept(promise, suspender, continuation);
|
regs.ResetExcept(promise, suspender, continuation);
|
||||||
|
|
||||||
DEFINE_REG(suspender_continuation);
|
DEFINE_REG(suspender_continuation);
|
||||||
__ LoadAnyTaggedField(
|
__ LoadTaggedField(
|
||||||
suspender_continuation,
|
suspender_continuation,
|
||||||
FieldMemOperand(suspender, WasmSuspenderObject::kContinuationOffset));
|
FieldMemOperand(suspender, WasmSuspenderObject::kContinuationOffset));
|
||||||
if (v8_flags.debug_code) {
|
if (v8_flags.debug_code) {
|
||||||
@ -4518,18 +4510,19 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
|
|||||||
// Update roots.
|
// Update roots.
|
||||||
// -------------------------------------------
|
// -------------------------------------------
|
||||||
DEFINE_REG(caller);
|
DEFINE_REG(caller);
|
||||||
__ LoadAnyTaggedField(caller,
|
__ LoadTaggedField(caller,
|
||||||
FieldMemOperand(suspender_continuation,
|
FieldMemOperand(suspender_continuation,
|
||||||
WasmContinuationObject::kParentOffset));
|
WasmContinuationObject::kParentOffset));
|
||||||
int32_t active_continuation_offset =
|
int32_t active_continuation_offset =
|
||||||
TurboAssembler::RootRegisterOffsetForRootIndex(
|
MacroAssembler::RootRegisterOffsetForRootIndex(
|
||||||
RootIndex::kActiveContinuation);
|
RootIndex::kActiveContinuation);
|
||||||
__ Str(caller, MemOperand(kRootRegister, active_continuation_offset));
|
__ Str(caller, MemOperand(kRootRegister, active_continuation_offset));
|
||||||
DEFINE_REG(parent);
|
DEFINE_REG(parent);
|
||||||
__ LoadAnyTaggedField(
|
__ LoadTaggedField(
|
||||||
parent, FieldMemOperand(suspender, WasmSuspenderObject::kParentOffset));
|
parent, FieldMemOperand(suspender, WasmSuspenderObject::kParentOffset));
|
||||||
int32_t active_suspender_offset =
|
int32_t active_suspender_offset =
|
||||||
TurboAssembler::RootRegisterOffsetForRootIndex(RootIndex::kActiveSuspender);
|
MacroAssembler::RootRegisterOffsetForRootIndex(
|
||||||
|
RootIndex::kActiveSuspender);
|
||||||
__ Str(parent, MemOperand(kRootRegister, active_suspender_offset));
|
__ Str(parent, MemOperand(kRootRegister, active_suspender_offset));
|
||||||
regs.ResetExcept(promise, caller);
|
regs.ResetExcept(promise, caller);
|
||||||
|
|
||||||
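Aside from the rename, the functional content of the hunk above is the pair of root-slot offsets: suspending updates the kActiveContinuation and kActiveSuspender roots by storing straight into the isolate's roots table through kRootRegister. A toy sketch of that offset arithmetic, with a made-up constant standing in for the real, target-specific IsolateData layout:

    #include <cstdint>

    // Illustration only: kRootsTableOffset is an assumed stand-in; the real
    // value comes from IsolateData and differs per target.
    constexpr int32_t kRootsTableOffset = 128;
    constexpr int32_t kSystemPointerSize = 8;

    int32_t RootRegisterOffsetForRootIndex(int root_index) {
      // Each root occupies one pointer-sized slot in the roots table, so a
      // plain store to MemOperand(kRootRegister, offset) rewrites that root.
      return kRootsTableOffset + root_index * kSystemPointerSize;
    }

This is also why the Str into the roots table needs no write barrier: the table lives in IsolateData, outside the managed heap.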
@@ -4596,7 +4589,7 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
   // Load suspender from closure.
   // -------------------------------------------
   DEFINE_REG(sfi);
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       sfi,
       MemOperand(
           closure,
@@ -4606,12 +4599,12 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
   // RecordWriteField calls later.
   DEFINE_PINNED(suspender, WriteBarrierDescriptor::ObjectRegister());
   DEFINE_REG(function_data);
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       function_data,
       FieldMemOperand(sfi, SharedFunctionInfo::kFunctionDataOffset));
   // The write barrier uses a fixed register for the host object (rdi). The next
   // barrier is on the suspender, so load it in rdi directly.
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       suspender,
       FieldMemOperand(function_data, WasmResumeData::kSuspenderOffset));
   // Check the suspender state.
@@ -4660,8 +4653,8 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
       scratch,
       FieldMemOperand(suspender, WasmSuspenderObject::kStateOffset));
   int32_t active_suspender_offset =
-      TurboAssembler::RootRegisterOffsetForRootIndex(
+      MacroAssembler::RootRegisterOffsetForRootIndex(
          RootIndex::kActiveSuspender);
   __ Str(suspender, MemOperand(kRootRegister, active_suspender_offset));
 
   // Next line we are going to load a field from suspender, but we have to use
@@ -4670,10 +4663,9 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
   FREE_REG(suspender);
   DEFINE_PINNED(target_continuation, WriteBarrierDescriptor::ObjectRegister());
   suspender = target_continuation;
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       target_continuation,
-      FieldMemOperand(suspender,
-                      WasmSuspenderObject::kContinuationOffset));
+      FieldMemOperand(suspender, WasmSuspenderObject::kContinuationOffset));
   suspender = no_reg;
 
   __ StoreTaggedField(
@@ -4685,8 +4677,8 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
       active_continuation, kLRHasBeenSaved, SaveFPRegsMode::kIgnore);
   FREE_REG(active_continuation);
   int32_t active_continuation_offset =
-      TurboAssembler::RootRegisterOffsetForRootIndex(
+      MacroAssembler::RootRegisterOffsetForRootIndex(
          RootIndex::kActiveContinuation);
   __ Str(target_continuation,
          MemOperand(kRootRegister, active_continuation_offset));
 
@@ -4731,7 +4723,7 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
   __ bind(&suspend);
   __ LeaveFrame(StackFrame::STACK_SWITCH);
   // Pop receiver + parameter.
-  __ DropArguments(2, TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(2, MacroAssembler::kCountIncludesReceiver);
   __ Ret(lr);
 }
 }  // namespace
@@ -5320,12 +5312,12 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
   DCHECK(!AreAliased(receiver, holder, callback, data, undef, isolate_address,
                      name));
 
-  __ LoadAnyTaggedField(data,
+  __ LoadTaggedField(data,
                      FieldMemOperand(callback, AccessorInfo::kDataOffset));
   __ LoadRoot(undef, RootIndex::kUndefinedValue);
   __ Mov(isolate_address, ExternalReference::isolate_address(masm->isolate()));
-  __ LoadTaggedPointerField(
-      name, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+  __ LoadTaggedField(name,
+                     FieldMemOperand(callback, AccessorInfo::kNameOffset));
 
   // PropertyCallbackArguments:
   //   receiver, data, return value, return value default, isolate, holder,
@@ -5384,9 +5376,9 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
   // DirectCEntry places the return address on the stack (updated by the GC),
   // making the call GC safe. The irregexp backend relies on this.
 
-  __ Poke<TurboAssembler::kSignLR>(lr, 0);  // Store the return address.
+  __ Poke<MacroAssembler::kSignLR>(lr, 0);  // Store the return address.
   __ Blr(x10);                              // Call the C++ function.
-  __ Peek<TurboAssembler::kAuthLR>(lr, 0);  // Return to calling code.
+  __ Peek<MacroAssembler::kAuthLR>(lr, 0);  // Return to calling code.
   __ AssertFPCRState();
   __ Ret();
 }
@@ -5696,10 +5688,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
 
   // Get the InstructionStream object from the shared function info.
   Register code_obj = x22;
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       code_obj,
       FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       code_obj,
       FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
 
@@ -5731,11 +5723,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
 
   // Load the feedback vector.
   Register feedback_vector = x2;
-  __ LoadTaggedPointerField(
-      feedback_vector,
-      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
-  __ LoadTaggedPointerField(
-      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(feedback_vector, Cell::kValueOffset));
 
   Label install_baseline_code;
   // Check if feedback vector is valid. If not, call prepare for baseline to
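Taken together, the hunks in the file above make two mechanical substitutions: the former TurboAssembler statics are now spelled through MacroAssembler (the two classes having been merged, per the turbo-assembler to macro-assembler-base rename in the build files), and the LoadTaggedPointerField / LoadAnyTaggedField pair collapses into a single LoadTaggedField. The distinction the old names encoded (field always holds a HeapObject pointer vs. may hold a Smi) no longer changes the emitted code. A standalone toy model of what a unified tagged-field load does under pointer compression; the constant and tagging details are illustrative, not V8's actual layout:

    #include <cstdint>

    // Assumed cage base, for illustration; V8 derives this per-isolate.
    constexpr uint64_t kCageBase = 0x400000000000ull;

    uint64_t LoadTaggedField(const uint32_t* field) {
      uint32_t compressed = *field;     // one 32-bit load, Smi or HeapObject
      if (compressed & 1) {             // HeapObject tag in the low bit
        return kCageBase + compressed;  // decompress by adding the cage base
      }
      return compressed;                // Smi: zero-extension suffices
    }

Since one load-and-decompress sequence handles both kinds of values, the pointer/any split could be retired, which is why every call-site change here is a pure rename.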
@@ -125,8 +125,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
   }
 
   // Remove caller arguments from the stack and return.
-  __ DropArguments(edx, ecx, TurboAssembler::kCountIsSmi,
-                   TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(edx, ecx, MacroAssembler::kCountIsSmi,
+                   MacroAssembler::kCountIncludesReceiver);
   __ ret(0);
 
   __ bind(&stack_overflow);
@@ -280,8 +280,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   __ LeaveFrame(StackFrame::CONSTRUCT);
 
   // Remove caller arguments from the stack and return.
-  __ DropArguments(edx, ecx, TurboAssembler::kCountIsSmi,
-                   TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(edx, ecx, MacroAssembler::kCountIsSmi,
+                   MacroAssembler::kCountIncludesReceiver);
   __ ret(0);
 
   // Otherwise we do a smi check and fall through to check if the return value
@@ -768,8 +768,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
   __ leave();
 
   // Drop receiver + arguments.
-  __ DropArguments(params_size, scratch2, TurboAssembler::kCountIsBytes,
-                   TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(params_size, scratch2, MacroAssembler::kCountIsBytes,
+                   MacroAssembler::kCountIncludesReceiver);
 }
 
 // Advance the current bytecode offset. This simulates what all bytecode
@@ -1810,8 +1810,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
   }
   __ bind(&no_this_arg);
   __ DropArgumentsAndPushNewReceiver(eax, edi, ecx,
-                                     TurboAssembler::kCountIsInteger,
-                                     TurboAssembler::kCountIncludesReceiver);
+                                     MacroAssembler::kCountIsInteger,
+                                     MacroAssembler::kCountIncludesReceiver);
 
   // Restore receiver to edi.
   __ movd(edi, xmm0);
@@ -1919,8 +1919,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
   __ movd(xmm0, edx);
 
   __ DropArgumentsAndPushNewReceiver(eax, ecx, edx,
-                                     TurboAssembler::kCountIsInteger,
-                                     TurboAssembler::kCountIncludesReceiver);
+                                     MacroAssembler::kCountIsInteger,
+                                     MacroAssembler::kCountIncludesReceiver);
 
   // Restore argumentsList.
   __ movd(edx, xmm0);
@@ -1978,8 +1978,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
 
   __ DropArgumentsAndPushNewReceiver(
       eax, masm->RootAsOperand(RootIndex::kUndefinedValue), ecx,
-      TurboAssembler::kCountIsInteger,
-      TurboAssembler::kCountIncludesReceiver);
+      MacroAssembler::kCountIsInteger,
+      MacroAssembler::kCountIncludesReceiver);
 
   // Restore argumentsList.
   __ movd(ecx, xmm0);
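Every change in the file above touches the DropArguments family, whose enumerators encode two orthogonal facts about the count operand: how it is represented (kCountIsInteger, kCountIsSmi, kCountIsBytes) and whether it already covers the receiver slot (kCountIncludesReceiver / kCountExcludesReceiver). A hypothetical C++ helper showing the bookkeeping those flags imply; the real implementation is per-architecture assembler, and the Smi shift below assumes a 64-bit target:

    #include <cstdint>

    enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver };
    enum ArgumentsCountType { kCountIsInteger, kCountIsSmi, kCountIsBytes };

    int64_t BytesToDrop(int64_t count, ArgumentsCountType type,
                        ArgumentsCountMode mode) {
      constexpr int kSystemPointerSize = 8;
      constexpr int kSmiShift = 32;  // assumed: 64-bit target, 31-bit Smis
      switch (type) {
        case kCountIsSmi:
          count >>= kSmiShift;  // untag, then treat as an integer count
          break;
        case kCountIsBytes:
          // Already scaled; only the receiver correction can remain.
          return mode == kCountExcludesReceiver ? count + kSystemPointerSize
                                                : count;
        case kCountIsInteger:
          break;
      }
      if (mode == kCountExcludesReceiver) ++count;  // add the receiver slot
      return count * kSystemPointerSize;
    }

The ia32 sites here would of course use 4-byte pointers and a 1-bit Smi tag; the constants above are purely illustrative.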
@@ -112,8 +112,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
   }
 
   // Remove caller arguments from the stack and return.
-  __ DropArguments(t3, TurboAssembler::kCountIsSmi,
-                   TurboAssembler::kCountIncludesReceiver, t3);
+  __ DropArguments(t3, MacroAssembler::kCountIsSmi,
+                   MacroAssembler::kCountIncludesReceiver, t3);
   __ Ret();
 }
 
@@ -267,8 +267,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   __ LeaveFrame(StackFrame::CONSTRUCT);
 
   // Remove caller arguments from the stack and return.
-  __ DropArguments(a1, TurboAssembler::kCountIsSmi,
-                   TurboAssembler::kCountIncludesReceiver, a4);
+  __ DropArguments(a1, MacroAssembler::kCountIsSmi,
+                   MacroAssembler::kCountIncludesReceiver, a4);
   __ Ret();
 
   __ bind(&check_receiver);
@@ -803,8 +803,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
   __ LeaveFrame(StackFrame::INTERPRETED);
 
   // Drop receiver + arguments.
-  __ DropArguments(params_size, TurboAssembler::kCountIsBytes,
-                   TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(params_size, MacroAssembler::kCountIsBytes,
+                   MacroAssembler::kCountIncludesReceiver);
 }
 
 // Advance the current bytecode offset. This simulates what all bytecode
@@ -1328,7 +1328,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
 
   // Push the arguments.
   __ PushArray(start_address, num_args, scratch, scratch2,
-               TurboAssembler::PushArrayOrder::kReverse);
+               MacroAssembler::PushArrayOrder::kReverse);
 }
 
 // static
@@ -1794,8 +1794,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
   __ Movz(arg_array, undefined_value, scratch);  // if argc == 1
   __ Ld_d(receiver, MemOperand(sp, 0));
   __ DropArgumentsAndPushNewReceiver(argc, this_arg,
-                                     TurboAssembler::kCountIsInteger,
-                                     TurboAssembler::kCountIncludesReceiver);
+                                     MacroAssembler::kCountIsInteger,
+                                     MacroAssembler::kCountIncludesReceiver);
 }
 
 // ----------- S t a t e -------------
@@ -1889,8 +1889,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
   __ Movz(arguments_list, undefined_value, scratch);  // if argc == 2
 
   __ DropArgumentsAndPushNewReceiver(argc, this_argument,
-                                     TurboAssembler::kCountIsInteger,
-                                     TurboAssembler::kCountIncludesReceiver);
+                                     MacroAssembler::kCountIsInteger,
+                                     MacroAssembler::kCountIncludesReceiver);
 }
 
 // ----------- S t a t e -------------
@@ -1949,8 +1949,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
   __ Movz(new_target, target, scratch);  // if argc == 2
 
   __ DropArgumentsAndPushNewReceiver(argc, undefined_value,
-                                     TurboAssembler::kCountIsInteger,
-                                     TurboAssembler::kCountIncludesReceiver);
+                                     MacroAssembler::kCountIsInteger,
+                                     MacroAssembler::kCountIncludesReceiver);
 }
 
 // ----------- S t a t e -------------
@@ -112,8 +112,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
   }
 
   // Remove caller arguments from the stack and return.
-  __ DropArguments(t3, TurboAssembler::kCountIsSmi,
-                   TurboAssembler::kCountIncludesReceiver, t3);
+  __ DropArguments(t3, MacroAssembler::kCountIsSmi,
+                   MacroAssembler::kCountIncludesReceiver, t3);
   __ Ret();
 }
 
@@ -267,8 +267,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   __ LeaveFrame(StackFrame::CONSTRUCT);
 
   // Remove caller arguments from the stack and return.
-  __ DropArguments(a1, TurboAssembler::kCountIsSmi,
-                   TurboAssembler::kCountIncludesReceiver, a4);
+  __ DropArguments(a1, MacroAssembler::kCountIsSmi,
+                   MacroAssembler::kCountIncludesReceiver, a4);
   __ Ret();
 
   __ bind(&check_receiver);
@@ -804,8 +804,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
   __ LeaveFrame(StackFrame::INTERPRETED);
 
   // Drop receiver + arguments.
-  __ DropArguments(params_size, TurboAssembler::kCountIsBytes,
-                   TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(params_size, MacroAssembler::kCountIsBytes,
+                   MacroAssembler::kCountIncludesReceiver);
 }
 
 // Advance the current bytecode offset. This simulates what all bytecode
@@ -1320,7 +1320,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
 
   // Push the arguments.
   __ PushArray(start_address, num_args, scratch, scratch2,
-               TurboAssembler::PushArrayOrder::kReverse);
+               MacroAssembler::PushArrayOrder::kReverse);
 }
 
 // static
@@ -1784,8 +1784,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
   __ Movz(arg_array, undefined_value, scratch);  // if argc == 1
   __ Ld(receiver, MemOperand(sp));
   __ DropArgumentsAndPushNewReceiver(argc, this_arg,
-                                     TurboAssembler::kCountIsInteger,
-                                     TurboAssembler::kCountIncludesReceiver);
+                                     MacroAssembler::kCountIsInteger,
+                                     MacroAssembler::kCountIncludesReceiver);
 }
 
 // ----------- S t a t e -------------
@@ -1881,8 +1881,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
   __ Movz(arguments_list, undefined_value, scratch);  // if argc == 2
 
   __ DropArgumentsAndPushNewReceiver(argc, this_argument,
-                                     TurboAssembler::kCountIsInteger,
-                                     TurboAssembler::kCountIncludesReceiver);
+                                     MacroAssembler::kCountIsInteger,
+                                     MacroAssembler::kCountIncludesReceiver);
 }
 
 // ----------- S t a t e -------------
@@ -1941,8 +1941,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
   __ Movz(new_target, target, scratch);  // if argc == 2
 
   __ DropArgumentsAndPushNewReceiver(argc, undefined_value,
-                                     TurboAssembler::kCountIsInteger,
-                                     TurboAssembler::kCountIncludesReceiver);
+                                     MacroAssembler::kCountIsInteger,
+                                     MacroAssembler::kCountIncludesReceiver);
 }
 
 // ----------- S t a t e -------------
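One call in each of the two preceding files is GenerateInterpreterPushArgs' PushArray with PushArrayOrder::kReverse: the interpreter supplies the arguments as an ascending memory region, and pushing them back-to-front lays them out in the order the call expects. A toy model of the two orders, with a vector standing in for the machine stack (illustration only; the real PushArray is per-target assembler):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    enum class PushArrayOrder { kNormal, kReverse };

    // back() plays the role of the stack top.
    void PushArray(std::vector<uint64_t>& stack, const uint64_t* array,
                   size_t size, PushArrayOrder order) {
      if (order == PushArrayOrder::kReverse) {
        // Push array[size - 1] first, so array[0] ends up on top.
        for (size_t i = size; i-- > 0;) stack.push_back(array[i]);
      } else {
        // Push array[0] first, so array[size - 1] ends up on top.
        for (size_t i = 0; i < size; ++i) stack.push_back(array[i]);
      }
    }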
@@ -64,7 +64,7 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
   }
   __ CmpS32(scratch1, Operand(INTERPRETER_DATA_TYPE), r0);
   __ bne(&done);
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       sfi_data,
       FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset), r0);
 
@@ -120,10 +120,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
 
   // Get the InstructionStream object from the shared function info.
   Register code_obj = r9;
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
      code_obj, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset),
      r0);
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       code_obj,
       FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset), r0);
 
@@ -155,12 +155,11 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
 
   // Load the feedback vector.
   Register feedback_vector = r5;
-  __ LoadTaggedPointerField(
-      feedback_vector,
-      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0);
-  __ LoadTaggedPointerField(
-      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset),
-      r0);
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(closure, JSFunction::kFeedbackCellOffset),
+                     r0);
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(feedback_vector, Cell::kValueOffset), r0);
 
   Label install_baseline_code;
   // Check if feedback vector is valid. If not, call prepare for baseline to
@@ -361,8 +360,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
     // Leave construct frame.
   }
   // Remove caller arguments from the stack and return.
-  __ DropArguments(scratch, TurboAssembler::kCountIsSmi,
-                   TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(scratch, MacroAssembler::kCountIsSmi,
+                   MacroAssembler::kCountIncludesReceiver);
   __ blr();
 
   __ bind(&stack_overflow);
@@ -431,7 +430,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
 
   // Load deoptimization data from the code object.
   // <deopt_data> = <code>[#deoptimization_data_offset]
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r4,
       FieldMemOperand(
          r3, InstructionStream::kDeoptimizationDataOrInterpreterDataOffset),
@@ -495,7 +494,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   //  -- sp[4*kSystemPointerSize]: context
   // -----------------------------------
 
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
   __ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
   __ DecodeField<SharedFunctionInfo::FunctionKindBits>(r7);
@@ -611,8 +610,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   __ LeaveFrame(StackFrame::CONSTRUCT);
 
   // Remove caller arguments from the stack and return.
-  __ DropArguments(r4, TurboAssembler::kCountIsSmi,
-                   TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(r4, MacroAssembler::kCountIsSmi,
+                   MacroAssembler::kCountIncludesReceiver);
   __ blr();
 
   __ bind(&check_receiver);
@@ -660,10 +659,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   __ AssertGeneratorObject(r4);
 
   // Load suspended function and context.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset), r0);
-  __ LoadTaggedPointerField(cp, FieldMemOperand(r7, JSFunction::kContextOffset),
-      r0);
+  __ LoadTaggedField(cp, FieldMemOperand(r7, JSFunction::kContextOffset), r0);
 
   // Flood function if we are stepping.
   Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
@@ -703,12 +701,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // -----------------------------------
 
   // Copy the function arguments from the generator object's register file.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset), r0);
   __ LoadU16(
      r6, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset));
   __ subi(r6, r6, Operand(kJSArgcReceiverSlots));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
      r5, FieldMemOperand(r4, JSGeneratorObject::kParametersAndRegistersOffset),
      r0);
   {
@@ -719,14 +717,14 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ blt(&done_loop);
     __ ShiftLeftU64(r10, r6, Operand(kTaggedSizeLog2));
     __ add(scratch, r5, r10);
-    __ LoadAnyTaggedField(
-        scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize), r0);
+    __ LoadTaggedField(scratch,
+                       FieldMemOperand(scratch, FixedArray::kHeaderSize), r0);
     __ Push(scratch);
     __ b(&loop);
     __ bind(&done_loop);
 
     // Push receiver.
-    __ LoadAnyTaggedField(
+    __ LoadTaggedField(
         scratch, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset), r0);
     __ Push(scratch);
   }
@@ -734,9 +732,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // Underlying function needs to have bytecode available.
   if (v8_flags.debug_code) {
     Label is_baseline;
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset), r0);
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         r6, FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset), r0);
     GetSharedFunctionInfoBytecodeOrBaseline(masm, r6, ip, &is_baseline);
     __ CompareObjectType(r6, r6, r6, BYTECODE_ARRAY_TYPE);
@@ -746,7 +744,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
 
   // Resume (Ignition/TurboFan) generator object.
   {
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         r3, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset), r0);
     __ LoadU16(r3, FieldMemOperand(
                        r3, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -756,8 +754,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ mr(r6, r4);
     __ mr(r4, r7);
     static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
-    __ LoadTaggedPointerField(r5, FieldMemOperand(r4, JSFunction::kCodeOffset),
-                              r0);
+    __ LoadTaggedField(r5, FieldMemOperand(r4, JSFunction::kCodeOffset), r0);
     __ JumpCodeObject(r5);
   }
 
@@ -769,7 +766,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ PushRoot(RootIndex::kTheHoleValue);
     __ CallRuntime(Runtime::kDebugOnFunctionCall);
     __ Pop(r4);
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset), r0);
   }
   __ b(&stepping_prepared);
@@ -780,7 +777,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ Push(r4);
     __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
     __ Pop(r4);
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset), r0);
   }
   __ b(&stepping_prepared);
@@ -1119,8 +1116,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
   // Leave the frame (also dropping the register file).
   __ LeaveFrame(StackFrame::INTERPRETED);
 
-  __ DropArguments(params_size, TurboAssembler::kCountIsBytes,
-                   TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(params_size, MacroAssembler::kCountIsBytes,
+                   MacroAssembler::kCountIncludesReceiver);
 }
 
 // Advance the current bytecode offset. This simulates what all bytecode
@@ -1212,12 +1209,11 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
           BaselineOutOfLinePrologueDescriptor::kClosure);
   // Load the feedback vector from the closure.
   Register feedback_vector = ip;
-  __ LoadTaggedPointerField(
-      feedback_vector,
-      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0);
-  __ LoadTaggedPointerField(
-      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset),
-      r0);
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(closure, JSFunction::kFeedbackCellOffset),
+                     r0);
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(feedback_vector, Cell::kValueOffset), r0);
   __ AssertFeedbackVector(feedback_vector, r11);
 
   // Check for an tiering state.
@@ -1378,10 +1374,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(
 
   // Get the bytecode array from the function object and load it into
   // kInterpreterBytecodeArrayRegister.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
      r7, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset), r0);
   // Load original bytecode array or the debug copy.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       kInterpreterBytecodeArrayRegister,
       FieldMemOperand(r7, SharedFunctionInfo::kFunctionDataOffset), r0);
 
@@ -1397,17 +1393,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   __ bne(&compile_lazy);
 
   // Load the feedback vector from the closure.
-  __ LoadTaggedPointerField(
-      feedback_vector,
-      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0);
-  __ LoadTaggedPointerField(
-      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset),
-      r0);
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(closure, JSFunction::kFeedbackCellOffset),
+                     r0);
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(feedback_vector, Cell::kValueOffset), r0);
 
   Label push_stack_frame;
   // Check if feedback vector is valid. If valid, check for optimized code
   // and update invocation count. Otherwise, setup the stack frame.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r7, FieldMemOperand(feedback_vector, HeapObject::kMapOffset), r0);
   __ LoadU16(r7, FieldMemOperand(r7, Map::kInstanceTypeOffset));
   __ cmpi(r7, Operand(FEEDBACK_VECTOR_TYPE));
@@ -1589,17 +1584,17 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   __ bind(&is_baseline);
   {
     // Load the feedback vector from the closure.
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         feedback_vector,
         FieldMemOperand(closure, JSFunction::kFeedbackCellOffset), r0);
-    __ LoadTaggedPointerField(
-        feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset),
+    __ LoadTaggedField(feedback_vector,
+                       FieldMemOperand(feedback_vector, Cell::kValueOffset),
        r0);
 
     Label install_baseline_code;
     // Check if feedback vector is valid. If not, call prepare for baseline to
     // allocate it.
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         ip, FieldMemOperand(feedback_vector, HeapObject::kMapOffset), r0);
     __ LoadU16(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
     __ CmpS32(ip, Operand(FEEDBACK_VECTOR_TYPE), r0);
@@ -1636,7 +1631,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
   __ sub(start_address, start_address, scratch);
   // Push the arguments.
   __ PushArray(start_address, num_args, scratch, r0,
-               TurboAssembler::PushArrayOrder::kReverse);
+               MacroAssembler::PushArrayOrder::kReverse);
 }
 
 // static
@@ -1773,16 +1768,16 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
   // get the custom trampoline, otherwise grab the entry address of the global
   // trampoline.
   __ LoadU64(r5, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r5, FieldMemOperand(r5, JSFunction::kSharedFunctionInfoOffset), r0);
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset), r0);
   __ CompareObjectType(r5, kInterpreterDispatchTableRegister,
                        kInterpreterDispatchTableRegister,
                        INTERPRETER_DATA_TYPE);
   __ bne(&builtin_trampoline);
 
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r5, FieldMemOperand(r5, InterpreterData::kInterpreterTrampolineOffset),
       r0);
   __ LoadCodeEntry(r5, r5);
@@ -2027,8 +2022,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
   __ LoadU64(r5, MemOperand(sp, 2 * kSystemPointerSize));  // argArray
 
   __ bind(&done);
-  __ DropArgumentsAndPushNewReceiver(r3, r8, TurboAssembler::kCountIsInteger,
-                                     TurboAssembler::kCountIncludesReceiver);
+  __ DropArgumentsAndPushNewReceiver(r3, r8, MacroAssembler::kCountIsInteger,
+                                     MacroAssembler::kCountIncludesReceiver);
 }
 
 // ----------- S t a t e -------------
@@ -2111,8 +2106,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
   __ LoadU64(r5, MemOperand(sp, 3 * kSystemPointerSize));  // argArray
 
   __ bind(&done);
-  __ DropArgumentsAndPushNewReceiver(r3, r8, TurboAssembler::kCountIsInteger,
-                                     TurboAssembler::kCountIncludesReceiver);
+  __ DropArgumentsAndPushNewReceiver(r3, r8, MacroAssembler::kCountIsInteger,
+                                     MacroAssembler::kCountIncludesReceiver);
 }
 
 // ----------- S t a t e -------------
@@ -2160,8 +2155,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
   __ blt(&done);
   __ LoadU64(r6, MemOperand(sp, 3 * kSystemPointerSize));  // argArray
   __ bind(&done);
-  __ DropArgumentsAndPushNewReceiver(r3, r7, TurboAssembler::kCountIsInteger,
-                                     TurboAssembler::kCountIncludesReceiver);
+  __ DropArgumentsAndPushNewReceiver(r3, r7, MacroAssembler::kCountIsInteger,
+                                     MacroAssembler::kCountIncludesReceiver);
 }
 
 // ----------- S t a t e -------------
@@ -2240,8 +2235,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
   // Allow r5 to be a FixedArray, or a FixedDoubleArray if r7 == 0.
   Label ok, fail;
   __ AssertNotSmi(r5);
-  __ LoadTaggedPointerField(scratch,
-                            FieldMemOperand(r5, HeapObject::kMapOffset), r0);
+  __ LoadTaggedField(scratch, FieldMemOperand(r5, HeapObject::kMapOffset),
+                     r0);
   __ LoadU16(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
   __ cmpi(scratch, Operand(FIXED_ARRAY_TYPE));
   __ beq(&ok);
@@ -2276,7 +2271,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag - kTaggedSize));
   __ mtctr(r7);
   __ bind(&loop);
-  __ LoadTaggedPointerField(scratch, MemOperand(r5, kTaggedSize), r0);
+  __ LoadTaggedField(scratch, MemOperand(r5, kTaggedSize), r0);
   __ addi(r5, r5, Operand(kTaggedSize));
   __ CompareRoot(scratch, RootIndex::kTheHoleValue);
   __ bne(&skip);
@@ -2311,8 +2306,8 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
   if (mode == CallOrConstructMode::kConstruct) {
     Label new_target_constructor, new_target_not_constructor;
     __ JumpIfSmi(r6, &new_target_not_constructor);
-    __ LoadTaggedPointerField(scratch,
-                              FieldMemOperand(r6, HeapObject::kMapOffset), r0);
+    __ LoadTaggedField(scratch, FieldMemOperand(r6, HeapObject::kMapOffset),
+                       r0);
     __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
     __ TestBit(scratch, Map::Bits1::IsConstructorBit::kShift, r0);
     __ bne(&new_target_constructor, cr0);
@@ -2395,14 +2390,13 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
   // -----------------------------------
   __ AssertCallableFunction(r4);
 
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
 
   // Enter the context of the function; ToObject has to run in the function
   // context, and we also need to take the global proxy from the function
   // context in case of conversion.
-  __ LoadTaggedPointerField(cp, FieldMemOperand(r4, JSFunction::kContextOffset),
-                            r0);
+  __ LoadTaggedField(cp, FieldMemOperand(r4, JSFunction::kContextOffset), r0);
   // We need to convert the receiver for non-native sloppy mode functions.
   Label done_convert;
   __ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kFlagsOffset));
@@ -2456,7 +2450,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
       __ Pop(r3, r4);
       __ SmiUntag(r3);
     }
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
     __ bind(&convert_receiver);
   }
@@ -2487,7 +2481,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
 
   // Load [[BoundArguments]] into r5 and length of that into r7.
   Label no_bound_arguments;
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r5, FieldMemOperand(r4, JSBoundFunction::kBoundArgumentsOffset), r0);
   __ SmiUntag(r7, FieldMemOperand(r5, FixedArray::kLengthOffset), SetRC, r0);
   __ beq(&no_bound_arguments, cr0);
@@ -2536,7 +2530,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
       __ subi(r7, r7, Operand(1));
       __ ShiftLeftU64(scratch, r7, Operand(kTaggedSizeLog2));
       __ add(scratch, scratch, r5);
-      __ LoadAnyTaggedField(scratch, MemOperand(scratch), r0);
+      __ LoadTaggedField(scratch, MemOperand(scratch), r0);
       __ Push(scratch);
       __ bdnz(&loop);
       __ bind(&done);
@@ -2559,15 +2553,15 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
   __ AssertBoundFunction(r4);
 
   // Patch the receiver to [[BoundThis]].
-  __ LoadAnyTaggedField(
-      r6, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset), r0);
+  __ LoadTaggedField(r6, FieldMemOperand(r4, JSBoundFunction::kBoundThisOffset),
+                     r0);
   __ StoreReceiver(r6, r3, ip);
 
   // Push the [[BoundArguments]] onto the stack.
   Generate_PushBoundArguments(masm);
 
   // Call the [[BoundTargetFunction]] via the Call builtin.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
      r4, FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset), r0);
   __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
           RelocInfo::CODE_TARGET);
@@ -2667,7 +2661,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
   Label call_generic_stub;
 
   // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset), r0);
   __ lwz(r7, FieldMemOperand(r7, SharedFunctionInfo::kFlagsOffset));
   __ mov(ip, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
@@ -2699,12 +2693,12 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
     Label skip;
     __ CompareTagged(r4, r6);
     __ bne(&skip);
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
        r6, FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset), r0);
     __ bind(&skip);
 
   // Construct the [[BoundTargetFunction]] via the Construct builtin.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
      r4, FieldMemOperand(r4, JSBoundFunction::kBoundTargetFunctionOffset), r0);
   __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
 }
@@ -2728,8 +2722,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
   __ JumpIfSmi(target, &non_constructor);
 
   // Check if target has a [[Construct]] internal method.
-  __ LoadTaggedPointerField(
-      map, FieldMemOperand(target, HeapObject::kMapOffset), r0);
+  __ LoadTaggedField(map, FieldMemOperand(target, HeapObject::kMapOffset), r0);
   {
     Register flags = r5;
     DCHECK(!AreAliased(argc, target, map, instance_type, flags));
@@ -2817,15 +2810,15 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
   Register scratch = ip;
   Label allocate_vector, done;
 
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       vector,
       FieldMemOperand(kWasmInstanceRegister,
                       WasmInstanceObject::kFeedbackVectorsOffset),
       scratch);
   __ ShiftLeftU64(scratch, func_index, Operand(kTaggedSizeLog2));
   __ AddS64(vector, vector, scratch);
-  __ LoadTaggedPointerField(
-      vector, FieldMemOperand(vector, FixedArray::kHeaderSize), scratch);
+  __ LoadTaggedField(vector, FieldMemOperand(vector, FixedArray::kHeaderSize),
+                     scratch);
   __ JumpIfSmi(vector, &allocate_vector);
   __ bind(&done);
   __ push(kWasmInstanceRegister);
@@ -3530,16 +3523,16 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
 
   __ push(receiver);
   // Push data from AccessorInfo.
-  __ LoadAnyTaggedField(
-      scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset), r0);
+  __ LoadTaggedField(scratch,
+                     FieldMemOperand(callback, AccessorInfo::kDataOffset), r0);
   __ push(scratch);
   __ LoadRoot(scratch, RootIndex::kUndefinedValue);
   __ Push(scratch, scratch);
   __ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
   __ Push(scratch, holder);
   __ Push(Smi::zero());  // should_throw_on_error -> false
-  __ LoadTaggedPointerField(
-      scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset), r0);
+  __ LoadTaggedField(scratch,
+                     FieldMemOperand(callback, AccessorInfo::kNameOffset), r0);
   __ push(scratch);
 
   // v8::PropertyCallbackInfo::args_ array and name handle.
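A wrinkle specific to the file above: nearly every tagged load threads an extra scratch register (r0 at most sites) through the call, where the earlier files' sites did not. The usual reason on PPC-style ISAs is that a D-form load encodes only a signed 16-bit displacement, so a wider field offset must first be materialized in a register. A self-contained toy expressing just that constraint (an inference about the motive, not V8's actual PPC code):

    #include <climits>
    #include <cstdint>

    // True if the offset fits a D-form load's signed 16-bit displacement.
    bool OffsetFitsDForm(int64_t offset) {
      return offset >= SHRT_MIN && offset <= SHRT_MAX;
    }

    // Instructions a field load costs: 1 if the displacement fits, else 2
    // (move the offset into the scratch register, then an indexed-form load).
    int FieldLoadCost(int64_t field_offset) {
      return OffsetFitsDForm(field_offset) ? 1 : 2;
    }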
@ -155,7 +155,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
|
|||||||
{
|
{
|
||||||
UseScratchRegisterScope temps(masm);
|
UseScratchRegisterScope temps(masm);
|
||||||
Register func_info = temps.Acquire();
|
Register func_info = temps.Acquire();
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(
|
||||||
func_info, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
|
func_info, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
|
||||||
__ Load32U(func_info,
|
__ Load32U(func_info,
|
||||||
FieldMemOperand(func_info, SharedFunctionInfo::kFlagsOffset));
|
FieldMemOperand(func_info, SharedFunctionInfo::kFlagsOffset));
|
||||||
@ -353,7 +353,7 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
|
|||||||
|
|
||||||
__ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE),
|
__ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE),
|
||||||
Label::Distance::kNear);
|
Label::Distance::kNear);
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(
|
||||||
sfi_data,
|
sfi_data,
|
||||||
FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
|
FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
|
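The hunk above is the InterpreterData unwrap: once the instance type says INTERPRETER_DATA_TYPE, the real BytecodeArray is one more tagged load away, at InterpreterData::kBytecodeArrayOffset. The same control flow restated in plain C++; the struct layouts are assumptions for illustration only.

struct BytecodeArray {};
struct InterpreterData {
  BytecodeArray* bytecode_array;  // InterpreterData::kBytecodeArrayOffset
};

// sfi_data is whatever SharedFunctionInfo's function data held; the caller
// has already checked the instance type, mirroring the Branch above.
BytecodeArray* UnwrapBytecode(void* sfi_data, bool is_interpreter_data) {
  if (!is_interpreter_data) return static_cast<BytecodeArray*>(sfi_data);
  return static_cast<InterpreterData*>(sfi_data)->bytecode_array;
}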
@@ -377,10 +377,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   __ AssertGeneratorObject(a1);

   // Load suspended function and context.
-  __ LoadTaggedPointerField(
-      a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
-  __ LoadTaggedPointerField(cp,
-                            FieldMemOperand(a4, JSFunction::kContextOffset));
+  __ LoadTaggedField(a4,
+                     FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+  __ LoadTaggedField(cp, FieldMemOperand(a4, JSFunction::kContextOffset));

   // Flood function if we are stepping.
   Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
@@ -417,12 +416,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // context allocation for any variables in generators, the actual argument
   // values have already been copied into the context and these dummy values
   // will never be used.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
   __ Lhu(a3,
          FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
   __ SubWord(a3, a3, Operand(kJSArgcReceiverSlots));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       t1,
       FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset));
   {
@@ -431,23 +430,23 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ SubWord(a3, a3, Operand(1));
     __ Branch(&done_loop, lt, a3, Operand(zero_reg), Label::Distance::kNear);
     __ CalcScaledAddress(kScratchReg, t1, a3, kTaggedSizeLog2);
-    __ LoadAnyTaggedField(
-        kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
+    __ LoadTaggedField(kScratchReg,
+                       FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
     __ Push(kScratchReg);
     __ Branch(&loop);
     __ bind(&done_loop);
     // Push receiver.
-    __ LoadAnyTaggedField(
-        kScratchReg, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
+    __ LoadTaggedField(kScratchReg,
+                       FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
     __ Push(kScratchReg);
   }

   // Underlying function needs to have bytecode available.
   if (v8_flags.debug_code) {
     Label is_baseline;
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
     GetSharedFunctionInfoBytecodeOrBaseline(masm, a3, a0, &is_baseline);
     __ GetObjectType(a3, a3, a3);
@@ -458,7 +457,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {

   // Resume (Ignition/TurboFan) generator object.
   {
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
     __ Lhu(a0, FieldMemOperand(
                    a0, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -468,7 +467,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ Move(a3, a1);
     __ Move(a1, a4);
     static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
-    __ LoadTaggedPointerField(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+    __ LoadTaggedField(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
     __ JumpCodeObject(a2);
   }

@@ -481,8 +480,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ CallRuntime(Runtime::kDebugOnFunctionCall);
     __ Pop(a1);
   }
-  __ LoadTaggedPointerField(
-      a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+  __ LoadTaggedField(a4,
+                     FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
   __ Branch(&stepping_prepared);

   __ bind(&prepare_step_in_suspended_generator);
@@ -492,8 +491,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
     __ Pop(a1);
   }
-  __ LoadTaggedPointerField(
-      a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+  __ LoadTaggedField(a4,
+                     FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
   __ Branch(&stepping_prepared);

   __ bind(&stack_overflow);
@@ -1130,10 +1129,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   Register feedback_vector = a2;
   // Get the bytecode array from the function object and load it into
   // kInterpreterBytecodeArrayRegister.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       kScratchReg,
       FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       kInterpreterBytecodeArrayRegister,
       FieldMemOperand(kScratchReg, SharedFunctionInfo::kFunctionDataOffset));
   Label is_baseline;
@@ -1147,17 +1146,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   __ Branch(&compile_lazy, ne, kScratchReg, Operand(BYTECODE_ARRAY_TYPE));

   // Load the feedback vector from the closure.
-  __ LoadTaggedPointerField(
-      feedback_vector,
-      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
-  __ LoadTaggedPointerField(
-      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(feedback_vector, Cell::kValueOffset));

   Label push_stack_frame;
   // Check if feedback vector is valid. If valid, check for optimized code
   // and update invocation count. Otherwise, setup the stack frame.
-  __ LoadTaggedPointerField(
-      a4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+  __ LoadTaggedField(a4,
+                     FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
   __ Lhu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
   __ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE),
             Label::Distance::kNear);
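The feedback-vector hunks above and below all walk the same two-load chain, closure to JSFunction::kFeedbackCellOffset to Cell::kValueOffset, and only then type-check the result, since a closure that has never allocated a vector holds a placeholder in the cell. A simplified sketch, assuming plain pointer fields in place of tagged slots:

struct FeedbackCell {
  void* value;  // Cell::kValueOffset: FeedbackVector, or a sentinel
};
struct JSFunction {
  FeedbackCell* feedback_cell;  // JSFunction::kFeedbackCellOffset
};

void* LoadFeedbackVector(const JSFunction* closure) {
  // Two dependent tagged loads; the caller still has to check the map's
  // instance type before trusting the result.
  return closure->feedback_cell->value;
}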
@@ -1331,16 +1329,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   __ bind(&is_baseline);
   {
     // Load the feedback vector from the closure.
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         feedback_vector,
         FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
-    __ LoadTaggedPointerField(
-        feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+    __ LoadTaggedField(feedback_vector,
+                       FieldMemOperand(feedback_vector, Cell::kValueOffset));

     Label install_baseline_code;
     // Check if feedback vector is valid. If not, call prepare for baseline to
     // allocate it.
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         t0, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
     __ Lhu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
     __ Branch(&install_baseline_code, ne, t0, Operand(FEEDBACK_VECTOR_TYPE));
@@ -1381,7 +1379,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,

   // Push the arguments.
   __ PushArray(start_address, num_args,
-               TurboAssembler::PushArrayOrder::kReverse);
+               MacroAssembler::PushArrayOrder::kReverse);
 }

 // static
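The TurboAssembler::PushArrayOrder to MacroAssembler::PushArrayOrder respelling just above, and the MacroAssembler::kCountIs* constants later in this diff, follow from folding the old TurboAssembler into MacroAssembler on top of a renamed MacroAssemblerBase. A compilable sketch of the resulting shape; the member lists here are assumptions, not the real headers:

class MacroAssemblerBase {
  // shared, platform-independent assembler state lives here (assumed)
};

class MacroAssembler : public MacroAssemblerBase {
 public:
  // Nested enum formerly reached through TurboAssembler::...;
  // kNormal is an assumed sibling of the kReverse seen in this diff.
  enum PushArrayOrder { kNormal, kReverse };
};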
@@ -1511,16 +1509,16 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
   // get the custom trampoline, otherwise grab the entry address of the global
   // trampoline.
   __ LoadWord(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset));
   __ GetObjectType(t0, kInterpreterDispatchTableRegister,
                    kInterpreterDispatchTableRegister);
   __ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister,
             Operand(INTERPRETER_DATA_TYPE), Label::Distance::kNear);

-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
   __ LoadCodeEntry(t0, t0);
   __ BranchShort(&trampoline_loaded);
@@ -1778,7 +1776,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,

   // Load deoptimization data from the code object.
   // <deopt_data> = <code>[#deoptimization_data_offset]
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       a1,
       MemOperand(a0,
                  InstructionStream::kDeoptimizationDataOrInterpreterDataOffset -
@@ -2152,7 +2150,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
     __ SubWord(scratch, sp, Operand(scratch));
     __ LoadRoot(hole_value, RootIndex::kTheHoleValue);
     __ bind(&loop);
-    __ LoadTaggedPointerField(a5, MemOperand(src));
+    __ LoadTaggedField(a5, MemOperand(src));
     __ AddWord(src, src, kTaggedSize);
     __ Branch(&push, ne, a5, Operand(hole_value), Label::Distance::kNear);
     __ LoadRoot(a5, RootIndex::kUndefinedValue);
@@ -2190,8 +2188,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
     UseScratchRegisterScope temps(masm);
     Register scratch = temps.Acquire();
     __ JumpIfSmi(a3, &new_target_not_constructor);
-    __ LoadTaggedPointerField(scratch,
-                              FieldMemOperand(a3, HeapObject::kMapOffset));
+    __ LoadTaggedField(scratch, FieldMemOperand(a3, HeapObject::kMapOffset));
     __ Lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
     __ And(scratch, scratch, Operand(Map::Bits1::IsConstructorBit::kMask));
     __ Branch(&new_target_constructor, ne, scratch, Operand(zero_reg),
@@ -2271,7 +2268,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
   __ AssertCallableFunction(a1);

   Label class_constructor;
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
   __ Load32U(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
   __ And(kScratchReg, a3,
@@ -2281,8 +2278,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
   // Enter the context of the function; ToObject has to run in the function
   // context, and we also need to take the global proxy from the function
   // context in case of conversion.
-  __ LoadTaggedPointerField(cp,
-                            FieldMemOperand(a1, JSFunction::kContextOffset));
+  __ LoadTaggedField(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
   // We need to convert the receiver for non-native sloppy mode functions.
   Label done_convert;
   __ Load32U(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
@@ -2337,7 +2333,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
       __ Pop(a0, a1);
       __ SmiUntag(a0);
     }
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
     __ bind(&convert_receiver);
   }
@@ -2379,7 +2375,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
     Register bound_argv = a2;
     // Load [[BoundArguments]] into a2 and length of that into a4.
     Label no_bound_arguments;
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         bound_argv, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
     __ SmiUntagField(bound_argc,
                      FieldMemOperand(bound_argv, FixedArray::kLengthOffset));
@@ -2423,7 +2419,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
       __ SubWord(a4, a4, Operand(1));
       __ Branch(&done_loop, lt, a4, Operand(zero_reg), Label::Distance::kNear);
       __ CalcScaledAddress(a5, a2, a4, kTaggedSizeLog2);
-      __ LoadAnyTaggedField(kScratchReg, MemOperand(a5));
+      __ LoadTaggedField(kScratchReg, MemOperand(a5));
       __ Push(kScratchReg);
       __ Branch(&loop);
       __ bind(&done_loop);
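The countdown loop above pushes [[BoundArguments]] from the highest index down to 0, which is what the decrement-branch-push shape computes. The same logic in ordinary C++, as a hedged restatement rather than the real stack engine:

#include <cstddef>
#include <vector>

void PushBoundArguments(const std::vector<void*>& bound_argv,
                        std::vector<void*>* stack) {
  // Start at the length, stop after index 0, mirroring the SubWord/Branch
  // pair in the hunk above.
  for (size_t i = bound_argv.size(); i-- > 0;) {
    stack->push_back(bound_argv[i]);
  }
}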
@@ -2449,8 +2445,8 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
   {
     UseScratchRegisterScope temps(masm);
     Register scratch = temps.Acquire();
-    __ LoadAnyTaggedField(
-        scratch, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
+    __ LoadTaggedField(scratch,
+                       FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
     __ StoreReceiver(scratch, a0, kScratchReg);
   }

@@ -2458,7 +2454,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
   Generate_PushBoundArguments(masm);

   // Call the [[BoundTargetFunction]] via the Call builtin.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
   __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
           RelocInfo::CODE_TARGET);
@@ -2548,7 +2544,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
   Label call_generic_stub;

   // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
   __ Load32U(a4, FieldMemOperand(a4, SharedFunctionInfo::kFlagsOffset));
   __ And(a4, a4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
@@ -2587,12 +2583,12 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
     __ Branch(&skip, ne, a1, Operand(a3), Label::Distance::kNear);
 #endif
   }
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
   __ bind(&skip);

   // Construct the [[BoundTargetFunction]] via the Construct builtin.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
   __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
 }
@@ -2615,7 +2611,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
     temps.Include(t0, t1);
     Register map = temps.Acquire();
     Register scratch = temps.Acquire();
-    __ LoadTaggedPointerField(map, FieldMemOperand(a1, HeapObject::kMapOffset));
+    __ LoadTaggedField(map, FieldMemOperand(a1, HeapObject::kMapOffset));
     __ Lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
     __ And(scratch, scratch, Operand(Map::Bits1::IsConstructorBit::kMask));
     __ Branch(&non_constructor, eq, scratch, Operand(zero_reg));
@@ -3366,8 +3362,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
   __ SubWord(sp, sp, (PCA::kArgsLength + 1) * kSystemPointerSize);
   __ StoreWord(receiver,
                MemOperand(sp, (PCA::kThisIndex + 1) * kSystemPointerSize));
-  __ LoadAnyTaggedField(scratch,
-                        FieldMemOperand(callback, AccessorInfo::kDataOffset));
+  __ LoadTaggedField(scratch,
+                     FieldMemOperand(callback, AccessorInfo::kDataOffset));
   __ StoreWord(scratch,
                MemOperand(sp, (PCA::kDataIndex + 1) * kSystemPointerSize));
   __ LoadRoot(scratch, RootIndex::kUndefinedValue);
@@ -3385,8 +3381,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
   DCHECK_EQ(0, Smi::zero().ptr());
   __ StoreWord(zero_reg, MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) *
                                             kSystemPointerSize));
-  __ LoadTaggedPointerField(
-      scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+  __ LoadTaggedField(scratch,
+                     FieldMemOperand(callback, AccessorInfo::kNameOffset));
   __ StoreWord(scratch, MemOperand(sp, 0 * kSystemPointerSize));

   // v8::PropertyCallbackInfo::args_ array and name handle.
@@ -3677,10 +3673,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,

   // Get the InstructionStream object from the shared function info.
   Register code_obj = s1;
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       code_obj,
       FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       code_obj,
       FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));

@@ -3719,11 +3715,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,

   // Replace BytecodeOffset with the feedback vector.
   Register feedback_vector = a2;
-  __ LoadTaggedPointerField(
-      feedback_vector,
-      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
-  __ LoadTaggedPointerField(
-      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(feedback_vector, Cell::kValueOffset));
   Label install_baseline_code;
   // Check if feedback vector is valid. If not, call prepare for baseline to
   // allocate it.
@@ -65,7 +65,7 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
   }
   __ CmpS32(scratch1, Operand(INTERPRETER_DATA_TYPE));
   __ bne(&done);
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       sfi_data,
       FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));

@@ -120,10 +120,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,

   // Get the InstructionStream object from the shared function info.
   Register code_obj = r8;
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       code_obj,
       FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       code_obj,
       FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));

@@ -155,11 +155,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,

   // Load the feedback vector.
   Register feedback_vector = r4;
-  __ LoadTaggedPointerField(
-      feedback_vector,
-      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
-  __ LoadTaggedPointerField(
-      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(feedback_vector, Cell::kValueOffset));

   Label install_baseline_code;
   // Check if feedback vector is valid. If not, call prepare for baseline to
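Several hunks below rewrite DropArguments and DropArgumentsAndPushNewReceiver call sites without changing what the flags mean. A sketch of the assumed semantics of those mode arguments; the untagging and byte-scaling details are assumptions, not verified against the headers:

#include <cstddef>

enum ArgumentsCountType { kCountIsInteger, kCountIsSmi, kCountIsBytes };
enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver };

size_t SlotsToDrop(size_t raw_count, ArgumentsCountType type,
                   ArgumentsCountMode mode, size_t pointer_size) {
  size_t slots = raw_count;
  if (type == kCountIsSmi) slots >>= 1;              // untag (Smi tag 0, assumed)
  if (type == kCountIsBytes) slots /= pointer_size;  // bytes -> stack slots
  if (mode == kCountExcludesReceiver) slots += 1;    // account for the receiver
  return slots;
}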
@@ -320,7 +319,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,

   // Load deoptimization data from the code object.
   // <deopt_data> = <code>[#deoptimization_data_offset]
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r3,
       FieldMemOperand(
           r2, InstructionStream::kDeoptimizationDataOrInterpreterDataOffset));
@@ -428,8 +427,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
     // Leave construct frame.
   }
   // Remove caller arguments from the stack and return.
-  __ DropArguments(scratch, TurboAssembler::kCountIsSmi,
-                   TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(scratch, MacroAssembler::kCountIsSmi,
+                   MacroAssembler::kCountIncludesReceiver);
   __ Ret();

   __ bind(&stack_overflow);
@@ -472,7 +471,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   //  -- sp[4*kSystemPointerSize]: context
   // -----------------------------------

-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
   __ LoadU32(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset));
   __ DecodeField<SharedFunctionInfo::FunctionKindBits>(r6);
@@ -584,8 +583,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   __ LeaveFrame(StackFrame::CONSTRUCT);

   // Remove caller arguments from the stack and return.
-  __ DropArguments(r3, TurboAssembler::kCountIsSmi,
-                   TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(r3, MacroAssembler::kCountIsSmi,
+                   MacroAssembler::kCountIncludesReceiver);
   __ Ret();

   __ bind(&check_receiver);
@@ -633,10 +632,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   __ AssertGeneratorObject(r3);

   // Load suspended function and context.
-  __ LoadTaggedPointerField(
-      r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
-  __ LoadTaggedPointerField(cp,
-                            FieldMemOperand(r6, JSFunction::kContextOffset));
+  __ LoadTaggedField(r6,
+                     FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
+  __ LoadTaggedField(cp, FieldMemOperand(r6, JSFunction::kContextOffset));

   // Flood function if we are stepping.
   Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
@@ -677,12 +675,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // -----------------------------------

   // Copy the function arguments from the generator object's register file.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
   __ LoadU16(
       r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
   __ SubS64(r5, r5, Operand(kJSArgcReceiverSlots));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r4,
       FieldMemOperand(r3, JSGeneratorObject::kParametersAndRegistersOffset));
   {
@@ -692,24 +690,24 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ blt(&done_loop);
     __ ShiftLeftU64(r1, r5, Operand(kTaggedSizeLog2));
     __ la(scratch, MemOperand(r4, r1));
-    __ LoadAnyTaggedField(scratch,
-                          FieldMemOperand(scratch, FixedArray::kHeaderSize));
+    __ LoadTaggedField(scratch,
+                       FieldMemOperand(scratch, FixedArray::kHeaderSize));
     __ Push(scratch);
     __ b(&loop);
     __ bind(&done_loop);

     // Push receiver.
-    __ LoadAnyTaggedField(
-        scratch, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
+    __ LoadTaggedField(scratch,
+                       FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
     __ Push(scratch);
   }

   // Underlying function needs to have bytecode available.
   if (v8_flags.debug_code) {
     Label is_baseline;
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         r5, FieldMemOperand(r5, SharedFunctionInfo::kFunctionDataOffset));
     GetSharedFunctionInfoBytecodeOrBaseline(masm, r5, ip, &is_baseline);
     __ CompareObjectType(r5, r5, r5, BYTECODE_ARRAY_TYPE);
@@ -719,7 +717,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {

   // Resume (Ignition/TurboFan) generator object.
   {
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         r2, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
     __ LoadS16(
         r2,
@@ -730,7 +728,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ mov(r5, r3);
     __ mov(r3, r6);
     static_assert(kJavaScriptCallCodeStartRegister == r4, "ABI mismatch");
-    __ LoadTaggedPointerField(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
+    __ LoadTaggedField(r4, FieldMemOperand(r3, JSFunction::kCodeOffset));
     __ JumpCodeObject(r4);
   }

@@ -742,8 +740,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ PushRoot(RootIndex::kTheHoleValue);
     __ CallRuntime(Runtime::kDebugOnFunctionCall);
     __ Pop(r3);
-    __ LoadTaggedPointerField(
-        r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
+    __ LoadTaggedField(r6,
+                       FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
   }
   __ b(&stepping_prepared);

@@ -753,8 +751,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ Push(r3);
     __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
     __ Pop(r3);
-    __ LoadTaggedPointerField(
-        r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
+    __ LoadTaggedField(r6,
+                       FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
   }
   __ b(&stepping_prepared);

@@ -1148,8 +1146,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
   // Leave the frame (also dropping the register file).
   __ LeaveFrame(StackFrame::INTERPRETED);

-  __ DropArguments(params_size, TurboAssembler::kCountIsBytes,
-                   TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(params_size, MacroAssembler::kCountIsBytes,
+                   MacroAssembler::kCountIncludesReceiver);
 }

 // Advance the current bytecode offset. This simulates what all bytecode
@@ -1245,11 +1243,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
       BaselineOutOfLinePrologueDescriptor::kClosure);
   // Load the feedback vector from the closure.
   Register feedback_vector = ip;
-  __ LoadTaggedPointerField(
-      feedback_vector,
-      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
-  __ LoadTaggedPointerField(
-      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(feedback_vector, Cell::kValueOffset));
   __ AssertFeedbackVector(feedback_vector, r1);

   // Check for an tiering state.
@@ -1406,10 +1403,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(

   // Get the bytecode array from the function object and load it into
   // kInterpreterBytecodeArrayRegister.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r6, FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
   // Load original bytecode array or the debug copy.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       kInterpreterBytecodeArrayRegister,
       FieldMemOperand(r6, SharedFunctionInfo::kFunctionDataOffset));

@@ -1425,17 +1422,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   __ bne(&compile_lazy);

   // Load the feedback vector from the closure.
-  __ LoadTaggedPointerField(
-      feedback_vector,
-      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
-  __ LoadTaggedPointerField(
-      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ LoadTaggedField(feedback_vector,
+                     FieldMemOperand(feedback_vector, Cell::kValueOffset));

   Label push_stack_frame;
   // Check if feedback vector is valid. If valid, check for optimized code
   // and update invocation count. Otherwise, setup the stack frame.
-  __ LoadTaggedPointerField(
-      r6, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+  __ LoadTaggedField(r6,
+                     FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
   __ LoadU16(r6, FieldMemOperand(r6, Map::kInstanceTypeOffset));
   __ CmpS64(r6, Operand(FEEDBACK_VECTOR_TYPE));
   __ bne(&push_stack_frame);
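The FEEDBACK_VECTOR_TYPE comparisons in the surrounding hunks all discriminate through the object's map: load the map, load its instance type, compare. In simplified C++, with a placeholder type constant standing in for the real enum value:

#include <cstdint>

struct Map {
  uint16_t instance_type;  // Map::kInstanceTypeOffset
};
struct HeapObject {
  const Map* map;  // HeapObject::kMapOffset
};

constexpr uint16_t kFeedbackVectorType = 0x00F1;  // placeholder value

bool IsFeedbackVector(const HeapObject* object) {
  return object->map->instance_type == kFeedbackVectorType;
}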
@@ -1611,16 +1607,16 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   __ bind(&is_baseline);
   {
     // Load the feedback vector from the closure.
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         feedback_vector,
         FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
-    __ LoadTaggedPointerField(
-        feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
+    __ LoadTaggedField(feedback_vector,
+                       FieldMemOperand(feedback_vector, Cell::kValueOffset));

     Label install_baseline_code;
     // Check if feedback vector is valid. If not, call prepare for baseline to
     // allocate it.
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         ip, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
     __ LoadU16(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
     __ CmpS32(ip, Operand(FEEDBACK_VECTOR_TYPE));
@@ -1657,7 +1653,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
   __ SubS64(start_address, start_address, scratch);
   // Push the arguments.
   __ PushArray(start_address, num_args, r1, scratch,
-               TurboAssembler::PushArrayOrder::kReverse);
+               MacroAssembler::PushArrayOrder::kReverse);
 }

 // static
@@ -1792,16 +1788,16 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
   // get the custom trampoline, otherwise grab the entry address of the global
   // trampoline.
   __ LoadU64(r4, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r4, FieldMemOperand(r4, SharedFunctionInfo::kFunctionDataOffset));
   __ CompareObjectType(r4, kInterpreterDispatchTableRegister,
                        kInterpreterDispatchTableRegister,
                        INTERPRETER_DATA_TYPE);
   __ bne(&builtin_trampoline);

-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r4, FieldMemOperand(r4, InterpreterData::kInterpreterTrampolineOffset));
   __ LoadCodeEntry(r4, r4);
   __ b(&trampoline_loaded);
@@ -2022,8 +2018,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
     __ LoadU64(r4, MemOperand(sp, 2 * kSystemPointerSize));  // argArray

     __ bind(&done);
-    __ DropArgumentsAndPushNewReceiver(r2, r7, TurboAssembler::kCountIsInteger,
-                                       TurboAssembler::kCountIncludesReceiver);
+    __ DropArgumentsAndPushNewReceiver(r2, r7, MacroAssembler::kCountIsInteger,
+                                       MacroAssembler::kCountIncludesReceiver);
   }

   // ----------- S t a t e -------------
@@ -2107,8 +2103,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
     __ LoadU64(r4, MemOperand(sp, 3 * kSystemPointerSize));  // argArray

     __ bind(&done);
-    __ DropArgumentsAndPushNewReceiver(r2, r7, TurboAssembler::kCountIsInteger,
-                                       TurboAssembler::kCountIncludesReceiver);
+    __ DropArgumentsAndPushNewReceiver(r2, r7, MacroAssembler::kCountIsInteger,
+                                       MacroAssembler::kCountIncludesReceiver);
   }

   // ----------- S t a t e -------------
@@ -2157,8 +2153,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
     __ blt(&done);
     __ LoadU64(r5, MemOperand(sp, 3 * kSystemPointerSize));  // argArray
     __ bind(&done);
-    __ DropArgumentsAndPushNewReceiver(r2, r6, TurboAssembler::kCountIsInteger,
-                                       TurboAssembler::kCountIncludesReceiver);
+    __ DropArgumentsAndPushNewReceiver(r2, r6, MacroAssembler::kCountIsInteger,
+                                       MacroAssembler::kCountIncludesReceiver);
   }

   // ----------- S t a t e -------------
@@ -2240,8 +2236,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
     // Allow r4 to be a FixedArray, or a FixedDoubleArray if r6 == 0.
     Label ok, fail;
     __ AssertNotSmi(r4);
-    __ LoadTaggedPointerField(scratch,
-                              FieldMemOperand(r4, HeapObject::kMapOffset));
+    __ LoadTaggedField(scratch, FieldMemOperand(r4, HeapObject::kMapOffset));
     __ LoadS16(scratch,
                FieldMemOperand(scratch, Map::kInstanceTypeOffset));
     __ CmpS64(scratch, Operand(FIXED_ARRAY_TYPE));
@@ -2277,7 +2272,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
            Operand(FixedArray::kHeaderSize - kHeapObjectTag - kTaggedSize));
     __ mov(r1, r6);
     __ bind(&loop);
-    __ LoadAnyTaggedField(scratch, MemOperand(r4, kTaggedSize), r0);
+    __ LoadTaggedField(scratch, MemOperand(r4, kTaggedSize), r0);
     __ la(r4, MemOperand(r4, kTaggedSize));
     __ CompareRoot(scratch, RootIndex::kTheHoleValue);
     __ bne(&skip, Label::kNear);
@@ -2312,8 +2307,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
   if (mode == CallOrConstructMode::kConstruct) {
     Label new_target_constructor, new_target_not_constructor;
     __ JumpIfSmi(r5, &new_target_not_constructor);
-    __ LoadTaggedPointerField(scratch,
-                              FieldMemOperand(r5, HeapObject::kMapOffset));
+    __ LoadTaggedField(scratch, FieldMemOperand(r5, HeapObject::kMapOffset));
     __ LoadU8(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
     __ tmll(scratch, Operand(Map::Bits1::IsConstructorBit::kShift));
     __ bne(&new_target_constructor);
@@ -2397,14 +2391,13 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
   // -----------------------------------
   __ AssertCallableFunction(r3);

-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));

   // Enter the context of the function; ToObject has to run in the function
   // context, and we also need to take the global proxy from the function
   // context in case of conversion.
-  __ LoadTaggedPointerField(cp,
-                            FieldMemOperand(r3, JSFunction::kContextOffset));
+  __ LoadTaggedField(cp, FieldMemOperand(r3, JSFunction::kContextOffset));
   // We need to convert the receiver for non-native sloppy mode functions.
   Label done_convert;
   __ LoadU32(r5, FieldMemOperand(r4, SharedFunctionInfo::kFlagsOffset));
@@ -2458,7 +2451,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
       __ Pop(r2, r3);
       __ SmiUntag(r2);
     }
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
     __ bind(&convert_receiver);
   }
@@ -2489,7 +2482,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {

     // Load [[BoundArguments]] into r4 and length of that into r6.
     Label no_bound_arguments;
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         r4, FieldMemOperand(r3, JSBoundFunction::kBoundArgumentsOffset));
     __ SmiUntagField(r6, FieldMemOperand(r4, FixedArray::kLengthOffset));
     __ LoadAndTestP(r6, r6);
@@ -2535,7 +2528,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
       __ bind(&loop);
       __ SubS64(r1, r6, Operand(1));
       __ ShiftLeftU64(r1, r1, Operand(kTaggedSizeLog2));
-      __ LoadAnyTaggedField(scratch, MemOperand(r4, r1), r0);
+      __ LoadTaggedField(scratch, MemOperand(r4, r1), r0);
       __ Push(scratch);
       __ SubS64(r6, r6, Operand(1));
       __ bgt(&loop);
@@ -2559,15 +2552,15 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
   __ AssertBoundFunction(r3);

   // Patch the receiver to [[BoundThis]].
-  __ LoadAnyTaggedField(r5,
-                        FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset));
+  __ LoadTaggedField(r5,
+                     FieldMemOperand(r3, JSBoundFunction::kBoundThisOffset));
   __ StoreReceiver(r5, r2, r1);

   // Push the [[BoundArguments]] onto the stack.
   Generate_PushBoundArguments(masm);

   // Call the [[BoundTargetFunction]] via the Call builtin.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r3, FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
   __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
           RelocInfo::CODE_TARGET);
@@ -2667,7 +2660,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
   Label call_generic_stub;

   // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
   __ LoadU32(r6, FieldMemOperand(r6, SharedFunctionInfo::kFlagsOffset));
   __ AndP(r6, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
@@ -2698,12 +2691,12 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
     Label skip;
     __ CompareTagged(r3, r5);
     __ bne(&skip);
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         r5, FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
     __ bind(&skip);

   // Construct the [[BoundTargetFunction]] via the Construct builtin.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       r3, FieldMemOperand(r3, JSBoundFunction::kBoundTargetFunctionOffset));
   __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
 }
@@ -2727,8 +2720,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
   __ JumpIfSmi(target, &non_constructor);

   // Check if target has a [[Construct]] internal method.
-  __ LoadTaggedPointerField(map,
-                            FieldMemOperand(target, HeapObject::kMapOffset));
+  __ LoadTaggedField(map, FieldMemOperand(target, HeapObject::kMapOffset));
   {
     Register flags = r4;
     DCHECK(!AreAliased(argc, target, map, instance_type, flags));
@@ -2811,13 +2803,12 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
   Register scratch = r0;
   Label allocate_vector, done;

-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       vector, FieldMemOperand(kWasmInstanceRegister,
                               WasmInstanceObject::kFeedbackVectorsOffset));
   __ ShiftLeftU64(scratch, func_index, Operand(kTaggedSizeLog2));
   __ AddS64(vector, vector, scratch);
-  __ LoadTaggedPointerField(vector,
-                            FieldMemOperand(vector, FixedArray::kHeaderSize));
+  __ LoadTaggedField(vector, FieldMemOperand(vector, FixedArray::kHeaderSize));
   __ JumpIfSmi(vector, &allocate_vector);
   __ bind(&done);
   __ push(kWasmInstanceRegister);
||||||
@ -3504,16 +3495,16 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
|
|||||||
|
|
||||||
__ push(receiver);
|
__ push(receiver);
|
||||||
// Push data from AccessorInfo.
|
// Push data from AccessorInfo.
|
||||||
__ LoadAnyTaggedField(
|
__ LoadTaggedField(scratch,
|
||||||
scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset), r1);
|
FieldMemOperand(callback, AccessorInfo::kDataOffset), r1);
|
||||||
__ push(scratch);
|
__ push(scratch);
|
||||||
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
|
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
|
||||||
__ Push(scratch, scratch);
|
__ Push(scratch, scratch);
|
||||||
__ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
|
__ Move(scratch, ExternalReference::isolate_address(masm->isolate()));
|
||||||
__ Push(scratch, holder);
|
__ Push(scratch, holder);
|
||||||
__ Push(Smi::zero()); // should_throw_on_error -> false
|
__ Push(Smi::zero()); // should_throw_on_error -> false
|
||||||
__ LoadTaggedPointerField(
|
__ LoadTaggedField(scratch,
|
||||||
scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset), r1);
|
FieldMemOperand(callback, AccessorInfo::kNameOffset), r1);
|
||||||
__ push(scratch);
|
__ push(scratch);
|
||||||
|
|
||||||
// v8::PropertyCallbackInfo::args_ array and name handle.
|
// v8::PropertyCallbackInfo::args_ array and name handle.
|
||||||
|
@@ -61,6 +61,7 @@ extern runtime WasmStringViewWtf8Slice(
     Context, ByteArray, Number, Number): String;
 extern runtime WasmStringCompare(NoContext, String, String): Smi;
 extern runtime WasmStringFromCodePoint(Context, Number): String;
+extern runtime WasmStringHash(NoContext, String): Smi;
 extern runtime WasmJSToWasmObject(Context, JSAny, Smi): JSAny;
 }

@@ -699,6 +700,10 @@ builtin ThrowWasmTrapArrayTooLarge(): JSAny {
   tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapArrayTooLarge));
 }

+builtin ThrowWasmTrapStringOffsetOutOfBounds(): JSAny {
+  tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapStringOffsetOutOfBounds));
+}
+
 macro TryNumberToIntptr(value: JSAny): intptr labels Failure {
   typeswitch (value) {
     case (s: Smi): {
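Each ThrowWasmTrap* builtin is a one-line trampoline that forwards a distinct MessageTemplate id to the shared WasmTrap runtime entry. A self-contained sketch of that shape (names mirror the Torque above; the body of WasmTrap here is a stand-in):

#include <cstdio>
#include <cstdlib>

// Minimal model of the trap builtins: every trap tail-calls one shared
// WasmTrap entry with a message-template id. Illustrative only.
enum class MessageTemplate {
  kWasmTrapArrayTooLarge,
  kWasmTrapStringOffsetOutOfBounds,
};

[[noreturn]] void WasmTrap(MessageTemplate id) {
  std::fprintf(stderr, "wasm trap: %d\n", static_cast<int>(id));
  std::abort();
}

[[noreturn]] void ThrowWasmTrapStringOffsetOutOfBounds() {
  WasmTrap(MessageTemplate::kWasmTrapStringOffsetOutOfBounds);
}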
@@ -939,6 +944,13 @@ builtin WasmStringNewWtf16Array(
   }
 }

+// Contract: input is any string, output is a string that the TF operator
+// "StringPrepareForGetCodeunit" can handle.
+builtin WasmStringAsWtf16(str: String): String {
+  const cons = Cast<ConsString>(str) otherwise return str;
+  return Flatten(cons);
+}
+
 builtin WasmStringConst(index: uint32): String {
   const instance = LoadInstanceFromFrame();
   tail runtime::WasmStringConst(
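WasmStringAsWtf16 only does work for cons strings (ropes): any other representation is already directly indexable. A toy model of the flattening contract, with the caveat that V8's real ConsString and Flatten differ in representation and cost:

#include <string>

// Toy rope node: either a flat leaf or a cons of two children.
struct Str {
  std::u16string flat;        // used when this node is a leaf
  const Str* left = nullptr;  // non-null iff this node is a cons
  const Str* right = nullptr;
};

std::u16string Flatten(const Str& s) {
  if (s.left == nullptr) return s.flat;         // already flat: return as-is
  return Flatten(*s.left) + Flatten(*s.right);  // concatenate the rope
}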
@@ -1254,6 +1266,11 @@ builtin WasmStringFromCodePoint(codePoint: uint32): String {
       LoadContextFromFrame(), WasmUint32ToNumber(codePoint));
 }

+builtin WasmStringHash(string: String): int32 {
+  const result = runtime::WasmStringHash(kNoContext, string);
+  return SmiToInt32(result);
+}
+
 builtin WasmExternInternalize(externObject: JSAny): JSAny {
   const instance = LoadInstanceFromFrame();
   const context = LoadContextFromInstance(instance);
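The new builtin simply forwards to the WasmStringHash runtime function declared earlier and untags the returned Smi. A sketch of the shape such a hash must have, assuming only that the result fits in a Smi; the mixing function here is illustrative, not V8's:

#include <cstdint>
#include <string>

// Hypothetical string hash clamped to Smi range, mirroring the builtin's
// runtime-call-then-SmiToInt32 shape.
int32_t WasmStringHashSketch(const std::u16string& s) {
  uint32_t h = 0;
  for (char16_t c : s) h = h * 31 + static_cast<uint32_t>(c);
  return static_cast<int32_t>(h & 0x3FFFFFFFu);  // clamp to Smi range
}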
@@ -125,7 +125,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {

   // Remove caller arguments from the stack and return.
   __ DropArguments(rbx, rcx, MacroAssembler::kCountIsSmi,
-                   TurboAssembler::kCountIncludesReceiver);
+                   MacroAssembler::kCountIncludesReceiver);

   __ ret(0);

@@ -171,9 +171,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   // -----------------------------------

   const TaggedRegister shared_function_info(rbx);
-  __ LoadTaggedPointerField(
-      shared_function_info,
-      FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadTaggedField(shared_function_info,
+                     FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
   __ movl(rbx,
           FieldOperand(shared_function_info, SharedFunctionInfo::kFlagsOffset));
   __ DecodeField<SharedFunctionInfo::FunctionKindBits>(rbx);
@@ -282,7 +281,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
   __ LeaveFrame(StackFrame::CONSTRUCT);
   // Remove caller arguments from the stack and return.
   __ DropArguments(rbx, rcx, MacroAssembler::kCountIsSmi,
-                   TurboAssembler::kCountIncludesReceiver);
+                   MacroAssembler::kCountIncludesReceiver);
   __ ret(0);

   // If the result is a smi, it is *not* an object in the ECMA sense.
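The kCountIs* and kCount*Receiver flags, which this commit moves from TurboAssembler to MacroAssembler, tell DropArguments how to interpret the count operand. A scalar model of the slot arithmetic; the 1-bit Smi tag and 8-byte stack slot are stated assumptions, not guaranteed to match every V8 configuration:

#include <cstddef>

enum CountMode { kCountIsInteger, kCountIsSmi, kCountIsBytes };
enum ReceiverMode { kCountIncludesReceiver, kCountExcludesReceiver };

size_t SlotsToDrop(size_t count, CountMode cm, ReceiverMode rm) {
  if (cm == kCountIsSmi) count >>= 1;            // untag the Smi count (assumed 1-bit tag)
  if (cm == kCountIsBytes) count /= 8;           // bytes -> slots (assumed 8-byte slots)
  if (rm == kCountExcludesReceiver) count += 1;  // also pop the receiver slot
  return count;
}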
@@ -701,7 +700,7 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
   __ CmpInstanceType(scratch1, INTERPRETER_DATA_TYPE);
   __ j(not_equal, &done, Label::kNear);

-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       sfi_data, FieldOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));

   __ bind(&done);
@@ -729,9 +728,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   Register decompr_scratch1 = COMPRESS_POINTERS_BOOL ? r8 : no_reg;

   // Load suspended function and context.
-  __ LoadTaggedPointerField(
-      rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
-  __ LoadTaggedPointerField(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+  __ LoadTaggedField(rdi,
+                     FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
+  __ LoadTaggedField(rsi, FieldOperand(rdi, JSFunction::kContextOffset));

   // Flood function if we are stepping.
   Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
@@ -768,12 +767,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // -----------------------------------

   // Copy the function arguments from the generator object's register file.
-  __ LoadTaggedPointerField(
-      rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadTaggedField(rcx,
+                     FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
   __ movzxwq(
       rcx, FieldOperand(rcx, SharedFunctionInfo::kFormalParameterCountOffset));
   __ decq(rcx);  // Exclude receiver.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       rbx, FieldOperand(rdx, JSGeneratorObject::kParametersAndRegistersOffset));

   {
@@ -781,24 +780,23 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ bind(&loop);
     __ decq(rcx);
     __ j(less, &done_loop, Label::kNear);
-    __ PushTaggedAnyField(
+    __ PushTaggedField(
         FieldOperand(rbx, rcx, times_tagged_size, FixedArray::kHeaderSize),
         decompr_scratch1);
     __ jmp(&loop);
     __ bind(&done_loop);

     // Push the receiver.
-    __ PushTaggedPointerField(
-        FieldOperand(rdx, JSGeneratorObject::kReceiverOffset),
-        decompr_scratch1);
+    __ PushTaggedField(FieldOperand(rdx, JSGeneratorObject::kReceiverOffset),
+                       decompr_scratch1);
   }

   // Underlying function needs to have bytecode available.
   if (v8_flags.debug_code) {
     Label is_baseline, ok;
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         rcx, FieldOperand(rcx, SharedFunctionInfo::kFunctionDataOffset));
     GetSharedFunctionInfoBytecodeOrBaseline(masm, rcx, kScratchRegister,
                                             &is_baseline);
@@ -816,7 +814,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // Resume (Ignition/TurboFan) generator object.
   {
     __ PushReturnAddressFrom(rax);
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         rax, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
     __ movzxwq(rax, FieldOperand(
                         rax, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -824,7 +822,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     // pass in the generator object. In ordinary calls, new.target is always
     // undefined because generator functions are non-constructable.
     static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
-    __ LoadTaggedPointerField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
+    __ LoadTaggedField(rcx, FieldOperand(rdi, JSFunction::kCodeOffset));
     __ JumpCodeObject(rcx);
   }

@@ -837,8 +835,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ PushRoot(RootIndex::kTheHoleValue);
     __ CallRuntime(Runtime::kDebugOnFunctionCall);
     __ Pop(rdx);
-    __ LoadTaggedPointerField(
-        rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
+    __ LoadTaggedField(rdi,
+                       FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
   }
   __ jmp(&stepping_prepared);

@@ -848,8 +846,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
     __ Push(rdx);
     __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
     __ Pop(rdx);
-    __ LoadTaggedPointerField(
-        rdi, FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
+    __ LoadTaggedField(rdi,
+                       FieldOperand(rdx, JSGeneratorObject::kFunctionOffset));
   }
   __ jmp(&stepping_prepared);

@@ -890,8 +888,8 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
   __ leave();

   // Drop receiver + arguments.
-  __ DropArguments(params_size, scratch2, TurboAssembler::kCountIsBytes,
-                   TurboAssembler::kCountIncludesReceiver);
+  __ DropArguments(params_size, scratch2, MacroAssembler::kCountIsBytes,
+                   MacroAssembler::kCountIncludesReceiver);
 }

 // Tail-call |function_id| if |actual_state| == |expected_state|
@@ -1019,13 +1017,12 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   // Get the bytecode array from the function object and load it into
   // kInterpreterBytecodeArrayRegister.
   const TaggedRegister shared_function_info(kScratchRegister);
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       shared_function_info,
       FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadTaggedPointerField(
-      kInterpreterBytecodeArrayRegister,
-      FieldOperand(shared_function_info,
-                   SharedFunctionInfo::kFunctionDataOffset));
+  __ LoadTaggedField(kInterpreterBytecodeArrayRegister,
+                     FieldOperand(shared_function_info,
+                                  SharedFunctionInfo::kFunctionDataOffset));

   Label is_baseline;
   GetSharedFunctionInfoBytecodeOrBaseline(
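The rewritten loads walk the same two-hop chain as before: closure, then SharedFunctionInfo, then the function data. In plain pointer terms (struct layout illustrative; the real objects are tagged and compressed):

// Hypothetical flat layout for illustration only.
struct SharedFunctionInfo {
  void* function_data;  // BytecodeArray, InterpreterData, or baseline code
};
struct JSFunction {
  SharedFunctionInfo* shared_function_info;
};

void* FunctionDataOf(const JSFunction* closure) {
  SharedFunctionInfo* sfi = closure->shared_function_info;  // hop 1
  return sfi->function_data;                                // hop 2
}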
@@ -1040,10 +1037,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(

   // Load the feedback vector from the closure.
   TaggedRegister feedback_cell(feedback_vector);
-  __ LoadTaggedPointerField(
-      feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
-  __ LoadTaggedPointerField(feedback_vector,
+  __ LoadTaggedField(feedback_cell,
+                     FieldOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ LoadTaggedField(feedback_vector,
                      FieldOperand(feedback_cell, Cell::kValueOffset));

   Label push_stack_frame;
   // Check if feedback vector is valid. If valid, check for optimized code
@@ -1220,10 +1217,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(
   {
     // Load the feedback vector from the closure.
     TaggedRegister feedback_cell(feedback_vector);
-    __ LoadTaggedPointerField(
-        feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
-    __ LoadTaggedPointerField(feedback_vector,
+    __ LoadTaggedField(feedback_cell,
+                       FieldOperand(closure, JSFunction::kFeedbackCellOffset));
+    __ LoadTaggedField(feedback_vector,
                        FieldOperand(feedback_cell, Cell::kValueOffset));

     Label install_baseline_code;
     // Check if feedback vector is valid. If not, call prepare for baseline to
@@ -1265,7 +1262,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
                                    kSystemPointerSize));
   // Push the arguments.
   __ PushArray(start_address, num_args, scratch,
-               TurboAssembler::PushArrayOrder::kReverse);
+               MacroAssembler::PushArrayOrder::kReverse);
 }

 // static
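PushArrayOrder::kReverse walks the source array from the end, so the first argument is pushed last and lands nearest the top of a downward-growing stack. A scalar model of that order, not V8 code:

#include <cstddef>
#include <cstdint>
#include <vector>

void PushArrayReverse(std::vector<uint64_t>& stack,
                      const uint64_t* array, size_t count) {
  for (size_t i = count; i-- > 0;) {
    stack.push_back(array[i]);  // push_back models a machine stack push
  }
}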
@@ -1417,16 +1414,15 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
   // trampoline.
   __ movq(rbx, Operand(rbp, StandardFrameConstants::kFunctionOffset));
   const TaggedRegister shared_function_info(rbx);
-  __ LoadTaggedPointerField(
-      shared_function_info,
-      FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadTaggedPointerField(
-      rbx, FieldOperand(shared_function_info,
-                        SharedFunctionInfo::kFunctionDataOffset));
+  __ LoadTaggedField(shared_function_info,
+                     FieldOperand(rbx, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadTaggedField(rbx,
+                     FieldOperand(shared_function_info,
+                                  SharedFunctionInfo::kFunctionDataOffset));
   __ CmpObjectType(rbx, INTERPRETER_DATA_TYPE, kScratchRegister);
   __ j(not_equal, &builtin_trampoline, Label::kNear);

-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       rbx, FieldOperand(rbx, InterpreterData::kInterpreterTrampolineOffset));
   __ LoadCodeEntry(rbx, rbx);
   __ jmp(&trampoline_loaded, Label::kNear);
@@ -1555,10 +1551,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
       BaselineOutOfLinePrologueDescriptor::kClosure);
   // Load the feedback vector from the closure.
   TaggedRegister feedback_cell(feedback_vector);
-  __ LoadTaggedPointerField(
-      feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
-  __ LoadTaggedPointerField(feedback_vector,
+  __ LoadTaggedField(feedback_cell,
+                     FieldOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ LoadTaggedField(feedback_vector,
                      FieldOperand(feedback_cell, Cell::kValueOffset));
   __ AssertFeedbackVector(feedback_vector);

   // Check the tiering state.
@@ -1814,8 +1810,8 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
   }
   __ bind(&no_this_arg);
   __ DropArgumentsAndPushNewReceiver(rax, rdx, rcx,
-                                     TurboAssembler::kCountIsInteger,
-                                     TurboAssembler::kCountIncludesReceiver);
+                                     MacroAssembler::kCountIsInteger,
+                                     MacroAssembler::kCountIncludesReceiver);
 }

 // ----------- S t a t e -------------
@@ -1919,8 +1915,8 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
   __ movq(rbx, args[3]);  // argumentsList
   __ bind(&done);
   __ DropArgumentsAndPushNewReceiver(rax, rdx, rcx,
-                                     TurboAssembler::kCountIsInteger,
-                                     TurboAssembler::kCountIncludesReceiver);
+                                     MacroAssembler::kCountIsInteger,
+                                     MacroAssembler::kCountIncludesReceiver);
 }

 // ----------- S t a t e -------------
@@ -1971,8 +1967,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
   __ bind(&done);
   __ DropArgumentsAndPushNewReceiver(
       rax, masm->RootAsOperand(RootIndex::kUndefinedValue), rcx,
-      TurboAssembler::kCountIsInteger,
-      TurboAssembler::kCountIncludesReceiver);
+      MacroAssembler::kCountIsInteger,
+      MacroAssembler::kCountIncludesReceiver);
 }

 // ----------- S t a t e -------------
@@ -2097,8 +2093,8 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
     __ cmpl(current, num);
     __ j(equal, &done, Label::kNear);
     // Turn the hole into undefined as we go.
-    __ LoadAnyTaggedField(value, FieldOperand(src, current, times_tagged_size,
+    __ LoadTaggedField(value, FieldOperand(src, current, times_tagged_size,
                                               FixedArray::kHeaderSize));
     __ CompareRoot(value, RootIndex::kTheHoleValue);
     __ j(not_equal, &push, Label::kNear);
     __ LoadRoot(value, RootIndex::kUndefinedValue);
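The loop above copies a FixedArray that may contain holes, replacing each hole with undefined before pushing it as a call argument. The same logic in scalar form, with illustrative sentinel values:

#include <cstddef>
#include <cstdint>
#include <vector>

// Stand-ins for the TheHole and Undefined roots.
constexpr uint64_t kTheHole = 0x1;
constexpr uint64_t kUndefined = 0x2;

void PushElements(std::vector<uint64_t>& stack, const uint64_t* src, size_t n) {
  for (size_t i = 0; i < n; ++i) {
    uint64_t value = src[i];
    if (value == kTheHole) value = kUndefined;  // turn the hole into undefined
    stack.push_back(value);
  }
}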
@@ -2213,8 +2209,8 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
   StackArgumentsAccessor args(rax);
   __ AssertCallableFunction(rdi);

-  __ LoadTaggedPointerField(
-      rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadTaggedField(rdx,
+                     FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
   // ----------- S t a t e -------------
   //  -- rax : the number of arguments
   //  -- rdx : the shared function info.
@@ -2224,7 +2220,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
   // Enter the context of the function; ToObject has to run in the function
   // context, and we also need to take the global proxy from the function
   // context in case of conversion.
-  __ LoadTaggedPointerField(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+  __ LoadTaggedField(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
   // We need to convert the receiver for non-native sloppy mode functions.
   Label done_convert;
   __ testl(FieldOperand(rdx, SharedFunctionInfo::kFlagsOffset),
@@ -2281,7 +2277,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
         __ Pop(rax);
         __ SmiUntagUnsigned(rax);
       }
-      __ LoadTaggedPointerField(
+      __ LoadTaggedField(
           rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
       __ bind(&convert_receiver);
     }
@@ -2312,8 +2308,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {

   // Load [[BoundArguments]] into rcx and length of that into rbx.
   Label no_bound_arguments;
-  __ LoadTaggedPointerField(
-      rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
+  __ LoadTaggedField(rcx,
+                     FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
   __ SmiUntagFieldUnsigned(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
   __ testl(rbx, rbx);
   __ j(zero, &no_bound_arguments);
@@ -2354,7 +2350,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
     // Push [[BoundArguments]] to the stack.
     {
       Label loop;
-      __ LoadTaggedPointerField(
+      __ LoadTaggedField(
           rcx, FieldOperand(rdi, JSBoundFunction::kBoundArgumentsOffset));
       __ SmiUntagFieldUnsigned(rbx,
                                FieldOperand(rcx, FixedArray::kLengthOffset));
@@ -2364,9 +2360,9 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
       // offset in order to be able to move decl(rbx) right before the loop
       // condition. This is necessary in order to avoid flags corruption by
       // pointer decompression code.
-      __ LoadAnyTaggedField(
-          r12, FieldOperand(rcx, rbx, times_tagged_size,
+      __ LoadTaggedField(r12,
+                         FieldOperand(rcx, rbx, times_tagged_size,
                             FixedArray::kHeaderSize - kTaggedSize));
       __ Push(r12);
       __ decl(rbx);
       __ j(greater, &loop);
@@ -2391,15 +2387,14 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {

   // Patch the receiver to [[BoundThis]].
   StackArgumentsAccessor args(rax);
-  __ LoadAnyTaggedField(rbx,
-                        FieldOperand(rdi, JSBoundFunction::kBoundThisOffset));
+  __ LoadTaggedField(rbx, FieldOperand(rdi, JSBoundFunction::kBoundThisOffset));
   __ movq(args.GetReceiverOperand(), rbx);

   // Push the [[BoundArguments]] onto the stack.
   Generate_PushBoundArguments(masm);

   // Call the [[BoundTargetFunction]] via the Call builtin.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       rdi, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
   __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
           RelocInfo::CODE_TARGET);
@@ -2498,9 +2493,8 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {

   // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
   const TaggedRegister shared_function_info(rcx);
-  __ LoadTaggedPointerField(
-      shared_function_info,
-      FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadTaggedField(shared_function_info,
+                     FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
   __ testl(FieldOperand(shared_function_info, SharedFunctionInfo::kFlagsOffset),
            Immediate(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
   __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
@@ -2528,13 +2522,13 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
     Label done;
     __ cmpq(rdi, rdx);
     __ j(not_equal, &done, Label::kNear);
-    __ LoadTaggedPointerField(
+    __ LoadTaggedField(
         rdx, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
     __ bind(&done);
   }

   // Construct the [[BoundTargetFunction]] via the Construct builtin.
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       rdi, FieldOperand(rdi, JSBoundFunction::kBoundTargetFunctionOffset));
   __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
 }
@@ -2677,7 +2671,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,

   // Load deoptimization data from the code object.
   const TaggedRegister deopt_data(rbx);
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       deopt_data,
       FieldOperand(
           rax, InstructionStream::kDeoptimizationDataOrInterpreterDataOffset));
@@ -2776,12 +2770,11 @@ void Builtins::Generate_WasmLiftoffFrameSetup(MacroAssembler* masm) {
   __ Push(rbp);
   __ Move(rbp, rsp);
   __ Push(Immediate(StackFrame::TypeToMarker(StackFrame::WASM)));
-  __ LoadTaggedPointerField(
-      vector, FieldOperand(kWasmInstanceRegister,
+  __ LoadTaggedField(vector,
+                     FieldOperand(kWasmInstanceRegister,
                               WasmInstanceObject::kFeedbackVectorsOffset));
-  __ LoadTaggedPointerField(vector,
-                            FieldOperand(vector, func_index, times_tagged_size,
-                                         FixedArray::kHeaderSize));
+  __ LoadTaggedField(vector, FieldOperand(vector, func_index, times_tagged_size,
+                                          FixedArray::kHeaderSize));
   Label allocate_vector, done;
   __ JumpIfSmi(vector, &allocate_vector);
   __ bind(&done);
@@ -2931,7 +2924,7 @@ void PrepareForBuiltinCall(MacroAssembler* masm, MemOperand GCScanSlotPlace,
   __ pushq(function_data);
   // We had to prepare the parameters for the Call: we have to put the context
   // into rsi.
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       rsi,
       MemOperand(wasm_instance, wasm::ObjectAccess::ToTagged(
                                     WasmInstanceObject::kNativeContextOffset)));
@@ -3012,7 +3005,7 @@ void AllocateSuspender(MacroAssembler* masm, Register function_data,
   __ Move(GCScanSlotPlace, 2);
   __ Push(wasm_instance);
   __ Push(function_data);
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       kContextRegister,
       MemOperand(wasm_instance, wasm::ObjectAccess::ToTagged(
                                     WasmInstanceObject::kNativeContextOffset)));
@@ -3052,7 +3045,7 @@ void ReloadParentContinuation(MacroAssembler* masm, Register wasm_instance,
                  wasm::JumpBuffer::Retired);

   Register parent = tmp2;
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       parent,
       FieldOperand(active_continuation, WasmContinuationObject::kParentOffset));

@@ -3083,7 +3076,7 @@ void RestoreParentSuspender(MacroAssembler* masm, Register tmp1,
   __ StoreTaggedSignedField(
       FieldOperand(suspender, WasmSuspenderObject::kStateOffset),
       Smi::FromInt(WasmSuspenderObject::kInactive));
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       suspender, FieldOperand(suspender, WasmSuspenderObject::kParentOffset));
   __ CompareRoot(suspender, RootIndex::kUndefinedValue);
   Label undefined;
@@ -3111,19 +3104,19 @@ void LoadFunctionDataAndWasmInstance(MacroAssembler* masm,
                                      Register wasm_instance) {
   Register closure = function_data;
   Register shared_function_info = closure;
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       shared_function_info,
       MemOperand(
           closure,
           wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction()));
   closure = no_reg;
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       function_data,
       MemOperand(shared_function_info,
                  SharedFunctionInfo::kFunctionDataOffset - kHeapObjectTag));
   shared_function_info = no_reg;

-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       wasm_instance,
       MemOperand(function_data,
                  WasmExportedFunctionData::kInstanceOffset - kHeapObjectTag));
@@ -3224,7 +3217,7 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
     Register suspender = rax;  // Fixed.
     __ movq(MemOperand(rbp, kSuspenderOffset), suspender);
     Register target_continuation = rax;
-    __ LoadAnyTaggedField(
+    __ LoadTaggedField(
         target_continuation,
         FieldOperand(suspender, WasmSuspenderObject::kContinuationOffset));
     suspender = no_reg;
@@ -3728,7 +3721,7 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {

   Register function_entry = function_data;
   Register scratch = r12;
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       function_entry,
       FieldOperand(function_data, WasmExportedFunctionData::kInternalOffset));
   __ LoadExternalPointerField(
@@ -3812,8 +3805,8 @@ void GenericJSToWasmWrapperHelper(MacroAssembler* masm, bool stack_switch) {
   // expected to be on the top of the stack).
   // We cannot use just the ret instruction for this, because we cannot pass the
   // number of slots to remove in a Register as an argument.
-  __ DropArguments(param_count, rbx, TurboAssembler::kCountIsInteger,
-                   TurboAssembler::kCountExcludesReceiver);
+  __ DropArguments(param_count, rbx, MacroAssembler::kCountIsInteger,
+                   MacroAssembler::kCountExcludesReceiver);
   __ ret(0);

   // --------------------------------------------------------------------------
@@ -4081,7 +4074,7 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
   // live: [rax, rbx, rcx]

   Register suspender_continuation = rdx;
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       suspender_continuation,
       FieldOperand(suspender, WasmSuspenderObject::kContinuationOffset));
 #ifdef DEBUG
@@ -4102,12 +4095,12 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) {
   // Update roots.
   // -------------------------------------------
   Register caller = rcx;
-  __ LoadAnyTaggedField(caller,
+  __ LoadTaggedField(caller,
                      FieldOperand(suspender_continuation,
                                   WasmContinuationObject::kParentOffset));
   __ movq(masm->RootAsOperand(RootIndex::kActiveContinuation), caller);
   Register parent = rdx;
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       parent, FieldOperand(suspender, WasmSuspenderObject::kParentOffset));
   __ movq(masm->RootAsOperand(RootIndex::kActiveSuspender), parent);
   parent = no_reg;
@@ -4172,19 +4165,19 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
   // Load suspender from closure.
   // -------------------------------------------
   Register sfi = closure;
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       sfi,
       MemOperand(
           closure,
           wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction()));
   Register function_data = sfi;
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       function_data,
       FieldOperand(sfi, SharedFunctionInfo::kFunctionDataOffset));
   // The write barrier uses a fixed register for the host object (rdi). The next
   // barrier is on the suspender, so load it in rdi directly.
   Register suspender = rdi;
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       suspender, FieldOperand(function_data, WasmResumeData::kSuspenderOffset));
   // Check the suspender state.
   Label suspender_is_suspended;
@@ -4233,7 +4226,7 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) {
   __ movq(masm->RootAsOperand(RootIndex::kActiveSuspender), suspender);

   Register target_continuation = suspender;
-  __ LoadAnyTaggedField(
+  __ LoadTaggedField(
       target_continuation,
       FieldOperand(suspender, WasmSuspenderObject::kContinuationOffset));
   suspender = no_reg;
@@ -4848,16 +4841,16 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
   // Insert additional parameters into the stack frame above return address.
   __ PopReturnAddressTo(scratch);
   __ Push(receiver);
-  __ PushTaggedAnyField(FieldOperand(callback, AccessorInfo::kDataOffset),
+  __ PushTaggedField(FieldOperand(callback, AccessorInfo::kDataOffset),
                      decompr_scratch1);
   __ LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
   __ Push(kScratchRegister);  // return value
   __ Push(kScratchRegister);  // return value default
   __ PushAddress(ExternalReference::isolate_address(masm->isolate()));
   __ Push(holder);
   __ Push(Smi::zero());  // should_throw_on_error -> false
-  __ PushTaggedPointerField(FieldOperand(callback, AccessorInfo::kNameOffset),
+  __ PushTaggedField(FieldOperand(callback, AccessorInfo::kNameOffset),
                      decompr_scratch1);
   __ PushReturnAddressFrom(scratch);

   // v8::PropertyCallbackInfo::args_ array and name handle.
@@ -5129,12 +5122,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   // Get the InstructionStream object from the shared function info.
   Register code_obj = rbx;
   TaggedRegister shared_function_info(code_obj);
-  __ LoadTaggedPointerField(
+  __ LoadTaggedField(
       shared_function_info,
       FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
-  __ LoadTaggedPointerField(
-      code_obj, FieldOperand(shared_function_info,
+  __ LoadTaggedField(code_obj,
+                     FieldOperand(shared_function_info,
                              SharedFunctionInfo::kFunctionDataOffset));

   // Check if we have baseline code. For OSR entry it is safe to assume we
   // always have baseline code.
@@ -5166,10 +5159,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
   Register feedback_vector = r11;

   TaggedRegister feedback_cell(feedback_vector);
-  __ LoadTaggedPointerField(
-      feedback_cell, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
-  __ LoadTaggedPointerField(feedback_vector,
+  __ LoadTaggedField(feedback_cell,
+                     FieldOperand(closure, JSFunction::kFeedbackCellOffset));
+  __ LoadTaggedField(feedback_vector,
                      FieldOperand(feedback_cell, Cell::kValueOffset));

   Label install_baseline_code;
   // Check if feedback vector is valid. If not, call prepare for baseline to
@@ -1435,7 +1435,7 @@ class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {

  private:
   friend class Assembler;
-  friend class TurboAssembler;
+  friend class MacroAssembler;

   template <typename T>
   bool CanAcquireVfp() const;

[File diff suppressed because it is too large]

@@ -43,9 +43,9 @@ enum TargetAddressStorageMode {
   NEVER_INLINE_TARGET_ADDRESS
 };

-class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
  public:
-  using TurboAssemblerBase::TurboAssemblerBase;
+  using MacroAssemblerBase::MacroAssemblerBase;

   // Activation support.
   void EnterFrame(StackFrame::Type type,
@@ -596,49 +596,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void F64x2ConvertLowI32x4U(QwNeonRegister dst, QwNeonRegister src);
   void F64x2PromoteLowF32x4(QwNeonRegister dst, QwNeonRegister src);

- private:
-  // Compare single values and then load the fpscr flags to a register.
-  void VFPCompareAndLoadFlags(const SwVfpRegister src1,
-                              const SwVfpRegister src2,
-                              const Register fpscr_flags,
-                              const Condition cond = al);
-  void VFPCompareAndLoadFlags(const SwVfpRegister src1, const float src2,
-                              const Register fpscr_flags,
-                              const Condition cond = al);
-
-  // Compare double values and then load the fpscr flags to a register.
-  void VFPCompareAndLoadFlags(const DwVfpRegister src1,
-                              const DwVfpRegister src2,
-                              const Register fpscr_flags,
-                              const Condition cond = al);
-  void VFPCompareAndLoadFlags(const DwVfpRegister src1, const double src2,
-                              const Register fpscr_flags,
-                              const Condition cond = al);
-
-  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
-
-  // Implementation helpers for FloatMin and FloatMax.
-  template <typename T>
-  void FloatMaxHelper(T result, T left, T right, Label* out_of_line);
-  template <typename T>
-  void FloatMinHelper(T result, T left, T right, Label* out_of_line);
-  template <typename T>
-  void FloatMaxOutOfLineHelper(T result, T left, T right);
-  template <typename T>
-  void FloatMinOutOfLineHelper(T result, T left, T right);
-
-  int CalculateStackPassedWords(int num_reg_arguments,
-                                int num_double_arguments);
-
-  void CallCFunctionHelper(Register function, int num_reg_arguments,
-                           int num_double_arguments);
-};
-
-// MacroAssembler implements a collection of frequently used macros.
-class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
- public:
-  using TurboAssembler::TurboAssembler;
-
   void Mls(Register dst, Register src1, Register src2, Register srcA,
            Condition cond = al);
   void And(Register dst, Register src1, const Operand& src2,
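The hunk above is one half of the mechanical merge this commit performs on every architecture: the TurboAssembler/MacroAssembler split disappears, and TurboAssembler's members (here, the private VFP and C-call helpers) are re-homed in MacroAssembler, as the next hunk shows. Schematically:

// Before: two classes stacked on a shared base.
//   class TurboAssembler : public TurboAssemblerBase { /* low-level ops */ };
//   class MacroAssembler : public TurboAssembler    { /* high-level macros */ };
//
// After: a single class on a renamed base, holding the union of both.
class MacroAssemblerBase {};  // stand-in for src/codegen/macro-assembler-base.h

class MacroAssembler : public MacroAssemblerBase {
 public:
  using MacroAssemblerBase::MacroAssemblerBase;
  // ... former TurboAssembler members and former MacroAssembler members ...
};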
@@ -899,6 +856,42 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
                               Register actual_parameter_count, Label* done,
                               InvokeType type);

+  // Compare single values and then load the fpscr flags to a register.
+  void VFPCompareAndLoadFlags(const SwVfpRegister src1,
+                              const SwVfpRegister src2,
+                              const Register fpscr_flags,
+                              const Condition cond = al);
+  void VFPCompareAndLoadFlags(const SwVfpRegister src1, const float src2,
+                              const Register fpscr_flags,
+                              const Condition cond = al);
+
+  // Compare double values and then load the fpscr flags to a register.
+  void VFPCompareAndLoadFlags(const DwVfpRegister src1,
+                              const DwVfpRegister src2,
+                              const Register fpscr_flags,
+                              const Condition cond = al);
+  void VFPCompareAndLoadFlags(const DwVfpRegister src1, const double src2,
+                              const Register fpscr_flags,
+                              const Condition cond = al);
+
+  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
+
+  // Implementation helpers for FloatMin and FloatMax.
+  template <typename T>
+  void FloatMaxHelper(T result, T left, T right, Label* out_of_line);
+  template <typename T>
+  void FloatMinHelper(T result, T left, T right, Label* out_of_line);
+  template <typename T>
+  void FloatMaxOutOfLineHelper(T result, T left, T right);
+  template <typename T>
+  void FloatMinOutOfLineHelper(T result, T left, T right);
+
+  int CalculateStackPassedWords(int num_reg_arguments,
+                                int num_double_arguments);
+
+  void CallCFunctionHelper(Register function, int num_reg_arguments,
+                           int num_double_arguments);
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
 };

@@ -659,8 +659,8 @@ HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
     Tagged_t compressed =
         Assembler::target_compressed_address_at(pc_, constant_pool_);
     DCHECK(!HAS_SMI_TAG(compressed));
-    Object obj(V8HeapCompressionScheme::DecompressTaggedPointer(cage_base,
-                                                                compressed));
+    Object obj(
+        V8HeapCompressionScheme::DecompressTagged(cage_base, compressed));
     // Embedding of compressed InstructionStream objects must not happen when
     // external code space is enabled, because Codes must be used
     // instead.
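DecompressTagged is the renamed DecompressTaggedPointer, and the reason the pointer/any distinction could be dropped throughout this commit: with 31-bit Smis, untagging reads only the low 32 bits of the decompressed word, so adding the cage base unconditionally is harmless for Smis. A scalar sketch under that stated assumption, not V8's exact code:

#include <cstdint>

uint64_t DecompressTagged(uint64_t cage_base, uint32_t compressed) {
  // One base-adding path serves both Smis and HeapObjects.
  return cage_base + static_cast<uint64_t>(compressed);
}

uint64_t DecompressTaggedSigned(uint32_t compressed) {
  // Smi-only slots never need the cage base.
  return static_cast<uint64_t>(compressed);
}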
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -146,9 +146,9 @@ enum PreShiftImmMode {
 // platforms are updated.
 enum class StackLimitKind { kInterruptStackLimit, kRealStackLimit };
 
-class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
  public:
-  using TurboAssemblerBase::TurboAssemblerBase;
+  using MacroAssemblerBase::MacroAssemblerBase;
 
 #if DEBUG
   void set_allow_macro_instructions(bool value) {
@@ -1400,14 +1400,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   // ---------------------------------------------------------------------------
   // Pointer compression Support
 
-  // Loads a field containing a HeapObject and decompresses it if pointer
-  // compression is enabled.
-  void LoadTaggedPointerField(const Register& destination,
-                              const MemOperand& field_operand);
-
   // Loads a field containing any tagged value and decompresses it if necessary.
-  void LoadAnyTaggedField(const Register& destination,
-                          const MemOperand& field_operand);
+  void LoadTaggedField(const Register& destination,
+                       const MemOperand& field_operand);
 
   // Loads a field containing a tagged signed value and decompresses it if
   // necessary.
@@ -1432,24 +1427,16 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
 
   void DecompressTaggedSigned(const Register& destination,
                               const MemOperand& field_operand);
-  void DecompressTaggedPointer(const Register& destination,
-                               const MemOperand& field_operand);
-  void DecompressTaggedPointer(const Register& destination,
-                               const Register& source);
-  void DecompressTaggedPointer(const Register& destination, Tagged_t immediate);
-  void DecompressAnyTagged(const Register& destination,
-                           const MemOperand& field_operand);
+  void DecompressTagged(const Register& destination,
+                        const MemOperand& field_operand);
+  void DecompressTagged(const Register& destination, const Register& source);
+  void DecompressTagged(const Register& destination, Tagged_t immediate);
 
   void AtomicDecompressTaggedSigned(const Register& destination,
                                     const Register& base, const Register& index,
                                     const Register& temp);
-  void AtomicDecompressTaggedPointer(const Register& destination,
-                                     const Register& base,
-                                     const Register& index,
-                                     const Register& temp);
-  void AtomicDecompressAnyTagged(const Register& destination,
-                                 const Register& base, const Register& index,
-                                 const Register& temp);
+  void AtomicDecompressTagged(const Register& destination, const Register& base,
+                              const Register& index, const Register& temp);
 
   // Restore FP and LR from the values stored in the current frame. This will
   // authenticate the LR when pointer authentication is enabled.
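Note (hedged sketch, not part of the commit): taken together, the two hunks above collapse an API surface — LoadTaggedPointerField/LoadAnyTaggedField become LoadTaggedField, and the DecompressTaggedPointer/DecompressAnyTagged overload families become DecompressTagged. Out-of-tree code tracking the rename could bridge it with a shim like the hypothetical one below (placeholder types, not the V8 classes):

    // Hypothetical compatibility helper: forwards a retired spelling to
    // the merged entry point so existing callers keep compiling.
    struct Register;
    struct MemOperand;

    template <typename Masm>
    void LoadAnyTaggedField(Masm* masm, const Register& dst,
                            const MemOperand& field) {
      masm->LoadTaggedField(dst, field);  // the one remaining loader
    }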
@@ -1484,81 +1471,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
                                 ExternalPointerTag tag,
                                 Register isolate_root = Register::no_reg());
 
- protected:
-  // The actual Push and Pop implementations. These don't generate any code
-  // other than that required for the push or pop. This allows
-  // (Push|Pop)CPURegList to bundle together run-time assertions for a large
-  // block of registers.
-  //
-  // Note that size is per register, and is specified in bytes.
-  void PushHelper(int count, int size, const CPURegister& src0,
-                  const CPURegister& src1, const CPURegister& src2,
-                  const CPURegister& src3);
-  void PopHelper(int count, int size, const CPURegister& dst0,
-                 const CPURegister& dst1, const CPURegister& dst2,
-                 const CPURegister& dst3);
-
-  void ConditionalCompareMacro(const Register& rn, const Operand& operand,
-                               StatusFlags nzcv, Condition cond,
-                               ConditionalCompareOp op);
-
-  void AddSubWithCarryMacro(const Register& rd, const Register& rn,
-                            const Operand& operand, FlagsUpdate S,
-                            AddSubWithCarryOp op);
-
-  // Call Printf. On a native build, a simple call will be generated, but if the
-  // simulator is being used then a suitable pseudo-instruction is used. The
-  // arguments and stack must be prepared by the caller as for a normal AAPCS64
-  // call to 'printf'.
-  //
-  // The 'args' argument should point to an array of variable arguments in their
-  // proper PCS registers (and in calling order). The argument registers can
-  // have mixed types. The format string (x0) should not be included.
-  void CallPrintf(int arg_count = 0, const CPURegister* args = nullptr);
-
- private:
-#if DEBUG
-  // Tell whether any of the macro instruction can be used. When false the
-  // MacroAssembler will assert if a method which can emit a variable number
-  // of instructions is called.
-  bool allow_macro_instructions_ = true;
-#endif
-
-  // Scratch registers available for use by the MacroAssembler.
-  CPURegList tmp_list_ = DefaultTmpList();
-  CPURegList fptmp_list_ = DefaultFPTmpList();
-
-  // Helps resolve branching to labels potentially out of range.
-  // If the label is not bound, it registers the information necessary to later
-  // be able to emit a veneer for this branch if necessary.
-  // If the label is bound, it returns true if the label (or the previous link
-  // in the label chain) is out of range. In that case the caller is responsible
-  // for generating appropriate code.
-  // Otherwise it returns false.
-  // This function also checks wether veneers need to be emitted.
-  bool NeedExtraInstructionsOrRegisterBranch(Label* label,
-                                             ImmBranchType branch_type);
-
-  void Movi16bitHelper(const VRegister& vd, uint64_t imm);
-  void Movi32bitHelper(const VRegister& vd, uint64_t imm);
-  void Movi64bitHelper(const VRegister& vd, uint64_t imm);
-
-  void LoadStoreMacro(const CPURegister& rt, const MemOperand& addr,
-                      LoadStoreOp op);
-
-  void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
-                          const MemOperand& addr, LoadStorePairOp op);
-
-  int64_t CalculateTargetOffset(Address target, RelocInfo::Mode rmode,
-                                byte* pc);
-
-  void JumpHelper(int64_t offset, RelocInfo::Mode rmode, Condition cond = al);
-};
-
-class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
- public:
-  using TurboAssembler::TurboAssembler;
-
   // Instruction set functions ------------------------------------------------
   // Logical macros.
   inline void Bics(const Register& rd, const Register& rn,
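Note (schematic, based only on the declarations visible in this diff): the deletion above is one half of the merge — TurboAssembler's protected/private members leave this class, and the separate `class MacroAssembler : public TurboAssembler` wrapper disappears; the same members reappear on the merged MacroAssembler in a later hunk. The shape of the change:

    // Before: Assembler -> TurboAssemblerBase -> TurboAssembler -> MacroAssembler
    // After:  Assembler -> MacroAssemblerBase -> MacroAssembler
    class Assembler {};
    class MacroAssemblerBase : public Assembler {};       // was TurboAssemblerBase
    class MacroAssembler : public MacroAssemblerBase {};  // absorbs TurboAssembler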
@@ -1594,18 +1506,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
                   Condition cond);
   inline void Extr(const Register& rd, const Register& rn, const Register& rm,
                    unsigned lsb);
-  void Fcvtl(const VRegister& vd, const VRegister& vn) {
-    DCHECK(allow_macro_instructions());
-    fcvtl(vd, vn);
-  }
   void Fcvtl2(const VRegister& vd, const VRegister& vn) {
     DCHECK(allow_macro_instructions());
     fcvtl2(vd, vn);
   }
-  void Fcvtn(const VRegister& vd, const VRegister& vn) {
-    DCHECK(allow_macro_instructions());
-    fcvtn(vd, vn);
-  }
   void Fcvtn2(const VRegister& vd, const VRegister& vn) {
     DCHECK(allow_macro_instructions());
     fcvtn2(vd, vn);
@@ -1641,7 +1545,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
     DCHECK(allow_macro_instructions());
     mvni(vd, imm8, shift, shift_amount);
   }
-  inline void Rev(const Register& rd, const Register& rn);
   inline void Smaddl(const Register& rd, const Register& rn, const Register& rm,
                      const Register& ra);
   inline void Smsubl(const Register& rd, const Register& rn, const Register& rm,
@@ -2139,6 +2042,76 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
                                 Register feedback_vector, FeedbackSlot slot,
                                 Label* on_result, Label::Distance distance);
 
+ protected:
+  // The actual Push and Pop implementations. These don't generate any code
+  // other than that required for the push or pop. This allows
+  // (Push|Pop)CPURegList to bundle together run-time assertions for a large
+  // block of registers.
+  //
+  // Note that size is per register, and is specified in bytes.
+  void PushHelper(int count, int size, const CPURegister& src0,
+                  const CPURegister& src1, const CPURegister& src2,
+                  const CPURegister& src3);
+  void PopHelper(int count, int size, const CPURegister& dst0,
+                 const CPURegister& dst1, const CPURegister& dst2,
+                 const CPURegister& dst3);
+
+  void ConditionalCompareMacro(const Register& rn, const Operand& operand,
+                               StatusFlags nzcv, Condition cond,
+                               ConditionalCompareOp op);
+
+  void AddSubWithCarryMacro(const Register& rd, const Register& rn,
+                            const Operand& operand, FlagsUpdate S,
+                            AddSubWithCarryOp op);
+
+  // Call Printf. On a native build, a simple call will be generated, but if the
+  // simulator is being used then a suitable pseudo-instruction is used. The
+  // arguments and stack must be prepared by the caller as for a normal AAPCS64
+  // call to 'printf'.
+  //
+  // The 'args' argument should point to an array of variable arguments in their
+  // proper PCS registers (and in calling order). The argument registers can
+  // have mixed types. The format string (x0) should not be included.
+  void CallPrintf(int arg_count = 0, const CPURegister* args = nullptr);
+
+ private:
+#if DEBUG
+  // Tell whether any of the macro instruction can be used. When false the
+  // MacroAssembler will assert if a method which can emit a variable number
+  // of instructions is called.
+  bool allow_macro_instructions_ = true;
+#endif
+
+  // Scratch registers available for use by the MacroAssembler.
+  CPURegList tmp_list_ = DefaultTmpList();
+  CPURegList fptmp_list_ = DefaultFPTmpList();
+
+  // Helps resolve branching to labels potentially out of range.
+  // If the label is not bound, it registers the information necessary to later
+  // be able to emit a veneer for this branch if necessary.
+  // If the label is bound, it returns true if the label (or the previous link
+  // in the label chain) is out of range. In that case the caller is responsible
+  // for generating appropriate code.
+  // Otherwise it returns false.
+  // This function also checks wether veneers need to be emitted.
+  bool NeedExtraInstructionsOrRegisterBranch(Label* label,
+                                             ImmBranchType branch_type);
+
+  void Movi16bitHelper(const VRegister& vd, uint64_t imm);
+  void Movi32bitHelper(const VRegister& vd, uint64_t imm);
+  void Movi64bitHelper(const VRegister& vd, uint64_t imm);
+
+  void LoadStoreMacro(const CPURegister& rt, const MemOperand& addr,
+                      LoadStoreOp op);
+
+  void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
+                          const MemOperand& addr, LoadStorePairOp op);
+
+  int64_t CalculateTargetOffset(Address target, RelocInfo::Mode rmode,
+                                byte* pc);
+
+  void JumpHelper(int64_t offset, RelocInfo::Mode rmode, Condition cond = al);
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
 };
 
@@ -2148,38 +2121,38 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
 // emitted is what you specified when creating the scope.
 class V8_NODISCARD InstructionAccurateScope {
  public:
-  explicit InstructionAccurateScope(TurboAssembler* tasm, size_t count = 0)
-      : tasm_(tasm),
-        block_pool_(tasm, count * kInstrSize)
+  explicit InstructionAccurateScope(MacroAssembler* masm, size_t count = 0)
+      : masm_(masm),
+        block_pool_(masm, count * kInstrSize)
 #ifdef DEBUG
         ,
         size_(count * kInstrSize)
 #endif
   {
-    tasm_->CheckVeneerPool(false, true, count * kInstrSize);
-    tasm_->StartBlockVeneerPool();
+    masm_->CheckVeneerPool(false, true, count * kInstrSize);
+    masm_->StartBlockVeneerPool();
 #ifdef DEBUG
     if (count != 0) {
-      tasm_->bind(&start_);
+      masm_->bind(&start_);
     }
-    previous_allow_macro_instructions_ = tasm_->allow_macro_instructions();
-    tasm_->set_allow_macro_instructions(false);
+    previous_allow_macro_instructions_ = masm_->allow_macro_instructions();
+    masm_->set_allow_macro_instructions(false);
 #endif
   }
 
   ~InstructionAccurateScope() {
-    tasm_->EndBlockVeneerPool();
+    masm_->EndBlockVeneerPool();
 #ifdef DEBUG
     if (start_.is_bound()) {
-      DCHECK(tasm_->SizeOfCodeGeneratedSince(&start_) == size_);
+      DCHECK(masm_->SizeOfCodeGeneratedSince(&start_) == size_);
     }
-    tasm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
+    masm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
 #endif
   }
 
  private:
-  TurboAssembler* tasm_;
-  TurboAssembler::BlockConstPoolScope block_pool_;
+  MacroAssembler* masm_;
+  MacroAssembler::BlockConstPoolScope block_pool_;
 #ifdef DEBUG
   size_t size_;
   Label start_;
@@ -2188,7 +2161,7 @@ class V8_NODISCARD InstructionAccurateScope {
 };
 
 // This scope utility allows scratch registers to be managed safely. The
-// TurboAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
+// MacroAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
 // registers. These registers can be allocated on demand, and will be returned
 // at the end of the scope.
 //
@@ -2198,9 +2171,9 @@ class V8_NODISCARD InstructionAccurateScope {
 // order as the constructors. We do not have assertions for this.
 class V8_NODISCARD UseScratchRegisterScope {
  public:
-  explicit UseScratchRegisterScope(TurboAssembler* tasm)
-      : available_(tasm->TmpList()),
-        availablefp_(tasm->FPTmpList()),
+  explicit UseScratchRegisterScope(MacroAssembler* masm)
+      : available_(masm->TmpList()),
+        availablefp_(masm->FPTmpList()),
         old_available_(available_->bits()),
         old_availablefp_(availablefp_->bits()) {
     DCHECK_EQ(available_->type(), CPURegister::kRegister);
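Note (a minimal model of the RAII contract described by the comments above; this is a sketch, not the V8 class — acquisition logic and register typing are simplified): the scope snapshots the available-register mask on construction, hands registers out on demand, and restores the snapshot wholesale on destruction, which is why acquired registers cannot leak past the scope:

    #include <bitset>
    #include <cassert>

    struct ScratchPool {
      std::bitset<32> available;  // stands in for CPURegList::bits()
    };

    class ScratchScopeModel {
     public:
      explicit ScratchScopeModel(ScratchPool* pool)
          : pool_(pool), old_available_(pool->available) {}
      // Restores the snapshot, returning every acquired register at once.
      ~ScratchScopeModel() { pool_->available = old_available_; }

      int Acquire() {
        for (int i = 0; i < 32; ++i) {
          if (pool_->available[i]) {
            pool_->available[i] = false;  // taken until the scope ends
            return i;
          }
        }
        assert(false && "out of scratch registers");
        return -1;
      }

     private:
      ScratchPool* pool_;
      std::bitset<32> old_available_;  // mirrors old_available_ above
    };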
@ -166,7 +166,6 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
|
|||||||
V(FixedCOWArrayMap, fixed_cow_array_map, FixedCOWArrayMap) \
|
V(FixedCOWArrayMap, fixed_cow_array_map, FixedCOWArrayMap) \
|
||||||
V(Function_string, function_string, FunctionString) \
|
V(Function_string, function_string, FunctionString) \
|
||||||
V(function_to_string, function_to_string, FunctionToString) \
|
V(function_to_string, function_to_string, FunctionToString) \
|
||||||
V(GlobalPropertyCellMap, global_property_cell_map, PropertyCellMap) \
|
|
||||||
V(has_instance_symbol, has_instance_symbol, HasInstanceSymbol) \
|
V(has_instance_symbol, has_instance_symbol, HasInstanceSymbol) \
|
||||||
V(Infinity_string, Infinity_string, InfinityString) \
|
V(Infinity_string, Infinity_string, InfinityString) \
|
||||||
V(is_concat_spreadable_symbol, is_concat_spreadable_symbol, \
|
V(is_concat_spreadable_symbol, is_concat_spreadable_symbol, \
|
||||||
|
@@ -21,11 +21,11 @@
 #include "src/codegen/ia32/register-ia32.h"
 #include "src/codegen/interface-descriptors-inl.h"
 #include "src/codegen/label.h"
+#include "src/codegen/macro-assembler-base.h"
 #include "src/codegen/macro-assembler.h"
 #include "src/codegen/register.h"
 #include "src/codegen/reglist.h"
 #include "src/codegen/reloc-info.h"
-#include "src/codegen/turbo-assembler.h"
 #include "src/common/globals.h"
 #include "src/deoptimizer/deoptimizer.h"
 #include "src/execution/frame-constants.h"
@@ -77,18 +77,18 @@ Operand StackArgumentsAccessor::GetArgumentOperand(int index) const {
 // -------------------------------------------------------------------------
 // MacroAssembler implementation.
 
-void TurboAssembler::InitializeRootRegister() {
+void MacroAssembler::InitializeRootRegister() {
   ASM_CODE_COMMENT(this);
   ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
   Move(kRootRegister, Immediate(isolate_root));
 }
 
-Operand TurboAssembler::RootAsOperand(RootIndex index) {
+Operand MacroAssembler::RootAsOperand(RootIndex index) {
   DCHECK(root_array_available());
   return Operand(kRootRegister, RootRegisterOffsetForRootIndex(index));
 }
 
-void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
+void MacroAssembler::LoadRoot(Register destination, RootIndex index) {
   ASM_CODE_COMMENT(this);
   if (root_array_available()) {
     mov(destination, RootAsOperand(index));
@@ -113,7 +113,7 @@ void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
   mov(destination, Operand(destination, RootRegisterOffsetForRootIndex(index)));
 }
 
-void TurboAssembler::CompareRoot(Register with, Register scratch,
+void MacroAssembler::CompareRoot(Register with, Register scratch,
                                  RootIndex index) {
   ASM_CODE_COMMENT(this);
   if (root_array_available()) {
@@ -126,7 +126,7 @@ void TurboAssembler::CompareRoot(Register with, Register scratch,
   }
 }
 
-void TurboAssembler::CompareRoot(Register with, RootIndex index) {
+void MacroAssembler::CompareRoot(Register with, RootIndex index) {
   ASM_CODE_COMMENT(this);
   if (root_array_available()) {
     cmp(with, RootAsOperand(index));
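Note (conceptual model, illustrative layout rather than V8's actual IsolateData): the root-register functions above share one pattern — when root_array_available(), isolate-reachable data is addressed as a fixed offset from kRootRegister instead of via a pointer embedded in the code, which keeps the generated code isolate-independent:

    #include <cstddef>
    #include <cstdint>

    struct IsolateDataModel {
      uintptr_t roots[16];  // roots table at a known offset from the base
    };

    // What `mov(destination, RootAsOperand(index))` computes: a single
    // load at [kRootRegister + offset], with no relocatable constant.
    inline uintptr_t LoadRoot(const IsolateDataModel* root_register,
                              size_t index) {
      return root_register->roots[index];
    }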
@@ -180,7 +180,7 @@ void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
   j(below_equal, on_in_range, near_jump);
 }
 
-void TurboAssembler::PushArray(Register array, Register size, Register scratch,
+void MacroAssembler::PushArray(Register array, Register size, Register scratch,
                                PushArrayOrder order) {
   ASM_CODE_COMMENT(this);
   DCHECK(!AreAliased(array, size, scratch));
@@ -206,7 +206,7 @@ void TurboAssembler::PushArray(Register array, Register size, Register scratch,
   }
 }
 
-Operand TurboAssembler::ExternalReferenceAsOperand(ExternalReference reference,
+Operand MacroAssembler::ExternalReferenceAsOperand(ExternalReference reference,
                                                    Register scratch) {
   if (root_array_available() && options().enable_root_relative_access) {
     intptr_t delta =
@@ -233,8 +233,8 @@ Operand TurboAssembler::ExternalReferenceAsOperand(ExternalReference reference,
 }
 
 // TODO(v8:6666): If possible, refactor into a platform-independent function in
-// TurboAssembler.
-Operand TurboAssembler::ExternalReferenceAddressAsOperand(
+// MacroAssembler.
+Operand MacroAssembler::ExternalReferenceAddressAsOperand(
     ExternalReference reference) {
   DCHECK(root_array_available());
   DCHECK(options().isolate_independent_code);
@@ -244,8 +244,8 @@ Operand TurboAssembler::ExternalReferenceAddressAsOperand(
 }
 
 // TODO(v8:6666): If possible, refactor into a platform-independent function in
-// TurboAssembler.
-Operand TurboAssembler::HeapObjectAsOperand(Handle<HeapObject> object) {
+// MacroAssembler.
+Operand MacroAssembler::HeapObjectAsOperand(Handle<HeapObject> object) {
   DCHECK(root_array_available());
 
   Builtin builtin;
@@ -264,7 +264,7 @@ Operand TurboAssembler::HeapObjectAsOperand(Handle<HeapObject> object) {
   }
 }
 
-void TurboAssembler::LoadFromConstantsTable(Register destination,
+void MacroAssembler::LoadFromConstantsTable(Register destination,
                                             int constant_index) {
   ASM_CODE_COMMENT(this);
   DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
@@ -273,7 +273,7 @@ void TurboAssembler::LoadFromConstantsTable(Register destination,
       FieldOperand(destination, FixedArray::OffsetOfElementAt(constant_index)));
 }
 
-void TurboAssembler::LoadRootRegisterOffset(Register destination,
+void MacroAssembler::LoadRootRegisterOffset(Register destination,
                                             intptr_t offset) {
   ASM_CODE_COMMENT(this);
   DCHECK(is_int32(offset));
@@ -285,13 +285,13 @@ void TurboAssembler::LoadRootRegisterOffset(Register destination,
   }
 }
 
-void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
+void MacroAssembler::LoadRootRelative(Register destination, int32_t offset) {
   ASM_CODE_COMMENT(this);
   DCHECK(root_array_available());
   mov(destination, Operand(kRootRegister, offset));
 }
 
-void TurboAssembler::LoadAddress(Register destination,
+void MacroAssembler::LoadAddress(Register destination,
                                  ExternalReference source) {
   // TODO(jgruber): Add support for enable_root_relative_access.
   if (root_array_available() && options().isolate_independent_code) {
@@ -301,7 +301,7 @@ void TurboAssembler::LoadAddress(Register destination,
   mov(destination, Immediate(source));
 }
 
-int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+int MacroAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                                     Register exclusion) const {
   int bytes = 0;
   RegList saved_regs = kCallerSaved - exclusion;
@@ -315,7 +315,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
   return bytes;
 }
 
-int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
+int MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
                                     Register exclusion) {
   ASM_CODE_COMMENT(this);
   // We don't allow a GC in a write barrier slow path so there is no need to
@@ -346,7 +346,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
   return bytes;
 }
 
-int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) {
+int MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) {
   ASM_CODE_COMMENT(this);
   int bytes = 0;
   if (fp_mode == SaveFPRegsMode::kSave) {
@@ -412,19 +412,19 @@ void MacroAssembler::RecordWriteField(Register object, int offset,
   }
 }
 
-void TurboAssembler::MaybeSaveRegisters(RegList registers) {
+void MacroAssembler::MaybeSaveRegisters(RegList registers) {
   for (Register reg : registers) {
     push(reg);
   }
 }
 
-void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
+void MacroAssembler::MaybeRestoreRegisters(RegList registers) {
   for (Register reg : base::Reversed(registers)) {
     pop(reg);
   }
 }
 
-void TurboAssembler::CallEphemeronKeyBarrier(Register object,
+void MacroAssembler::CallEphemeronKeyBarrier(Register object,
                                              Register slot_address,
                                              SaveFPRegsMode fp_mode) {
   ASM_CODE_COMMENT(this);
@@ -449,7 +449,7 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object,
   MaybeRestoreRegisters(registers);
 }
 
-void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object,
+void MacroAssembler::CallRecordWriteStubSaveRegisters(Register object,
                                                       Register slot_address,
                                                       SaveFPRegsMode fp_mode,
                                                       StubCallMode mode) {
@@ -473,7 +473,7 @@ void TurboAssembler::CallRecordWriteStubSaveRegisters(Register object,
   MaybeRestoreRegisters(registers);
 }
 
-void TurboAssembler::CallRecordWriteStub(Register object, Register slot_address,
+void MacroAssembler::CallRecordWriteStub(Register object, Register slot_address,
                                          SaveFPRegsMode fp_mode,
                                          StubCallMode mode) {
   ASM_CODE_COMMENT(this);
@@ -547,17 +547,17 @@ void MacroAssembler::RecordWrite(Register object, Register slot_address,
   }
 }
 
-void TurboAssembler::Cvtsi2ss(XMMRegister dst, Operand src) {
+void MacroAssembler::Cvtsi2ss(XMMRegister dst, Operand src) {
   xorps(dst, dst);
   cvtsi2ss(dst, src);
 }
 
-void TurboAssembler::Cvtsi2sd(XMMRegister dst, Operand src) {
+void MacroAssembler::Cvtsi2sd(XMMRegister dst, Operand src) {
   xorpd(dst, dst);
   cvtsi2sd(dst, src);
 }
 
-void TurboAssembler::Cvtui2ss(XMMRegister dst, Operand src, Register tmp) {
+void MacroAssembler::Cvtui2ss(XMMRegister dst, Operand src, Register tmp) {
   Label done;
   Register src_reg = src.is_reg_only() ? src.reg() : tmp;
   if (src_reg == tmp) mov(tmp, src);
@@ -578,7 +578,7 @@ void TurboAssembler::Cvtui2ss(XMMRegister dst, Operand src, Register tmp) {
   bind(&done);
 }
 
-void TurboAssembler::Cvttss2ui(Register dst, Operand src, XMMRegister tmp) {
+void MacroAssembler::Cvttss2ui(Register dst, Operand src, XMMRegister tmp) {
   Label done;
   cvttss2si(dst, src);
   test(dst, dst);
@@ -590,7 +590,7 @@ void TurboAssembler::Cvttss2ui(Register dst, Operand src, XMMRegister tmp) {
   bind(&done);
 }
 
-void TurboAssembler::Cvtui2sd(XMMRegister dst, Operand src, Register scratch) {
+void MacroAssembler::Cvtui2sd(XMMRegister dst, Operand src, Register scratch) {
   Label done;
   cmp(src, Immediate(0));
   ExternalReference uint32_bias = ExternalReference::address_of_uint32_bias();
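Note (portable model of the same arithmetic, not V8 code): Cvtui2sd above works around SSE2 having only a signed 32-bit-to-double conversion. The value is converted as if signed; when the sign bit was set, the result is off by exactly 2^32, so the uint32_bias constant adds it back:

    #include <cstdint>

    double Cvtui2sdModel(uint32_t u) {
      double d = static_cast<double>(static_cast<int32_t>(u));  // signed view
      if (static_cast<int32_t>(u) < 0) d += 4294967296.0;       // + 2^32 bias
      return d;
    }

    // e.g. 0xFFFFFFFF converts to -1.0 under the signed view; adding 2^32
    // yields the correct 4294967295.0.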
@@ -600,14 +600,14 @@ void TurboAssembler::Cvtui2sd(XMMRegister dst, Operand src, Register scratch) {
   bind(&done);
 }
 
-void TurboAssembler::Cvttsd2ui(Register dst, Operand src, XMMRegister tmp) {
+void MacroAssembler::Cvttsd2ui(Register dst, Operand src, XMMRegister tmp) {
   Move(tmp, -2147483648.0);
   addsd(tmp, src);
   cvttsd2si(dst, tmp);
   add(dst, Immediate(0x80000000));
 }
 
-void TurboAssembler::ShlPair(Register high, Register low, uint8_t shift) {
+void MacroAssembler::ShlPair(Register high, Register low, uint8_t shift) {
   DCHECK_GE(63, shift);
   if (shift >= 32) {
     mov(high, low);
@@ -619,7 +619,7 @@ void TurboAssembler::ShlPair(Register high, Register low, uint8_t shift) {
   }
 }
 
-void TurboAssembler::ShlPair_cl(Register high, Register low) {
+void MacroAssembler::ShlPair_cl(Register high, Register low) {
   ASM_CODE_COMMENT(this);
   shld_cl(high, low);
   shl_cl(low);
@@ -631,7 +631,7 @@ void TurboAssembler::ShlPair_cl(Register high, Register low) {
   bind(&done);
 }
 
-void TurboAssembler::ShrPair(Register high, Register low, uint8_t shift) {
+void MacroAssembler::ShrPair(Register high, Register low, uint8_t shift) {
   DCHECK_GE(63, shift);
   if (shift >= 32) {
     mov(low, high);
@@ -643,7 +643,7 @@ void TurboAssembler::ShrPair(Register high, Register low, uint8_t shift) {
   }
 }
 
-void TurboAssembler::ShrPair_cl(Register high, Register low) {
+void MacroAssembler::ShrPair_cl(Register high, Register low) {
   ASM_CODE_COMMENT(this);
   shrd_cl(low, high);
   shr_cl(high);
@@ -655,7 +655,7 @@ void TurboAssembler::ShrPair_cl(Register high, Register low) {
   bind(&done);
 }
 
-void TurboAssembler::SarPair(Register high, Register low, uint8_t shift) {
+void MacroAssembler::SarPair(Register high, Register low, uint8_t shift) {
   ASM_CODE_COMMENT(this);
   DCHECK_GE(63, shift);
   if (shift >= 32) {
@@ -668,7 +668,7 @@ void TurboAssembler::SarPair(Register high, Register low, uint8_t shift) {
   }
 }
 
-void TurboAssembler::SarPair_cl(Register high, Register low) {
+void MacroAssembler::SarPair_cl(Register high, Register low) {
   ASM_CODE_COMMENT(this);
   shrd_cl(low, high);
   sar_cl(high);
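Note (portable sketch of the technique, not V8 code): the ShlPair/ShrPair/SarPair families above implement 64-bit shifts of a value split across two 32-bit registers, using shld/shrd to carry bits between the halves; the _cl variants do the same with a runtime count plus a >= 32 fixup. The constant-count left shift, modeled:

    #include <cstdint>

    // Mirrors ShlPair's DCHECK_GE(63, shift) bound and its >= 32 fast path.
    void ShlPairModel(uint32_t& high, uint32_t& low, unsigned shift) {
      if (shift >= 32) {
        high = low << (shift - 32);  // mov high, low; shift the high half
        low = 0;
      } else if (shift > 0) {
        high = (high << shift) | (low >> (32 - shift));  // shld high, low
        low <<= shift;                                   // shl low
      }
    }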
@@ -680,7 +680,7 @@ void TurboAssembler::SarPair_cl(Register high, Register low) {
   bind(&done);
 }
 
-void TurboAssembler::LoadMap(Register destination, Register object) {
+void MacroAssembler::LoadMap(Register destination, Register object) {
   mov(destination, FieldOperand(object, HeapObject::kMapOffset));
 }
 
@@ -979,23 +979,23 @@ void MacroAssembler::AssertNotSmi(Register object) {
   }
 }
 
-void TurboAssembler::Assert(Condition cc, AbortReason reason) {
+void MacroAssembler::Assert(Condition cc, AbortReason reason) {
   if (v8_flags.debug_code) Check(cc, reason);
 }
 
-void TurboAssembler::AssertUnreachable(AbortReason reason) {
+void MacroAssembler::AssertUnreachable(AbortReason reason) {
   if (v8_flags.debug_code) Abort(reason);
 }
 #endif  // V8_ENABLE_DEBUG_CODE
 
-void TurboAssembler::StubPrologue(StackFrame::Type type) {
+void MacroAssembler::StubPrologue(StackFrame::Type type) {
   ASM_CODE_COMMENT(this);
   push(ebp);  // Caller's frame pointer.
   mov(ebp, esp);
   push(Immediate(StackFrame::TypeToMarker(type)));
 }
 
-void TurboAssembler::Prologue() {
+void MacroAssembler::Prologue() {
   ASM_CODE_COMMENT(this);
   push(ebp);  // Caller's frame pointer.
   mov(ebp, esp);
|
|||||||
push(kJavaScriptCallArgCountRegister); // Actual argument count.
|
push(kJavaScriptCallArgCountRegister); // Actual argument count.
|
||||||
}
|
}
|
||||||
|
|
||||||
void TurboAssembler::DropArguments(Register count, ArgumentsCountType type,
|
void MacroAssembler::DropArguments(Register count, ArgumentsCountType type,
|
||||||
ArgumentsCountMode mode) {
|
ArgumentsCountMode mode) {
|
||||||
int receiver_bytes =
|
int receiver_bytes =
|
||||||
(mode == kCountExcludesReceiver) ? kSystemPointerSize : 0;
|
(mode == kCountExcludesReceiver) ? kSystemPointerSize : 0;
|
||||||
@ -1034,7 +1034,7 @@ void TurboAssembler::DropArguments(Register count, ArgumentsCountType type,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void TurboAssembler::DropArguments(Register count, Register scratch,
|
void MacroAssembler::DropArguments(Register count, Register scratch,
|
||||||
ArgumentsCountType type,
|
ArgumentsCountType type,
|
||||||
ArgumentsCountMode mode) {
|
ArgumentsCountMode mode) {
|
||||||
DCHECK(!AreAliased(count, scratch));
|
DCHECK(!AreAliased(count, scratch));
|
||||||
@ -1043,7 +1043,7 @@ void TurboAssembler::DropArguments(Register count, Register scratch,
|
|||||||
PushReturnAddressFrom(scratch);
|
PushReturnAddressFrom(scratch);
|
||||||
}
|
}
|
||||||
|
|
||||||
void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
|
void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc,
|
||||||
Register receiver,
|
Register receiver,
|
||||||
Register scratch,
|
Register scratch,
|
||||||
ArgumentsCountType type,
|
ArgumentsCountType type,
|
||||||
@ -1055,7 +1055,7 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
|
|||||||
PushReturnAddressFrom(scratch);
|
PushReturnAddressFrom(scratch);
|
||||||
}
|
}
|
||||||
|
|
||||||
void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
|
void MacroAssembler::DropArgumentsAndPushNewReceiver(Register argc,
|
||||||
Operand receiver,
|
Operand receiver,
|
||||||
Register scratch,
|
Register scratch,
|
||||||
ArgumentsCountType type,
|
ArgumentsCountType type,
|
||||||
@ -1068,7 +1068,7 @@ void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
|
|||||||
PushReturnAddressFrom(scratch);
|
PushReturnAddressFrom(scratch);
|
||||||
}
|
}
|
||||||
|
|
||||||
void TurboAssembler::EnterFrame(StackFrame::Type type) {
|
void MacroAssembler::EnterFrame(StackFrame::Type type) {
|
||||||
ASM_CODE_COMMENT(this);
|
ASM_CODE_COMMENT(this);
|
||||||
push(ebp);
|
push(ebp);
|
||||||
mov(ebp, esp);
|
mov(ebp, esp);
|
||||||
@@ -1080,7 +1080,7 @@ void TurboAssembler::EnterFrame(StackFrame::Type type) {
 #endif  // V8_ENABLE_WEBASSEMBLY
 }
 
-void TurboAssembler::LeaveFrame(StackFrame::Type type) {
+void MacroAssembler::LeaveFrame(StackFrame::Type type) {
   ASM_CODE_COMMENT(this);
   if (v8_flags.debug_code && !StackFrame::IsJavaScript(type)) {
     cmp(Operand(ebp, CommonFrameConstants::kContextOrFrameTypeOffset),
@@ -1091,7 +1091,7 @@ void TurboAssembler::LeaveFrame(StackFrame::Type type) {
 }
 
 #ifdef V8_OS_WIN
-void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
+void MacroAssembler::AllocateStackSpace(Register bytes_scratch) {
   ASM_CODE_COMMENT(this);
   // In windows, we cannot increment the stack size by more than one page
   // (minimum page size is 4KB) without accessing at least one byte on the
@@ -1113,7 +1113,7 @@ void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
   sub(esp, bytes_scratch);
 }
 
-void TurboAssembler::AllocateStackSpace(int bytes) {
+void MacroAssembler::AllocateStackSpace(int bytes) {
   ASM_CODE_COMMENT(this);
   DCHECK_GE(bytes, 0);
   while (bytes >= kStackPageSize) {
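Note (a model of the control flow described by the comment above; a real stack pointer is not a C++ object): the Windows-only AllocateStackSpace exists because Windows commits stack pages lazily behind a guard page — moving esp by more than a page without touching memory can jump past the guard and fault. The loop therefore extends the stack one page at a time and touches each page:

    #include <cstddef>

    constexpr size_t kStackPageSizeModel = 4096;  // minimum page size (4KB)

    void AllocateStackSpaceModel(volatile char*& sp, size_t bytes) {
      while (bytes >= kStackPageSizeModel) {
        sp -= kStackPageSizeModel;
        *sp = 0;  // touch the new page so the guard page advances
        bytes -= kStackPageSizeModel;
      }
      sp -= bytes;  // the remainder stays within one page, no probe needed
    }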
@@ -1332,10 +1332,10 @@ void MacroAssembler::CompareStackLimit(Register with, StackLimitKind kind) {
       kind == StackLimitKind::kRealStackLimit
           ? ExternalReference::address_of_real_jslimit(isolate)
           : ExternalReference::address_of_jslimit(isolate);
-  DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+  DCHECK(MacroAssembler::IsAddressableThroughRootRegister(isolate, limit));
 
   intptr_t offset =
-      TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+      MacroAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
   cmp(with, Operand(kRootRegister, offset));
 }
 
@@ -1565,9 +1565,9 @@ void MacroAssembler::LoadNativeContextSlot(Register destination, int index) {
   mov(destination, Operand(destination, Context::SlotOffset(index)));
 }
 
-void TurboAssembler::Ret() { ret(0); }
+void MacroAssembler::Ret() { ret(0); }
 
-void TurboAssembler::Ret(int bytes_dropped, Register scratch) {
+void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
   if (is_uint16(bytes_dropped)) {
     ret(bytes_dropped);
   } else {
@@ -1578,7 +1578,7 @@ void TurboAssembler::Ret(int bytes_dropped, Register scratch) {
   }
 }
 
-void TurboAssembler::Push(Immediate value) {
+void MacroAssembler::Push(Immediate value) {
   if (root_array_available() && options().isolate_independent_code) {
     if (value.is_embedded_object()) {
       Push(HeapObjectAsOperand(value.embedded_object()));
@@ -1597,13 +1597,13 @@ void MacroAssembler::Drop(int stack_elements) {
   }
 }
 
-void TurboAssembler::Move(Register dst, Register src) {
+void MacroAssembler::Move(Register dst, Register src) {
   if (dst != src) {
     mov(dst, src);
   }
 }
 
-void TurboAssembler::Move(Register dst, const Immediate& src) {
+void MacroAssembler::Move(Register dst, const Immediate& src) {
   if (!src.is_heap_number_request() && src.is_zero()) {
     xor_(dst, dst);  // Shorter than mov of 32-bit immediate 0.
   } else if (src.is_external_reference()) {
@@ -1613,7 +1613,7 @@ void TurboAssembler::Move(Register dst, const Immediate& src) {
   }
 }
 
-void TurboAssembler::Move(Operand dst, const Immediate& src) {
+void MacroAssembler::Move(Operand dst, const Immediate& src) {
   // Since there's no scratch register available, take a detour through the
   // stack.
   if (root_array_available() && options().isolate_independent_code) {
@@ -1632,9 +1632,9 @@ void TurboAssembler::Move(Operand dst, const Immediate& src) {
   }
 }
 
-void TurboAssembler::Move(Register dst, Operand src) { mov(dst, src); }
+void MacroAssembler::Move(Register dst, Operand src) { mov(dst, src); }
 
-void TurboAssembler::Move(Register dst, Handle<HeapObject> src) {
+void MacroAssembler::Move(Register dst, Handle<HeapObject> src) {
   if (root_array_available() && options().isolate_independent_code) {
     IndirectLoadConstant(dst, src);
     return;
@@ -1642,7 +1642,7 @@ void TurboAssembler::Move(Register dst, Handle<HeapObject> src) {
   mov(dst, src);
 }
 
-void TurboAssembler::Move(XMMRegister dst, uint32_t src) {
+void MacroAssembler::Move(XMMRegister dst, uint32_t src) {
   if (src == 0) {
     pxor(dst, dst);
   } else {
@@ -1666,7 +1666,7 @@ void TurboAssembler::Move(XMMRegister dst, uint32_t src) {
   }
 }
 
-void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
+void MacroAssembler::Move(XMMRegister dst, uint64_t src) {
   if (src == 0) {
     pxor(dst, dst);
   } else {
@@ -1705,7 +1705,7 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
   }
 }
 
-void TurboAssembler::PextrdPreSse41(Register dst, XMMRegister src,
+void MacroAssembler::PextrdPreSse41(Register dst, XMMRegister src,
                                     uint8_t imm8) {
   if (imm8 == 0) {
     Movd(dst, src);
@@ -1721,7 +1721,7 @@ void TurboAssembler::PextrdPreSse41(Register dst, XMMRegister src,
   add(esp, Immediate(kDoubleSize));
 }
 
-void TurboAssembler::PinsrdPreSse41(XMMRegister dst, Operand src, uint8_t imm8,
+void MacroAssembler::PinsrdPreSse41(XMMRegister dst, Operand src, uint8_t imm8,
                                     uint32_t* load_pc_offset) {
   // Without AVX or SSE, we can only have 64-bit values in xmm registers.
   // We don't have an xmm scratch register, so move the data via the stack. This
@@ -1742,7 +1742,7 @@ void TurboAssembler::PinsrdPreSse41(XMMRegister dst, Operand src, uint8_t imm8,
   add(esp, Immediate(kDoubleSize));
 }
 
-void TurboAssembler::Lzcnt(Register dst, Operand src) {
+void MacroAssembler::Lzcnt(Register dst, Operand src) {
   if (CpuFeatures::IsSupported(LZCNT)) {
     CpuFeatureScope scope(this, LZCNT);
     lzcnt(dst, src);
@@ -1756,7 +1756,7 @@ void TurboAssembler::Lzcnt(Register dst, Operand src) {
   xor_(dst, Immediate(31));  // for x in [0..31], 31^x == 31-x.
 }
 
-void TurboAssembler::Tzcnt(Register dst, Operand src) {
+void MacroAssembler::Tzcnt(Register dst, Operand src) {
   if (CpuFeatures::IsSupported(BMI1)) {
     CpuFeatureScope scope(this, BMI1);
     tzcnt(dst, src);
@@ -1769,7 +1769,7 @@ void TurboAssembler::Tzcnt(Register dst, Operand src) {
   bind(&not_zero_src);
 }
 
-void TurboAssembler::Popcnt(Register dst, Operand src) {
+void MacroAssembler::Popcnt(Register dst, Operand src) {
   if (CpuFeatures::IsSupported(POPCNT)) {
     CpuFeatureScope scope(this, POPCNT);
     popcnt(dst, src);
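Note (a checkable model of the identity quoted in the Lzcnt fallback above): without the LZCNT feature, the code uses bsr, which yields the index of the highest set bit, then `xor dst, 31`. For x in [0..31], 31 ^ x == 31 - x (XOR against an all-ones mask of the range acts as subtraction from it), so one xor turns a top-bit index into a leading-zero count:

    int LeadingZeros32Model(unsigned v) {
      if (v == 0) return 32;  // bsr leaves its result undefined for zero
      int top = 31;
      while (!(v & (1u << top))) --top;  // index of highest set bit (bsr)
      return top ^ 31;                   // == 31 - top for top in [0..31]
    }

    // e.g. LeadingZeros32Model(1) == 31, LeadingZeros32Model(0x80000000u) == 0.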
@@ -1816,7 +1816,7 @@ void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
   }
 }
 
-void TurboAssembler::Check(Condition cc, AbortReason reason) {
+void MacroAssembler::Check(Condition cc, AbortReason reason) {
   Label L;
   j(cc, &L);
   Abort(reason);
@@ -1824,7 +1824,7 @@ void TurboAssembler::Check(Condition cc, AbortReason reason) {
   bind(&L);
 }
 
-void TurboAssembler::CheckStackAlignment() {
+void MacroAssembler::CheckStackAlignment() {
   ASM_CODE_COMMENT(this);
   int frame_alignment = base::OS::ActivationFrameAlignment();
   int frame_alignment_mask = frame_alignment - 1;
@@ -1839,7 +1839,7 @@ void TurboAssembler::CheckStackAlignment() {
   }
 }
 
-void TurboAssembler::Abort(AbortReason reason) {
+void MacroAssembler::Abort(AbortReason reason) {
   if (v8_flags.code_comments) {
     const char* msg = GetAbortReason(reason);
     RecordComment("Abort message: ");
@@ -1882,7 +1882,7 @@ void TurboAssembler::Abort(AbortReason reason) {
   int3();
 }
 
-void TurboAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
+void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
   ASM_CODE_COMMENT(this);
   int frame_alignment = base::OS::ActivationFrameAlignment();
   if (frame_alignment != 0) {
@@ -1898,14 +1898,14 @@ void TurboAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
   }
 }
 
-void TurboAssembler::CallCFunction(ExternalReference function,
+void MacroAssembler::CallCFunction(ExternalReference function,
                                    int num_arguments) {
   // Trashing eax is ok as it will be the return value.
   Move(eax, Immediate(function));
   CallCFunction(eax, num_arguments);
 }
 
-void TurboAssembler::CallCFunction(Register function, int num_arguments) {
+void MacroAssembler::CallCFunction(Register function, int num_arguments) {
   ASM_CODE_COMMENT(this);
   DCHECK_LE(num_arguments, kMaxCParameters);
   DCHECK(has_frame());
@@ -1956,7 +1956,7 @@ void TurboAssembler::CallCFunction(Register function, int num_arguments) {
   }
 }
 
-void TurboAssembler::PushPC() {
+void MacroAssembler::PushPC() {
   // Push the current PC onto the stack as "return address" via calling
   // the next instruction.
   Label get_pc;
@@ -1964,7 +1964,7 @@ void TurboAssembler::PushPC() {
   bind(&get_pc);
 }
 
-void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
+void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
   ASM_CODE_COMMENT(this);
   DCHECK_IMPLIES(options().isolate_independent_code,
                  Builtins::IsIsolateIndependentBuiltin(*code_object));
@@ -1977,7 +1977,7 @@ void TurboAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
   call(code_object, rmode);
 }
 
-void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
+void MacroAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
   ASM_CODE_COMMENT(this);
   static_assert(kSystemPointerSize == 4);
   static_assert(kSmiShiftSize == 0);
@@ -1993,13 +1993,13 @@ void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
                      IsolateData::builtin_entry_table_offset()));
 }
 
-void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+void MacroAssembler::CallBuiltinByIndex(Register builtin_index) {
   ASM_CODE_COMMENT(this);
   LoadEntryFromBuiltinIndex(builtin_index);
   call(builtin_index);
 }
 
-void TurboAssembler::CallBuiltin(Builtin builtin) {
+void MacroAssembler::CallBuiltin(Builtin builtin) {
   ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
   switch (options().builtin_call_jump_mode) {
     case BuiltinCallJumpMode::kAbsolute: {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void TurboAssembler::TailCallBuiltin(Builtin builtin) {
|
void MacroAssembler::TailCallBuiltin(Builtin builtin) {
|
||||||
ASM_CODE_COMMENT_STRING(this,
|
ASM_CODE_COMMENT_STRING(this,
|
||||||
CommentForOffHeapTrampoline("tail call", builtin));
|
CommentForOffHeapTrampoline("tail call", builtin));
|
||||||
switch (options().builtin_call_jump_mode) {
|
switch (options().builtin_call_jump_mode) {
|
||||||
@ -2040,17 +2040,17 @@ void TurboAssembler::TailCallBuiltin(Builtin builtin) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Operand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
|
Operand MacroAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
|
||||||
ASM_CODE_COMMENT(this);
|
ASM_CODE_COMMENT(this);
|
||||||
return Operand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(builtin));
|
return Operand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(builtin));
|
||||||
}
|
}
|
||||||
|
|
||||||
void TurboAssembler::LoadCodeEntry(Register destination, Register code_object) {
|
void MacroAssembler::LoadCodeEntry(Register destination, Register code_object) {
|
||||||
ASM_CODE_COMMENT(this);
|
ASM_CODE_COMMENT(this);
|
||||||
mov(destination, FieldOperand(code_object, Code::kCodeEntryPointOffset));
|
mov(destination, FieldOperand(code_object, Code::kCodeEntryPointOffset));
|
||||||
}
|
}
|
||||||
|
|
||||||
void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination,
|
void MacroAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination,
|
||||||
Register code_object) {
|
Register code_object) {
|
||||||
ASM_CODE_COMMENT(this);
|
ASM_CODE_COMMENT(this);
|
||||||
// Compute the InstructionStream object pointer from the code entry point.
|
// Compute the InstructionStream object pointer from the code entry point.
|
||||||
@ -2058,12 +2058,12 @@ void TurboAssembler::LoadCodeInstructionStreamNonBuiltin(Register destination,
|
|||||||
sub(destination, Immediate(InstructionStream::kHeaderSize - kHeapObjectTag));
|
sub(destination, Immediate(InstructionStream::kHeaderSize - kHeapObjectTag));
|
||||||
}
|
}
|
||||||
|
|
||||||
void TurboAssembler::CallCodeObject(Register code_object) {
|
void MacroAssembler::CallCodeObject(Register code_object) {
|
||||||
LoadCodeEntry(code_object, code_object);
|
LoadCodeEntry(code_object, code_object);
|
||||||
call(code_object);
|
call(code_object);
|
||||||
}
|
}
|
||||||
|
|
||||||
void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
|
void MacroAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
|
||||||
LoadCodeEntry(code_object, code_object);
|
LoadCodeEntry(code_object, code_object);
|
||||||
switch (jump_mode) {
|
switch (jump_mode) {
|
||||||
case JumpMode::kJump:
|
case JumpMode::kJump:
|
||||||
@ -2076,13 +2076,13 @@ void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void TurboAssembler::Jump(const ExternalReference& reference) {
|
void MacroAssembler::Jump(const ExternalReference& reference) {
|
||||||
DCHECK(root_array_available());
|
DCHECK(root_array_available());
|
||||||
jmp(Operand(kRootRegister, RootRegisterOffsetForExternalReferenceTableEntry(
|
jmp(Operand(kRootRegister, RootRegisterOffsetForExternalReferenceTableEntry(
|
||||||
isolate(), reference)));
|
isolate(), reference)));
|
||||||
}
|
}
|
||||||
|
|
||||||
void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
|
void MacroAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
|
||||||
DCHECK_IMPLIES(options().isolate_independent_code,
|
DCHECK_IMPLIES(options().isolate_independent_code,
|
||||||
Builtins::IsIsolateIndependentBuiltin(*code_object));
|
Builtins::IsIsolateIndependentBuiltin(*code_object));
|
||||||
Builtin builtin = Builtin::kNoBuiltinId;
|
Builtin builtin = Builtin::kNoBuiltinId;
|
||||||
@ -2094,7 +2094,7 @@ void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
|
|||||||
jmp(code_object, rmode);
|
jmp(code_object, rmode);
|
||||||
}
|
}
|
||||||
|
|
||||||
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
|
void MacroAssembler::CheckPageFlag(Register object, Register scratch, int mask,
|
||||||
Condition cc, Label* condition_met,
|
Condition cc, Label* condition_met,
|
||||||
Label::Distance condition_met_distance) {
|
Label::Distance condition_met_distance) {
|
||||||
ASM_CODE_COMMENT(this);
|
ASM_CODE_COMMENT(this);
|
||||||
@ -2113,7 +2113,7 @@ void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
|
|||||||
j(cc, condition_met, condition_met_distance);
|
j(cc, condition_met, condition_met_distance);
|
||||||
}
|
}
|
||||||
|
|
||||||
void TurboAssembler::ComputeCodeStartAddress(Register dst) {
|
void MacroAssembler::ComputeCodeStartAddress(Register dst) {
|
||||||
ASM_CODE_COMMENT(this);
|
ASM_CODE_COMMENT(this);
|
||||||
// In order to get the address of the current instruction, we first need
|
// In order to get the address of the current instruction, we first need
|
||||||
// to use a call and then use a pop, thus pushing the return address to
|
// to use a call and then use a pop, thus pushing the return address to
|
||||||
@ -2128,7 +2128,7 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
|
void MacroAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
|
||||||
DeoptimizeKind kind, Label* ret,
|
DeoptimizeKind kind, Label* ret,
|
||||||
Label*) {
|
Label*) {
|
||||||
ASM_CODE_COMMENT(this);
|
ASM_CODE_COMMENT(this);
|
||||||
@ -2138,8 +2138,8 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
|
|||||||
: Deoptimizer::kEagerDeoptExitSize);
|
: Deoptimizer::kEagerDeoptExitSize);
|
||||||
}
|
}
|
||||||
|
|
||||||
void TurboAssembler::Trap() { int3(); }
|
void MacroAssembler::Trap() { int3(); }
|
||||||
void TurboAssembler::DebugBreak() { int3(); }
|
void MacroAssembler::DebugBreak() { int3(); }
|
||||||
|
|
||||||
} // namespace internal
|
} // namespace internal
|
||||||
} // namespace v8
|
} // namespace v8
|
||||||
|
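
The renamed ia32 helpers above keep their old behavior; CallCodeObject, for
example, is still LoadCodeEntry followed by an indirect call. A minimal sketch
of that sequence, assuming a hypothetical pre-configured `masm` (the field
offset and FieldOperand helper are as shown in the hunks above):

    // Illustration only, not part of this commit.
    void CallViaCodeObject(MacroAssembler* masm, Register code_object) {
      // Load the raw entry address stored in the Code object...
      masm->mov(code_object,
                FieldOperand(code_object, Code::kCodeEntryPointOffset));
      // ...and call through it, clobbering code_object.
      masm->call(code_object);
    }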
@@ -21,10 +21,10 @@
 #include "src/codegen/ia32/assembler-ia32.h"
 #include "src/codegen/ia32/register-ia32.h"
 #include "src/codegen/label.h"
+#include "src/codegen/macro-assembler-base.h"
 #include "src/codegen/reglist.h"
 #include "src/codegen/reloc-info.h"
 #include "src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h"
-#include "src/codegen/turbo-assembler.h"
 #include "src/common/globals.h"
 #include "src/execution/frames.h"
 #include "src/handles/handles.h"
@@ -68,10 +68,10 @@ class StackArgumentsAccessor {
   DISALLOW_IMPLICIT_CONSTRUCTORS(StackArgumentsAccessor);
 };
 
-class V8_EXPORT_PRIVATE TurboAssembler
-    : public SharedTurboAssemblerBase<TurboAssembler> {
+class V8_EXPORT_PRIVATE MacroAssembler
+    : public SharedMacroAssembler<MacroAssembler> {
  public:
-  using SharedTurboAssemblerBase<TurboAssembler>::SharedTurboAssemblerBase;
+  using SharedMacroAssembler<MacroAssembler>::SharedMacroAssembler;
 
   void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
                      Label* condition_met,
@@ -411,17 +411,6 @@ class V8_EXPORT_PRIVATE TurboAssembler
   // Define an exception handler and bind a label.
   void BindExceptionHandler(Label* label) { bind(label); }
 
- protected:
-  // Drops arguments assuming that the return address was already popped.
-  void DropArguments(Register count, ArgumentsCountType type = kCountIsInteger,
-                     ArgumentsCountMode mode = kCountExcludesReceiver);
-};
-
-// MacroAssembler implements a collection of frequently used macros.
-class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
- public:
-  using TurboAssembler::TurboAssembler;
-
   void PushRoot(RootIndex index);
 
   // Compare the object in a register to a value and jump if they are equal.
@@ -671,6 +660,11 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
   void StackOverflowCheck(Register num_args, Register scratch,
                           Label* stack_overflow, bool include_receiver = false);
 
+ protected:
+  // Drops arguments assuming that the return address was already popped.
+  void DropArguments(Register count, ArgumentsCountType type = kCountIsInteger,
+                     ArgumentsCountMode mode = kCountExcludesReceiver);
+
  private:
   // Helper functions for generating invokes.
   void InvokePrologue(Register expected_parameter_count,
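
The header change above is the core of the refactoring: the old two-level
split collapses into one class, and members such as DropArguments simply move
into the merged MacroAssembler. A standalone schematic of the before/after
shape (hypothetical stand-in types, not V8 code):

    // Before: platform macros were split across two classes.
    //   class TurboAssembler : public SharedTurboAssemblerBase<TurboAssembler>;
    //   class MacroAssembler : public TurboAssembler;
    // After: a single class inherits the shared CRTP base directly.
    template <typename Derived>
    class SharedMacroAssembler { /* platform-independent macros */ };

    class MacroAssembler : public SharedMacroAssembler<MacroAssembler> {
     protected:
      void DropArguments();  // moved here from the old TurboAssembler
    };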

[File diff suppressed because it is too large]
@@ -59,9 +59,9 @@ inline MemOperand FieldMemOperand(Register object, int offset) {
   return MemOperand(object, offset - kHeapObjectTag);
 }
 
-class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
  public:
-  using TurboAssemblerBase::TurboAssemblerBase;
+  using MacroAssemblerBase::MacroAssemblerBase;
 
   // Activation support.
   void EnterFrame(StackFrame::Type type);
@@ -773,46 +773,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   // Define an exception handler and bind a label.
   void BindExceptionHandler(Label* label) { bind(label); }
 
- protected:
-  inline Register GetRkAsRegisterHelper(const Operand& rk, Register scratch);
-  inline int32_t GetOffset(Label* L, OffsetSize bits);
-
- private:
-  bool has_double_zero_reg_set_ = false;
-
-  // Performs a truncating conversion of a floating point number as used by
-  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
-  // succeeds, otherwise falls through if result is saturated. On return
-  // 'result' either holds answer, or is clobbered on fall through.
-  void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
-                                  Label* done);
-
-  bool BranchShortOrFallback(Label* L, Condition cond, Register rj,
-                             const Operand& rk, bool need_link);
-
-  // f32 or f64
-  void CompareF(FPURegister cmp1, FPURegister cmp2, FPUCondition cc,
-                CFRegister cd, bool f32 = true);
-
-  void CompareIsNanF(FPURegister cmp1, FPURegister cmp2, CFRegister cd,
-                     bool f32 = true);
-
-  void CallCFunctionHelper(Register function, int num_reg_arguments,
-                           int num_double_arguments);
-
-  void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode);
-
-  void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode);
-
-  // Push a fixed frame, consisting of ra, fp.
-  void PushCommonFrame(Register marker_reg = no_reg);
-};
-
-// MacroAssembler implements a collection of frequently used macros.
-class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
- public:
-  using TurboAssembler::TurboAssembler;
-
   // It assumes that the arguments are located below the stack pointer.
   // argc is the number of arguments not including the receiver.
   // TODO(LOONG_dev): LOONG64: Remove this function once we stick with the
@@ -1079,17 +1039,50 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
     DecodeField<Field>(reg, reg);
   }
 
+ protected:
+  inline Register GetRkAsRegisterHelper(const Operand& rk, Register scratch);
+  inline int32_t GetOffset(Label* L, OffsetSize bits);
+
  private:
+  bool has_double_zero_reg_set_ = false;
+
   // Helper functions for generating invokes.
   void InvokePrologue(Register expected_parameter_count,
                       Register actual_parameter_count, Label* done,
                       InvokeType type);
 
+  // Performs a truncating conversion of a floating point number as used by
+  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
+  // succeeds, otherwise falls through if result is saturated. On return
+  // 'result' either holds answer, or is clobbered on fall through.
+  void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
+                                  Label* done);
+
+  bool BranchShortOrFallback(Label* L, Condition cond, Register rj,
+                             const Operand& rk, bool need_link);
+
+  // f32 or f64
+  void CompareF(FPURegister cmp1, FPURegister cmp2, FPUCondition cc,
+                CFRegister cd, bool f32 = true);
+
+  void CompareIsNanF(FPURegister cmp1, FPURegister cmp2, CFRegister cd,
+                     bool f32 = true);
+
+  void CallCFunctionHelper(Register function, int num_reg_arguments,
+                           int num_double_arguments);
+
+  void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode);
+
+  void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode);
+
+  // Push a fixed frame, consisting of ra, fp.
+  void PushCommonFrame(Register marker_reg = no_reg);
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
 };
 
 template <typename Func>
-void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
+void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
                                          Func GetLabelFunction) {
   UseScratchRegisterScope scope(this);
   Register scratch = scope.Acquire();
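
The TryInlineTruncateDoubleToI comment block that moves in this hunk refers to
ECMA-262 ToInt32: truncate toward zero, wrap modulo 2^32, reinterpret as
signed. A host-side reference implementation of those semantics (plain C++,
not the assembler fast path):

    #include <cmath>
    #include <cstdint>

    int32_t ToInt32Reference(double d) {
      if (std::isnan(d) || std::isinf(d)) return 0;
      double truncated = std::trunc(d);               // toward zero
      double m = std::fmod(truncated, 4294967296.0);  // modulo 2^32
      if (m < 0) m += 4294967296.0;                   // normalize into [0, 2^32)
      return static_cast<int32_t>(
          static_cast<uint32_t>(static_cast<uint64_t>(m)));
    }
    // ToInt32Reference(3.7) == 3; ToInt32Reference(4294967301.0) == 5.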
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/codegen/turbo-assembler.h"
+#include "src/codegen/macro-assembler-base.h"
 
 #include "src/builtins/builtins.h"
 #include "src/builtins/constants-table-builder.h"
@@ -15,7 +15,7 @@
 namespace v8 {
 namespace internal {
 
-TurboAssemblerBase::TurboAssemblerBase(Isolate* isolate,
+MacroAssemblerBase::MacroAssemblerBase(Isolate* isolate,
                                        const AssemblerOptions& options,
                                        CodeObjectRequired create_code_object,
                                        std::unique_ptr<AssemblerBuffer> buffer)
@@ -26,7 +26,7 @@ TurboAssemblerBase::TurboAssemblerBase(Isolate* isolate,
   }
 }
 
-Address TurboAssemblerBase::BuiltinEntry(Builtin builtin) {
+Address MacroAssemblerBase::BuiltinEntry(Builtin builtin) {
   DCHECK(Builtins::IsBuiltinId(builtin));
   if (isolate_ != nullptr) {
     Address entry = isolate_->builtin_entry_table()[Builtins::ToInt(builtin)];
@@ -38,7 +38,7 @@ Address TurboAssemblerBase::BuiltinEntry(Builtin builtin) {
   return d.InstructionStartOfBuiltin(builtin);
 }
 
-void TurboAssemblerBase::IndirectLoadConstant(Register destination,
+void MacroAssemblerBase::IndirectLoadConstant(Register destination,
                                               Handle<HeapObject> object) {
   CHECK(root_array_available_);
 
@@ -71,7 +71,7 @@ void TurboAssemblerBase::IndirectLoadConstant(Register destination,
   }
 }
 
-void TurboAssemblerBase::IndirectLoadExternalReference(
+void MacroAssemblerBase::IndirectLoadExternalReference(
     Register destination, ExternalReference reference) {
   CHECK(root_array_available_);
 
@@ -90,24 +90,24 @@ void TurboAssemblerBase::IndirectLoadExternalReference(
 }
 
 // static
-int32_t TurboAssemblerBase::RootRegisterOffsetForRootIndex(
+int32_t MacroAssemblerBase::RootRegisterOffsetForRootIndex(
     RootIndex root_index) {
   return IsolateData::root_slot_offset(root_index);
 }
 
 // static
-int32_t TurboAssemblerBase::RootRegisterOffsetForBuiltin(Builtin builtin) {
+int32_t MacroAssemblerBase::RootRegisterOffsetForBuiltin(Builtin builtin) {
   return IsolateData::BuiltinSlotOffset(builtin);
 }
 
 // static
-intptr_t TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+intptr_t MacroAssemblerBase::RootRegisterOffsetForExternalReference(
    Isolate* isolate, const ExternalReference& reference) {
   return static_cast<intptr_t>(reference.address() - isolate->isolate_root());
 }
 
 // static
-int32_t TurboAssemblerBase::RootRegisterOffsetForExternalReferenceTableEntry(
+int32_t MacroAssemblerBase::RootRegisterOffsetForExternalReferenceTableEntry(
     Isolate* isolate, const ExternalReference& reference) {
   // Encode as an index into the external reference table stored on the
   // isolate.
@@ -120,13 +120,13 @@ int32_t TurboAssemblerBase::RootRegisterOffsetForExternalReferenceTableEntry(
 }
 
 // static
-bool TurboAssemblerBase::IsAddressableThroughRootRegister(
+bool MacroAssemblerBase::IsAddressableThroughRootRegister(
     Isolate* isolate, const ExternalReference& reference) {
   Address address = reference.address();
   return isolate->root_register_addressable_region().contains(address);
 }
 
-Tagged_t TurboAssemblerBase::ReadOnlyRootPtr(RootIndex index) {
+Tagged_t MacroAssemblerBase::ReadOnlyRootPtr(RootIndex index) {
   DCHECK(RootsTable::IsReadOnly(index));
   CHECK(V8_STATIC_ROOTS_BOOL);
   CHECK(isolate_->root(index).IsHeapObject());
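
RootRegisterOffsetForExternalReference above is plain pointer arithmetic: the
returned offset is the distance from the isolate root to the referenced
address, so the reference becomes reachable as [kRootRegister + offset]. A
worked illustration with made-up addresses:

    #include <cstdint>

    // Mirrors reference.address() - isolate->isolate_root(); values are fake.
    intptr_t RootRegisterOffsetFor(uintptr_t reference_address,
                                   uintptr_t isolate_root) {
      return static_cast<intptr_t>(reference_address - isolate_root);
    }
    // isolate_root = 0x1000, reference_address = 0x1230 -> offset 0x230,
    // i.e. the slot is addressable as [kRootRegister + 0x230].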
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef V8_CODEGEN_TURBO_ASSEMBLER_H_
-#define V8_CODEGEN_TURBO_ASSEMBLER_H_
+#ifndef V8_CODEGEN_MACRO_ASSEMBLER_BASE_H_
+#define V8_CODEGEN_MACRO_ASSEMBLER_BASE_H_
 
 #include <memory>
 
@@ -15,30 +15,24 @@
 namespace v8 {
 namespace internal {
 
-// Common base class for platform-specific TurboAssemblers containing
+// Common base class for platform-specific MacroAssemblers containing
 // platform-independent bits.
-// You will encounter two subclasses, TurboAssembler (derives from
-// TurboAssemblerBase), and MacroAssembler (derives from TurboAssembler). The
-// main difference is that MacroAssembler is allowed to access the isolate, and
-// TurboAssembler accesses the isolate in a very limited way. TurboAssembler
-// contains all the functionality that is used by Turbofan, and does not expect
-// to be running on the main thread.
-class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
+// TODO(victorgomes): We should use LocalIsolate instead of Isolate in the
+// methods of this class.
+class V8_EXPORT_PRIVATE MacroAssemblerBase : public Assembler {
  public:
   // Constructors are declared public to inherit them in derived classes
   // with `using` directive.
-  TurboAssemblerBase(Isolate* isolate, CodeObjectRequired create_code_object,
+  MacroAssemblerBase(Isolate* isolate, CodeObjectRequired create_code_object,
                      std::unique_ptr<AssemblerBuffer> buffer = {})
-      : TurboAssemblerBase(isolate, AssemblerOptions::Default(isolate),
+      : MacroAssemblerBase(isolate, AssemblerOptions::Default(isolate),
                            create_code_object, std::move(buffer)) {}
 
-  TurboAssemblerBase(Isolate* isolate, const AssemblerOptions& options,
+  MacroAssemblerBase(Isolate* isolate, const AssemblerOptions& options,
                      CodeObjectRequired create_code_object,
                      std::unique_ptr<AssemblerBuffer> buffer = {});
 
-  Isolate* isolate() const {
-    return isolate_;
-  }
+  Isolate* isolate() const { return isolate_; }
 
   Handle<HeapObject> CodeObject() const {
     DCHECK(!code_object_.is_null());
@@ -135,25 +129,25 @@ class V8_EXPORT_PRIVATE TurboAssemblerBase : public Assembler {
 
   int comment_depth_ = 0;
 
-  DISALLOW_IMPLICIT_CONSTRUCTORS(TurboAssemblerBase);
+  DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssemblerBase);
 };
 
 // Avoids emitting calls to the {Builtin::kAbort} builtin when emitting
 // debug code during the lifetime of this scope object.
 class V8_NODISCARD HardAbortScope {
  public:
-  explicit HardAbortScope(TurboAssemblerBase* assembler)
+  explicit HardAbortScope(MacroAssemblerBase* assembler)
       : assembler_(assembler), old_value_(assembler->should_abort_hard()) {
     assembler_->set_abort_hard(true);
   }
   ~HardAbortScope() { assembler_->set_abort_hard(old_value_); }
 
  private:
-  TurboAssemblerBase* assembler_;
+  MacroAssemblerBase* assembler_;
   bool old_value_;
 };
 
 }  // namespace internal
 }  // namespace v8
 
-#endif  // V8_CODEGEN_TURBO_ASSEMBLER_H_
+#endif  // V8_CODEGEN_MACRO_ASSEMBLER_BASE_H_
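
HardAbortScope, as the hunk shows, is a plain RAII toggle over
should_abort_hard(). A usage sketch under the renamed API (the emit function
is hypothetical):

    // Illustration only: force hard aborts for one emitted sequence.
    void EmitCheckedSequence(MacroAssemblerBase* masm) {
      HardAbortScope hard_abort(masm);  // calls set_abort_hard(true)
      // ... emit code whose failures must trap instead of calling the
      // Builtin::kAbort runtime path ...
    }  // destructor restores the previous should_abort_hard() value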
@@ -5,7 +5,7 @@
 #ifndef V8_CODEGEN_MACRO_ASSEMBLER_H_
 #define V8_CODEGEN_MACRO_ASSEMBLER_H_
 
-#include "src/codegen/turbo-assembler.h"
+#include "src/codegen/macro-assembler-base.h"
 #include "src/execution/frames.h"
 #include "src/heap/heap.h"
 
@@ -82,25 +82,25 @@ static constexpr int kMaxCParameters = 256;
 
 class V8_NODISCARD FrameScope {
  public:
-  explicit FrameScope(TurboAssembler* tasm, StackFrame::Type type)
+  explicit FrameScope(MacroAssembler* masm, StackFrame::Type type)
       :
 #ifdef V8_CODE_COMMENTS
-        comment_(tasm, frame_name(type)),
+        comment_(masm, frame_name(type)),
 #endif
-        tasm_(tasm),
+        masm_(masm),
         type_(type),
-        old_has_frame_(tasm->has_frame()) {
-    tasm->set_has_frame(true);
+        old_has_frame_(masm->has_frame()) {
+    masm->set_has_frame(true);
     if (type != StackFrame::MANUAL && type_ != StackFrame::NO_FRAME_TYPE) {
-      tasm->EnterFrame(type);
+      masm->EnterFrame(type);
     }
   }
 
   ~FrameScope() {
     if (type_ != StackFrame::MANUAL && type_ != StackFrame::NO_FRAME_TYPE) {
-      tasm_->LeaveFrame(type_);
+      masm_->LeaveFrame(type_);
     }
-    tasm_->set_has_frame(old_has_frame_);
+    masm_->set_has_frame(old_has_frame_);
   }
 
  private:
@@ -125,7 +125,7 @@ class V8_NODISCARD FrameScope {
   Assembler::CodeComment comment_;
 #endif  // V8_CODE_COMMENTS
 
-  TurboAssembler* tasm_;
+  MacroAssembler* masm_;
   StackFrame::Type const type_;
   bool const old_has_frame_;
 };
@@ -198,7 +198,7 @@ class V8_NODISCARD AllowExternalCallThatCantCauseGC : public FrameScope {
 // scope object.
 class V8_NODISCARD NoRootArrayScope {
  public:
-  explicit NoRootArrayScope(TurboAssembler* masm)
+  explicit NoRootArrayScope(MacroAssembler* masm)
       : masm_(masm), old_value_(masm->root_array_available()) {
     masm->set_root_array_available(false);
   }
@@ -206,7 +206,7 @@ class V8_NODISCARD NoRootArrayScope {
   ~NoRootArrayScope() { masm_->set_root_array_available(old_value_); }
 
  private:
-  TurboAssembler* masm_;
+  MacroAssembler* masm_;
   bool old_value_;
 };
 
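
FrameScope keeps its RAII contract after the rename: unless the frame type is
MANUAL or NO_FRAME_TYPE it emits EnterFrame on construction and LeaveFrame on
destruction, saving and restoring has_frame(). A sketch (the generator
function is hypothetical):

    // Illustration only: emit a body inside an INTERNAL frame.
    void GenerateWithFrame(MacroAssembler* masm) {
      FrameScope scope(masm, StackFrame::INTERNAL);  // emits EnterFrame
      // ... emit the body; masm->has_frame() is true in here ...
    }  // emits LeaveFrame and restores the saved has_frame() flag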
@@ -819,7 +819,7 @@ void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
     Instr instr_b = REGIMM | BGEZAL;  // Branch and link.
     instr_b = SetBranchOffset(pos, target_pos, instr_b);
     // Correct ra register to point to one instruction after jalr from
-    // TurboAssembler::BranchAndLinkLong.
+    // MacroAssembler::BranchAndLinkLong.
     Instr instr_a = DADDIU | ra.code() << kRsShift | ra.code() << kRtShift |
                     kOptimizedBranchAndLinkLongReturnOffset;
 
@@ -294,7 +294,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
 
   // Adjust ra register in branch delay slot of bal instruction so to skip
   // instructions not needed after optimization of PIC in
-  // TurboAssembler::BranchAndLink method.
+  // MacroAssembler::BranchAndLink method.
   static constexpr int kOptimizedBranchAndLinkLongReturnOffset = 4 * kInstrSize;
 

[File diff suppressed because it is too large]
@@ -90,9 +90,9 @@ inline MemOperand CFunctionArgumentOperand(int index) {
   return MemOperand(sp, offset);
 }
 
-class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
  public:
-  using TurboAssemblerBase::TurboAssemblerBase;
+  using MacroAssemblerBase::MacroAssemblerBase;
 
   // Activation support.
   void EnterFrame(StackFrame::Type type);
@@ -913,79 +913,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   // Define an exception handler and bind a label.
   void BindExceptionHandler(Label* label) { bind(label); }
 
- protected:
-  inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
-  inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
-
- private:
-  bool has_double_zero_reg_set_ = false;
-
-  // Performs a truncating conversion of a floating point number as used by
-  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
-  // succeeds, otherwise falls through if result is saturated. On return
-  // 'result' either holds answer, or is clobbered on fall through.
-  void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
-                                  Label* done);
-
-  void CompareF(SecondaryField sizeField, FPUCondition cc, FPURegister cmp1,
-                FPURegister cmp2);
-
-  void CompareIsNanF(SecondaryField sizeField, FPURegister cmp1,
-                     FPURegister cmp2);
-
-  void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond,
-                      MSARegister wt, BranchDelaySlot bd = PROTECT);
-
-  void CallCFunctionHelper(Register function, int num_reg_arguments,
-                           int num_double_arguments);
-
-  // TODO(mips) Reorder parameters so out parameters come last.
-  bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
-  bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
-                       Register* scratch, const Operand& rt);
-
-  void BranchShortHelperR6(int32_t offset, Label* L);
-  void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
-  bool BranchShortHelperR6(int32_t offset, Label* L, Condition cond,
-                           Register rs, const Operand& rt);
-  bool BranchShortHelper(int16_t offset, Label* L, Condition cond, Register rs,
-                         const Operand& rt, BranchDelaySlot bdslot);
-  bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
-                        const Operand& rt, BranchDelaySlot bdslot);
-
-  void BranchAndLinkShortHelperR6(int32_t offset, Label* L);
-  void BranchAndLinkShortHelper(int16_t offset, Label* L,
-                                BranchDelaySlot bdslot);
-  void BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot = PROTECT);
-  void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
-  bool BranchAndLinkShortHelperR6(int32_t offset, Label* L, Condition cond,
-                                  Register rs, const Operand& rt);
-  bool BranchAndLinkShortHelper(int16_t offset, Label* L, Condition cond,
-                                Register rs, const Operand& rt,
-                                BranchDelaySlot bdslot);
-  bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
-                               Register rs, const Operand& rt,
-                               BranchDelaySlot bdslot);
-  void BranchLong(Label* L, BranchDelaySlot bdslot);
-  void BranchAndLinkLong(Label* L, BranchDelaySlot bdslot);
-
-  template <typename RoundFunc>
-  void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode,
-                   RoundFunc round);
-
-  template <typename RoundFunc>
-  void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode,
-                  RoundFunc round);
-
-  // Push a fixed frame, consisting of ra, fp.
-  void PushCommonFrame(Register marker_reg = no_reg);
-};
-
-// MacroAssembler implements a collection of frequently used macros.
-class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
- public:
-  using TurboAssembler::TurboAssembler;
-
   // It assumes that the arguments are located below the stack pointer.
   // argc is the number of arguments not including the receiver.
   // TODO(victorgomes): Remove this function once we stick with the reversed
@@ -1087,9 +1014,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
   void Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft,
               FPURegister scratch);
 
-  void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond,
-                      MSARegister wt, BranchDelaySlot bd = PROTECT);
-
   // Enter exit frame.
   // argc - argument count to be dropped by LeaveExitFrame.
   // stack_space - extra stack space.
@@ -1269,17 +1193,83 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
     DecodeField<Field>(reg, reg);
   }
 
+ protected:
+  inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
+  inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
+
  private:
+  bool has_double_zero_reg_set_ = false;
+
   // Helper functions for generating invokes.
   void InvokePrologue(Register expected_parameter_count,
                       Register actual_parameter_count, Label* done,
                       InvokeType type);
 
+  // Performs a truncating conversion of a floating point number as used by
+  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
+  // succeeds, otherwise falls through if result is saturated. On return
+  // 'result' either holds answer, or is clobbered on fall through.
+  void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
+                                  Label* done);
+
+  void CompareF(SecondaryField sizeField, FPUCondition cc, FPURegister cmp1,
+                FPURegister cmp2);
+
+  void CompareIsNanF(SecondaryField sizeField, FPURegister cmp1,
+                     FPURegister cmp2);
+
+  void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond,
+                      MSARegister wt, BranchDelaySlot bd = PROTECT);
+
+  void CallCFunctionHelper(Register function, int num_reg_arguments,
+                           int num_double_arguments);
+
+  // TODO(mips) Reorder parameters so out parameters come last.
+  bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
+  bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
+                       Register* scratch, const Operand& rt);
+
+  void BranchShortHelperR6(int32_t offset, Label* L);
+  void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
+  bool BranchShortHelperR6(int32_t offset, Label* L, Condition cond,
+                           Register rs, const Operand& rt);
+  bool BranchShortHelper(int16_t offset, Label* L, Condition cond, Register rs,
+                         const Operand& rt, BranchDelaySlot bdslot);
+  bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
+                        const Operand& rt, BranchDelaySlot bdslot);
+
+  void BranchAndLinkShortHelperR6(int32_t offset, Label* L);
+  void BranchAndLinkShortHelper(int16_t offset, Label* L,
+                                BranchDelaySlot bdslot);
+  void BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot = PROTECT);
+  void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
+  bool BranchAndLinkShortHelperR6(int32_t offset, Label* L, Condition cond,
+                                  Register rs, const Operand& rt);
+  bool BranchAndLinkShortHelper(int16_t offset, Label* L, Condition cond,
+                                Register rs, const Operand& rt,
+                                BranchDelaySlot bdslot);
+  bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
+                               Register rs, const Operand& rt,
+                               BranchDelaySlot bdslot);
+  void BranchLong(Label* L, BranchDelaySlot bdslot);
+  void BranchAndLinkLong(Label* L, BranchDelaySlot bdslot);
+
+  template <typename RoundFunc>
+  void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode,
+                   RoundFunc round);
+
+  template <typename RoundFunc>
+  void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode,
+                  RoundFunc round);
+
+  // Push a fixed frame, consisting of ra, fp.
+  void PushCommonFrame(Register marker_reg = no_reg);
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
 };
 
 template <typename Func>
-void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
+void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
                                          Func GetLabelFunction) {
   // Ensure that dd-ed labels following this instruction use 8 bytes aligned
   // addresses.
@@ -148,7 +148,7 @@ Handle<Object> Assembler::code_target_object_handle_at(Address pc,
 HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
   DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
   if (IsCompressedEmbeddedObject(rmode_)) {
-    return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTaggedAny(
+    return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTagged(
         cage_base,
         Assembler::target_compressed_address_at(pc_, constant_pool_))));
   } else {
@@ -1570,7 +1570,7 @@ class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {
 
  private:
   friend class Assembler;
-  friend class TurboAssembler;
+  friend class MacroAssembler;
 
   Assembler* assembler_;
   RegList old_available_;
@@ -151,7 +151,7 @@ enum Condition {
   kNotZero = 16,
 };
 
-inline Condition check_condition(Condition cond) {
+inline Condition to_condition(Condition cond) {
   switch (cond) {
     case kUnsignedLessThan:
       return lt;
@@ -171,6 +171,31 @@ inline Condition check_condition(Condition cond) {
   return cond;
 }
 
+inline bool is_signed(Condition cond) {
+  switch (cond) {
+    case kEqual:
+    case kNotEqual:
+    case kLessThan:
+    case kGreaterThan:
+    case kLessThanEqual:
+    case kGreaterThanEqual:
+    case kOverflow:
+    case kNoOverflow:
+    case kZero:
+    case kNotZero:
+      return true;
+
+    case kUnsignedLessThan:
+    case kUnsignedGreaterThan:
+    case kUnsignedLessThanEqual:
+    case kUnsignedGreaterThanEqual:
+      return false;
+
+    default:
+      UNREACHABLE();
+  }
+}
+
 inline Condition NegateCondition(Condition cond) {
   DCHECK(cond != al);
   return static_cast<Condition>(cond ^ ne);
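
The new is_signed predicate above lets the PPC backend derive, from the
platform-independent condition alone, whether a signed or an unsigned compare
must precede the branch. A small illustration using only the two helpers in
this hunk (the BranchChoice struct is hypothetical):

    // Illustration only: classify a condition for later instruction selection.
    struct BranchChoice {
      bool use_unsigned_compare;  // pick the logical compare instruction
      Condition resolved;         // PPC condition from to_condition()
    };

    inline BranchChoice ClassifyCondition(Condition cond) {
      return BranchChoice{!is_signed(cond), to_condition(cond)};
    }
    // e.g. kUnsignedLessThan -> {true, lt}, per the case shown above.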

[File diff suppressed because it is too large]
@@ -47,9 +47,9 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
 #define ClearRightImm clrrwi
 #endif
 
-class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
  public:
-  using TurboAssemblerBase::TurboAssemblerBase;
+  using MacroAssemblerBase::MacroAssemblerBase;
 
   void CallBuiltin(Builtin builtin, Condition cond = al);
   void TailCallBuiltin(Builtin builtin, Condition cond = al,
@@ -1010,19 +1010,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
 #endif
   }
 
-  // Loads a field containing a HeapObject and decompresses it if pointer
-  // compression is enabled.
-  void LoadTaggedPointerField(const Register& destination,
-                              const MemOperand& field_operand,
-                              const Register& scratch = no_reg);
+  // Loads a field containing any tagged value and decompresses it if necessary.
+  void LoadTaggedField(const Register& destination,
+                       const MemOperand& field_operand,
+                       const Register& scratch = no_reg);
   void LoadTaggedSignedField(Register destination, MemOperand field_operand,
                              Register scratch);
 
-  // Loads a field containing any tagged value and decompresses it if necessary.
-  void LoadAnyTaggedField(const Register& destination,
-                          const MemOperand& field_operand,
-                          const Register& scratch = no_reg);
-
   // Compresses and stores tagged value to given on-heap location.
   void StoreTaggedField(const Register& value,
                         const MemOperand& dst_field_operand,
@@ -1030,11 +1024,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
 
   void DecompressTaggedSigned(Register destination, MemOperand field_operand);
   void DecompressTaggedSigned(Register destination, Register src);
-  void DecompressTaggedPointer(Register destination, MemOperand field_operand);
-  void DecompressTaggedPointer(Register destination, Register source);
-  void DecompressTaggedPointer(const Register& destination, Tagged_t immediate);
-  void DecompressAnyTagged(Register destination, MemOperand field_operand);
-  void DecompressAnyTagged(Register destination, Register source);
+  void DecompressTagged(Register destination, MemOperand field_operand);
+  void DecompressTagged(Register destination, Register source);
+  void DecompressTagged(const Register& destination, Tagged_t immediate);
 
   void LoadF64(DoubleRegister dst, const MemOperand& mem,
                Register scratch = no_reg);
@@ -1438,21 +1430,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void S128Select(Simd128Register dst, Simd128Register src1,
                   Simd128Register src2, Simd128Register mask);
 
- private:
-  static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
-
-  int CalculateStackPassedWords(int num_reg_arguments,
-                                int num_double_arguments);
-  void CallCFunctionHelper(Register function, int num_reg_arguments,
-                           int num_double_arguments,
-                           bool has_function_descriptor);
-};
-
-// MacroAssembler implements a collection of frequently used acros.
-class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
- public:
-  using TurboAssembler::TurboAssembler;
-
   // It assumes that the arguments are located below the stack pointer.
   // argc is the number of arguments not including the receiver.
   // TODO(victorgomes): Remove this function once we stick with the reversed
@@ -1745,6 +1722,12 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
  private:
   static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
 
+  int CalculateStackPassedWords(int num_reg_arguments,
+                                int num_double_arguments);
+  void CallCFunctionHelper(Register function, int num_reg_arguments,
+                           int num_double_arguments,
+                           bool has_function_descriptor);
+
   // Helper functions for generating invokes.
   void InvokePrologue(Register expected_parameter_count,
                       Register actual_parameter_count, Label* done,
|
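
The tagged-load API above folds LoadTaggedPointerField and LoadAnyTaggedField
into a single LoadTaggedField (the DecompressTagged* family is merged the same
way), so call sites migrate mechanically. A hedged sketch of the rewrite
(hypothetical masm/object/dst; FieldMemOperand as shown elsewhere in this
commit):

    // Illustration only, not a call site from this commit.
    void LoadMap(MacroAssembler& masm, Register dst, Register object) {
      // Old, value-kind-specific entry point:
      //   masm.LoadTaggedPointerField(
      //       dst, FieldMemOperand(object, HeapObject::kMapOffset));
      // New: one helper covers HeapObject and Smi fields alike.
      masm.LoadTaggedField(dst, FieldMemOperand(object, HeapObject::kMapOffset));
    }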
@@ -162,7 +162,7 @@ void Assembler::deserialization_set_target_internal_reference_at(
 HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
   DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
   if (IsCompressedEmbeddedObject(rmode_)) {
-    return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTaggedAny(
+    return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTagged(
         cage_base,
         Assembler::target_compressed_address_at(pc_, constant_pool_))));
   } else {
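
DecompressTaggedAny becomes DecompressTagged here as well; the mechanics are
unchanged. On pointer-compression builds the field holds a 32-bit value that
is rebased against the cage base to recover the full pointer. A simplified
model of that step (an assumption about the scheme, not code from the tree):

    #include <cstdint>

    // Model: the compressed field is a 32-bit offset into the 4 GiB cage.
    uintptr_t DecompressTaggedModel(uintptr_t cage_base, uint32_t compressed) {
      return cage_base + compressed;
    }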

[File diff suppressed because it is too large]
@ -90,9 +90,9 @@ inline MemOperand CFunctionArgumentOperand(int index) {
|
|||||||
return MemOperand(sp, offset);
|
return MemOperand(sp, offset);
|
||||||
}
|
}
|
||||||
|
|
||||||
class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
|
class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
|
||||||
public:
|
public:
|
||||||
using TurboAssemblerBase::TurboAssemblerBase;
|
using MacroAssemblerBase::MacroAssemblerBase;
|
||||||
|
|
||||||
// Activation support.
|
// Activation support.
|
||||||
void EnterFrame(StackFrame::Type type);
|
void EnterFrame(StackFrame::Type type);
|
||||||
@ -1072,14 +1072,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
|
|||||||
// ---------------------------------------------------------------------------
|
// ---------------------------------------------------------------------------
|
||||||
// Pointer compression Support
|
// Pointer compression Support
|
||||||
|
|
||||||
// Loads a field containing a HeapObject and decompresses it if pointer
|
|
||||||
// compression is enabled.
|
|
||||||
void LoadTaggedPointerField(const Register& destination,
|
|
||||||
const MemOperand& field_operand);
|
|
||||||
|
|
||||||
// Loads a field containing any tagged value and decompresses it if necessary.
|
// Loads a field containing any tagged value and decompresses it if necessary.
|
||||||
void LoadAnyTaggedField(const Register& destination,
|
void LoadTaggedField(const Register& destination,
|
||||||
const MemOperand& field_operand);
|
const MemOperand& field_operand);
|
||||||
|
|
||||||
// Loads a field containing a tagged signed value and decompresses it if
|
// Loads a field containing a tagged signed value and decompresses it if
|
||||||
// necessary.
|
// necessary.
|
||||||
@ -1095,12 +1090,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
|
|||||||
|
|
||||||
void DecompressTaggedSigned(const Register& destination,
|
void DecompressTaggedSigned(const Register& destination,
|
||||||
const MemOperand& field_operand);
|
const MemOperand& field_operand);
|
||||||
void DecompressTaggedPointer(const Register& destination,
|
void DecompressTagged(const Register& destination,
|
||||||
const MemOperand& field_operand);
|
const MemOperand& field_operand);
|
||||||
void DecompressTaggedPointer(const Register& destination,
|
void DecompressTagged(const Register& destination, const Register& source);
|
||||||
const Register& source);
|
|
||||||
void DecompressAnyTagged(const Register& destination,
|
|
||||||
const MemOperand& field_operand);
|
|
||||||
void CmpTagged(const Register& rd, const Register& rs1, const Register& rs2) {
|
void CmpTagged(const Register& rd, const Register& rs1, const Register& rs2) {
|
||||||
if (COMPRESS_POINTERS_BOOL) {
|
if (COMPRESS_POINTERS_BOOL) {
|
||||||
Sub32(rd, rs1, rs2);
|
Sub32(rd, rs1, rs2);
|
||||||
@ -1113,12 +1105,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
|
|||||||
// Pointer compression Support
|
// Pointer compression Support
|
||||||
// rv32 don't support Pointer compression. Defines these functions for
|
// rv32 don't support Pointer compression. Defines these functions for
|
||||||
// simplify builtins.
|
// simplify builtins.
|
||||||
inline void LoadTaggedPointerField(const Register& destination,
|
inline void LoadTaggedField(const Register& destination,
|
||||||
const MemOperand& field_operand) {
|
const MemOperand& field_operand) {
|
||||||
Lw(destination, field_operand);
|
|
||||||
}
|
|
||||||
inline void LoadAnyTaggedField(const Register& destination,
|
|
||||||
const MemOperand& field_operand) {
|
|
||||||
Lw(destination, field_operand);
|
Lw(destination, field_operand);
|
||||||
}
|
}
|
||||||
inline void LoadTaggedSignedField(const Register& destination,
|
inline void LoadTaggedSignedField(const Register& destination,
|
||||||
@ -1174,71 +1162,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
|
|||||||
void LoadLane(int sz, VRegister dst, uint8_t laneidx, MemOperand src);
|
void LoadLane(int sz, VRegister dst, uint8_t laneidx, MemOperand src);
|
||||||
void StoreLane(int sz, VRegister src, uint8_t laneidx, MemOperand dst);
|
void StoreLane(int sz, VRegister src, uint8_t laneidx, MemOperand dst);
|
||||||
|
|
||||||
protected:
|
|
||||||
inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
|
|
||||||
inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
|
|
||||||
|
|
||||||
private:
|
|
||||||
bool has_double_zero_reg_set_ = false;
|
|
||||||
bool has_single_zero_reg_set_ = false;
|
|
||||||
|
|
||||||
// Performs a truncating conversion of a floating point number as used by
|
|
||||||
// the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
|
|
||||||
// succeeds, otherwise falls through if result is saturated. On return
|
|
||||||
// 'result' either holds answer, or is clobbered on fall through.
|
|
||||||
void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
|
|
||||||
Label* done);
|
|
||||||
|
|
||||||
void CallCFunctionHelper(Register function, int num_reg_arguments,
|
|
||||||
int num_double_arguments);
|
|
||||||
|
|
||||||
// TODO(RISCV) Reorder parameters so out parameters come last.
|
|
||||||
bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
|
|
||||||
bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
|
|
||||||
Register* scratch, const Operand& rt);
|
|
||||||
|
|
||||||
void BranchShortHelper(int32_t offset, Label* L);
|
|
||||||
bool BranchShortHelper(int32_t offset, Label* L, Condition cond, Register rs,
|
|
||||||
const Operand& rt);
|
|
||||||
bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
|
|
||||||
const Operand& rt);
|
|
||||||
|
|
||||||
void BranchAndLinkShortHelper(int32_t offset, Label* L);
|
|
||||||
void BranchAndLinkShort(int32_t offset);
|
|
||||||
void BranchAndLinkShort(Label* L);
|
|
||||||
bool BranchAndLinkShortHelper(int32_t offset, Label* L, Condition cond,
|
|
||||||
Register rs, const Operand& rt);
|
|
||||||
bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
|
|
||||||
Register rs, const Operand& rt);
|
|
||||||
void BranchAndLinkLong(Label* L);
|
|
||||||
#if V8_TARGET_ARCH_RISCV64
|
|
||||||
template <typename F_TYPE>
|
|
||||||
void RoundHelper(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
|
|
||||||
FPURoundingMode mode);
|
|
||||||
#elif V8_TARGET_ARCH_RISCV32
|
|
||||||
void RoundDouble(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
|
|
||||||
FPURoundingMode mode);
|
|
||||||
|
|
||||||
void RoundFloat(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
|
|
||||||
FPURoundingMode mode);
|
|
||||||
#endif
|
|
||||||
template <typename F>
|
|
||||||
void RoundHelper(VRegister dst, VRegister src, Register scratch,
|
|
||||||
VRegister v_scratch, FPURoundingMode frm);
|
|
||||||
|
|
||||||
template <typename TruncFunc>
|
|
||||||
void RoundFloatingPointToInteger(Register rd, FPURegister fs, Register result,
|
|
||||||
TruncFunc trunc);
|
|
||||||
|
|
||||||
// Push a fixed frame, consisting of ra, fp.
|
|
||||||
void PushCommonFrame(Register marker_reg = no_reg);
|
|
||||||
};
|
|
||||||
|
|
||||||
// MacroAssembler implements a collection of frequently used macros.
|
|
||||||
class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
|
|
||||||
public:
|
|
||||||
using TurboAssembler::TurboAssembler;
|
|
||||||
|
|
||||||
// It assumes that the arguments are located below the stack pointer.
|
// It assumes that the arguments are located below the stack pointer.
|
||||||
// argc is the number of arguments not including the receiver.
|
// argc is the number of arguments not including the receiver.
|
||||||
// TODO(victorgomes): Remove this function once we stick with the reversed
|
// TODO(victorgomes): Remove this function once we stick with the reversed
|
||||||
@@ -1521,7 +1444,65 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
     DecodeField<Field>(reg, reg);
   }
 
+ protected:
+  inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
+  inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
+
  private:
+  bool has_double_zero_reg_set_ = false;
+  bool has_single_zero_reg_set_ = false;
+
+  // Performs a truncating conversion of a floating point number as used by
+  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
+  // succeeds, otherwise falls through if result is saturated. On return
+  // 'result' either holds answer, or is clobbered on fall through.
+  void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
+                                  Label* done);
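
For reference, a minimal scalar sketch of the ECMA-262 ToInt32 conversion this helper inlines (illustrative C++, not part of the diff; the saturated fall-through case is what the out-of-line slow path handles):

#include <cmath>
#include <cstdint>

// Hypothetical standalone model of ToInt32: NaN and +/-Infinity become 0,
// the value is truncated toward zero, then wrapped modulo 2^32 and
// reinterpreted as a signed 32-bit integer.
int32_t ToInt32(double x) {
  if (!std::isfinite(x)) return 0;
  double t = std::trunc(x);
  uint64_t wrapped =
      static_cast<uint64_t>(std::fmod(t, 4294967296.0) + 4294967296.0);
  return static_cast<int32_t>(static_cast<uint32_t>(wrapped));
}
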
+
+  void CallCFunctionHelper(Register function, int num_reg_arguments,
+                           int num_double_arguments);
+
+  // TODO(RISCV) Reorder parameters so out parameters come last.
+  bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
+  bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
+                       Register* scratch, const Operand& rt);
+
+  void BranchShortHelper(int32_t offset, Label* L);
+  bool BranchShortHelper(int32_t offset, Label* L, Condition cond, Register rs,
+                         const Operand& rt);
+  bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
+                        const Operand& rt);
+
+  void BranchAndLinkShortHelper(int32_t offset, Label* L);
+  void BranchAndLinkShort(int32_t offset);
+  void BranchAndLinkShort(Label* L);
+  bool BranchAndLinkShortHelper(int32_t offset, Label* L, Condition cond,
+                                Register rs, const Operand& rt);
+  bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
+                               Register rs, const Operand& rt);
+  void BranchAndLinkLong(Label* L);
+#if V8_TARGET_ARCH_RISCV64
+  template <typename F_TYPE>
+  void RoundHelper(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
+                   FPURoundingMode mode);
+#elif V8_TARGET_ARCH_RISCV32
+  void RoundDouble(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
+                   FPURoundingMode mode);
+
+  void RoundFloat(FPURegister dst, FPURegister src, FPURegister fpu_scratch,
+                  FPURoundingMode mode);
+#endif
+  template <typename F>
+  void RoundHelper(VRegister dst, VRegister src, Register scratch,
+                   VRegister v_scratch, FPURoundingMode frm);
+
+  template <typename TruncFunc>
+  void RoundFloatingPointToInteger(Register rd, FPURegister fs, Register result,
+                                   TruncFunc trunc);
+
+  // Push a fixed frame, consisting of ra, fp.
+  void PushCommonFrame(Register marker_reg = no_reg);
+
   // Helper functions for generating invokes.
   void InvokePrologue(Register expected_parameter_count,
                       Register actual_parameter_count, Label* done,
@@ -1538,7 +1519,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
 };
 
 template <typename Func>
-void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
+void MacroAssembler::GenerateSwitchTable(Register index, size_t case_count,
                                          Func GetLabelFunction) {
   // Ensure that dd-ed labels following this instruction use 8 bytes aligned
   // addresses.
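
As an aside, the switch table being generated is conceptually an aligned array of code addresses indexed by `index`; a hypothetical C++ analogue (names invented for illustration):

#include <cstddef>

void CaseA() {}  // stand-ins for the dd-ed label targets
void CaseB() {}

// The table sits at an 8-byte-aligned address so each pointer-sized entry
// can be fetched with one aligned load before the indirect jump.
alignas(8) void (*const kSwitchTable[])() = {&CaseA, &CaseB};

void Dispatch(std::size_t index) { kSwitchTable[index](); }
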
@@ -142,7 +142,7 @@ Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
 HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
   DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_));
   if (IsCompressedEmbeddedObject(rmode_)) {
-    return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTaggedAny(
+    return HeapObject::cast(Object(V8HeapCompressionScheme::DecompressTagged(
         cage_base,
         Assembler::target_compressed_address_at(pc_, constant_pool_))));
   } else {
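
The rename folds the TaggedPointer/AnyTagged variants into one DecompressTagged. Mechanically, decompression is just rebasing a 32-bit offset onto the heap cage base; a minimal sketch, assuming the usual base-plus-offset scheme:

#include <cstdint>

// Illustrative only: a compressed tagged value is the low 32 bits of the
// full pointer; decompression zero-extends it and adds the cage base.
uintptr_t DecompressTagged(uintptr_t cage_base, uint32_t compressed) {
  return cage_base + static_cast<uintptr_t>(compressed);
}
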
@@ -1494,7 +1494,7 @@ class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {
 
  private:
   friend class Assembler;
-  friend class TurboAssembler;
+  friend class MacroAssembler;
 
   Assembler* assembler_;
   RegList old_available_;
@@ -123,7 +123,7 @@ enum Condition {
   kNotZero = 21,
 };
 
-inline Condition check_condition(Condition cond) {
+inline Condition to_condition(Condition cond) {
   switch (cond) {
     case kUnsignedLessThan:
       return lt;
@@ -143,6 +143,31 @@ inline Condition check_condition(Condition cond) {
   return cond;
 }
 
+inline bool is_signed(Condition cond) {
+  switch (cond) {
+    case kEqual:
+    case kNotEqual:
+    case kLessThan:
+    case kGreaterThan:
+    case kLessThanEqual:
+    case kGreaterThanEqual:
+    case kOverflow:
+    case kNoOverflow:
+    case kZero:
+    case kNotZero:
+      return true;
+
+    case kUnsignedLessThan:
+    case kUnsignedGreaterThan:
+    case kUnsignedLessThanEqual:
+    case kUnsignedGreaterThanEqual:
+      return false;
+
+    default:
+      UNREACHABLE();
+  }
+}
+
 inline Condition NegateCondition(Condition cond) {
   DCHECK(cond != al);
   switch (cond) {
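
A sketch of why the signed/unsigned split matters: a backend picks the signed or unsigned flavor of a compare-and-branch from the condition. Hypothetical, heavily reduced enum and emitter (not the V8 API):

#include <cstdio>

enum Condition { kLessThan, kUnsignedLessThan };  // reduced for illustration

bool is_signed(Condition cond) { return cond == kLessThan; }

// A hypothetical emitter could use this to choose, e.g., a signed vs an
// unsigned branch instruction.
const char* BranchMnemonic(Condition cond) {
  return is_signed(cond) ? "branch_lt_signed" : "branch_lt_unsigned";
}

int main() { std::puts(BranchMnemonic(kUnsignedLessThan)); }
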
[File diff suppressed because it is too large]
@@ -41,9 +41,9 @@ Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
                                    Register reg5 = no_reg,
                                    Register reg6 = no_reg);
 
-class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE MacroAssembler : public MacroAssemblerBase {
  public:
-  using TurboAssemblerBase::TurboAssemblerBase;
+  using MacroAssemblerBase::MacroAssemblerBase;
 
   void CallBuiltin(Builtin builtin, Condition cond = al);
   void TailCallBuiltin(Builtin builtin, Condition cond = al);
@@ -1464,17 +1464,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
 #endif
   }
 
-  // Loads a field containing a HeapObject and decompresses it if pointer
-  // compression is enabled.
-  void LoadTaggedPointerField(const Register& destination,
-                              const MemOperand& field_operand,
-                              const Register& scratch = no_reg);
-  void LoadTaggedSignedField(Register destination, MemOperand field_operand);
-
   // Loads a field containing any tagged value and decompresses it if necessary.
-  void LoadAnyTaggedField(const Register& destination,
+  void LoadTaggedField(const Register& destination,
                        const MemOperand& field_operand,
                        const Register& scratch = no_reg);
+  void LoadTaggedSignedField(Register destination, MemOperand field_operand);
 
   // Loads a field containing smi value and untags it.
   void SmiUntagField(Register dst, const MemOperand& src);
@@ -1486,11 +1480,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
 
   void DecompressTaggedSigned(Register destination, MemOperand field_operand);
   void DecompressTaggedSigned(Register destination, Register src);
-  void DecompressTaggedPointer(Register destination, MemOperand field_operand);
-  void DecompressTaggedPointer(Register destination, Register source);
-  void DecompressTaggedPointer(const Register& destination, Tagged_t immediate);
-  void DecompressAnyTagged(Register destination, MemOperand field_operand);
-  void DecompressAnyTagged(Register destination, Register source);
+  void DecompressTagged(Register destination, MemOperand field_operand);
+  void DecompressTagged(Register destination, Register source);
+  void DecompressTagged(const Register& destination, Tagged_t immediate);
 
   // CountLeadingZeros will corrupt the scratch register pair (eg. r0:r1)
   void CountLeadingZerosU32(Register dst, Register src,
@@ -1502,22 +1494,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void CountTrailingZerosU64(Register dst, Register src,
                              Register scratch_pair = r0);
 
- private:
-  static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
-
-  void CallCFunctionHelper(Register function, int num_reg_arguments,
-                           int num_double_arguments);
-
-  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
-  int CalculateStackPassedWords(int num_reg_arguments,
-                                int num_double_arguments);
-};
-
-// MacroAssembler implements a collection of frequently used macros.
-class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
- public:
-  using TurboAssembler::TurboAssembler;
-
   void LoadStackLimit(Register destination, StackLimitKind kind);
   // It assumes that the arguments are located below the stack pointer.
   // argc is the number of arguments not including the receiver.
@@ -1803,6 +1779,14 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
 
  private:
   static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
 
+  void CallCFunctionHelper(Register function, int num_reg_arguments,
+                           int num_double_arguments);
+
+  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
+  int CalculateStackPassedWords(int num_reg_arguments,
+                                int num_double_arguments);
+
   // Helper functions for generating invokes.
   void InvokePrologue(Register expected_parameter_count,
                       Register actual_parameter_count, Label* done,
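
Context for kSmiShift = kSmiTagSize + kSmiShiftSize: on 64-bit targets a Smi lives in the upper half of the word, so untagging is a single arithmetic shift. A sketch under the assumption kSmiTagSize == 1 and kSmiShiftSize == 31 (the usual 64-bit, non-pointer-compressed layout; constants here are assumptions for illustration):

#include <cstdint>

constexpr int kSmiTagSize = 1;     // assumed
constexpr int kSmiShiftSize = 31;  // assumed
constexpr int kSmiShift = kSmiTagSize + kSmiShiftSize;  // == 32

int64_t TagSmi(int32_t value) {
  return static_cast<int64_t>(value) << kSmiShift;  // value in upper 32 bits
}
int32_t UntagSmi(int64_t smi) {
  return static_cast<int32_t>(smi >> kSmiShift);    // arithmetic shift right
}
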
@@ -27,7 +27,7 @@
 namespace v8 {
 namespace internal {
 
-void SharedTurboAssembler::Move(Register dst, uint32_t src) {
+void SharedMacroAssemblerBase::Move(Register dst, uint32_t src) {
   // Helper to paper over the different assembler function names.
 #if V8_TARGET_ARCH_IA32
   mov(dst, Immediate(src));
@@ -38,7 +38,7 @@ void SharedTurboAssembler::Move(Register dst, uint32_t src) {
 #endif
 }
 
-void SharedTurboAssembler::Move(Register dst, Register src) {
+void SharedMacroAssemblerBase::Move(Register dst, Register src) {
   // Helper to paper over the different assembler function names.
   if (dst != src) {
 #if V8_TARGET_ARCH_IA32
@@ -51,7 +51,7 @@ void SharedTurboAssembler::Move(Register dst, Register src) {
   }
 }
 
-void SharedTurboAssembler::Add(Register dst, Immediate src) {
+void SharedMacroAssemblerBase::Add(Register dst, Immediate src) {
   // Helper to paper over the different assembler function names.
 #if V8_TARGET_ARCH_IA32
   add(dst, src);
@@ -62,7 +62,7 @@ void SharedTurboAssembler::Add(Register dst, Immediate src) {
 #endif
 }
 
-void SharedTurboAssembler::And(Register dst, Immediate src) {
+void SharedMacroAssemblerBase::And(Register dst, Immediate src) {
   // Helper to paper over the different assembler function names.
 #if V8_TARGET_ARCH_IA32
   and_(dst, src);
@@ -77,8 +77,8 @@ void SharedTurboAssembler::And(Register dst, Immediate src) {
 #endif
 }
 
-void SharedTurboAssembler::Movhps(XMMRegister dst, XMMRegister src1,
+void SharedMacroAssemblerBase::Movhps(XMMRegister dst, XMMRegister src1,
                                   Operand src2) {
   if (CpuFeatures::IsSupported(AVX)) {
     CpuFeatureScope scope(this, AVX);
     vmovhps(dst, src1, src2);
@@ -90,8 +90,8 @@ void SharedTurboAssembler::Movhps(XMMRegister dst, XMMRegister src1,
   }
 }
 
-void SharedTurboAssembler::Movlps(XMMRegister dst, XMMRegister src1,
+void SharedMacroAssemblerBase::Movlps(XMMRegister dst, XMMRegister src1,
                                   Operand src2) {
   if (CpuFeatures::IsSupported(AVX)) {
     CpuFeatureScope scope(this, AVX);
     vmovlps(dst, src1, src2);
@@ -102,8 +102,8 @@ void SharedTurboAssembler::Movlps(XMMRegister dst, XMMRegister src1,
     movlps(dst, src2);
   }
 }
-void SharedTurboAssembler::Blendvpd(XMMRegister dst, XMMRegister src1,
+void SharedMacroAssemblerBase::Blendvpd(XMMRegister dst, XMMRegister src1,
                                     XMMRegister src2, XMMRegister mask) {
   if (CpuFeatures::IsSupported(AVX)) {
     CpuFeatureScope scope(this, AVX);
     vblendvpd(dst, src1, src2, mask);
@@ -115,8 +115,8 @@ void SharedTurboAssembler::Blendvpd(XMMRegister dst, XMMRegister src1,
   }
 }
 
-void SharedTurboAssembler::Blendvps(XMMRegister dst, XMMRegister src1,
+void SharedMacroAssemblerBase::Blendvps(XMMRegister dst, XMMRegister src1,
                                     XMMRegister src2, XMMRegister mask) {
   if (CpuFeatures::IsSupported(AVX)) {
     CpuFeatureScope scope(this, AVX);
     vblendvps(dst, src1, src2, mask);
@@ -128,8 +128,8 @@ void SharedTurboAssembler::Blendvps(XMMRegister dst, XMMRegister src1,
   }
 }
 
-void SharedTurboAssembler::Pblendvb(XMMRegister dst, XMMRegister src1,
+void SharedMacroAssemblerBase::Pblendvb(XMMRegister dst, XMMRegister src1,
                                     XMMRegister src2, XMMRegister mask) {
   if (CpuFeatures::IsSupported(AVX)) {
     CpuFeatureScope scope(this, AVX);
     vpblendvb(dst, src1, src2, mask);
@@ -141,8 +141,8 @@ void SharedTurboAssembler::Pblendvb(XMMRegister dst, XMMRegister src1,
   }
 }
 
-void SharedTurboAssembler::Shufps(XMMRegister dst, XMMRegister src1,
+void SharedMacroAssemblerBase::Shufps(XMMRegister dst, XMMRegister src1,
                                   XMMRegister src2, uint8_t imm8) {
   if (CpuFeatures::IsSupported(AVX)) {
     CpuFeatureScope avx_scope(this, AVX);
     vshufps(dst, src1, src2, imm8);
@@ -154,8 +154,8 @@ void SharedTurboAssembler::Shufps(XMMRegister dst, XMMRegister src1,
   }
 }
 
-void SharedTurboAssembler::F64x2ExtractLane(DoubleRegister dst, XMMRegister src,
-                                            uint8_t lane) {
+void SharedMacroAssemblerBase::F64x2ExtractLane(DoubleRegister dst,
+                                                XMMRegister src, uint8_t lane) {
   ASM_CODE_COMMENT(this);
   if (lane == 0) {
     if (dst != src) {
@@ -173,8 +173,10 @@ void SharedTurboAssembler::F64x2ExtractLane(DoubleRegister dst, XMMRegister src,
   }
 }
 
-void SharedTurboAssembler::F64x2ReplaceLane(XMMRegister dst, XMMRegister src,
-                                            DoubleRegister rep, uint8_t lane) {
+void SharedMacroAssemblerBase::F64x2ReplaceLane(XMMRegister dst,
+                                                XMMRegister src,
+                                                DoubleRegister rep,
+                                                uint8_t lane) {
   ASM_CODE_COMMENT(this);
   if (CpuFeatures::IsSupported(AVX)) {
     CpuFeatureScope scope(this, AVX);
@@ -197,8 +199,8 @@ void SharedTurboAssembler::F64x2ReplaceLane(XMMRegister dst, XMMRegister src,
   }
 }
 
-void SharedTurboAssembler::F32x4Min(XMMRegister dst, XMMRegister lhs,
+void SharedMacroAssemblerBase::F32x4Min(XMMRegister dst, XMMRegister lhs,
                                     XMMRegister rhs, XMMRegister scratch) {
   ASM_CODE_COMMENT(this);
   // The minps instruction doesn't propagate NaNs and +0's in its first
   // operand. Perform minps in both orders, merge the results, and adjust.
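
A scalar model of that both-orders trick (illustrative C++, not the V8 code; the lane-wise SSE sequence ORs the two results and then patches NaN lanes with a compare-unordered mask):

#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>

// minps-like scalar: returns the second operand on NaN or when a == b,
// which is exactly why a single call is not Wasm-correct.
static float MinpsLane(float a, float b) { return a < b ? a : b; }

float MergedF32Min(float a, float b) {
  if (std::isnan(a) || std::isnan(b))  // the cmpunordps path
    return std::numeric_limits<float>::quiet_NaN();
  float m1 = MinpsLane(a, b);
  float m2 = MinpsLane(b, a);
  uint32_t u1, u2;
  std::memcpy(&u1, &m1, sizeof(u1));
  std::memcpy(&u2, &m2, sizeof(u2));
  uint32_t merged = u1 | u2;  // orps: lets -0 win over +0
  float r;
  std::memcpy(&r, &merged, sizeof(r));
  return r;
}
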
@@ -226,8 +228,8 @@ void SharedTurboAssembler::F32x4Min(XMMRegister dst, XMMRegister lhs,
   Andnps(dst, dst, scratch);
 }
 
-void SharedTurboAssembler::F32x4Max(XMMRegister dst, XMMRegister lhs,
+void SharedMacroAssemblerBase::F32x4Max(XMMRegister dst, XMMRegister lhs,
                                     XMMRegister rhs, XMMRegister scratch) {
   ASM_CODE_COMMENT(this);
   // The maxps instruction doesn't propagate NaNs and +0's in its first
   // operand. Perform maxps in both orders, merge the results, and adjust.
@@ -258,8 +260,8 @@ void SharedTurboAssembler::F32x4Max(XMMRegister dst, XMMRegister lhs,
   Andnps(dst, dst, scratch);
 }
 
-void SharedTurboAssembler::F64x2Min(XMMRegister dst, XMMRegister lhs,
+void SharedMacroAssemblerBase::F64x2Min(XMMRegister dst, XMMRegister lhs,
                                     XMMRegister rhs, XMMRegister scratch) {
   ASM_CODE_COMMENT(this);
   if (CpuFeatures::IsSupported(AVX)) {
     CpuFeatureScope scope(this, AVX);
@@ -296,8 +298,8 @@ void SharedTurboAssembler::F64x2Min(XMMRegister dst, XMMRegister lhs,
   }
 }
 
-void SharedTurboAssembler::F64x2Max(XMMRegister dst, XMMRegister lhs,
+void SharedMacroAssemblerBase::F64x2Max(XMMRegister dst, XMMRegister lhs,
                                     XMMRegister rhs, XMMRegister scratch) {
   ASM_CODE_COMMENT(this);
   if (CpuFeatures::IsSupported(AVX)) {
     CpuFeatureScope scope(this, AVX);
@@ -336,7 +338,7 @@ void SharedTurboAssembler::F64x2Max(XMMRegister dst, XMMRegister lhs,
   }
 }
 
-void SharedTurboAssembler::F32x4Splat(XMMRegister dst, DoubleRegister src) {
+void SharedMacroAssemblerBase::F32x4Splat(XMMRegister dst, DoubleRegister src) {
   ASM_CODE_COMMENT(this);
   if (CpuFeatures::IsSupported(AVX2)) {
     CpuFeatureScope avx2_scope(this, AVX2);
@@ -354,8 +356,8 @@ void SharedTurboAssembler::F32x4Splat(XMMRegister dst, DoubleRegister src) {
   }
 }
 
-void SharedTurboAssembler::F32x4ExtractLane(FloatRegister dst, XMMRegister src,
-                                            uint8_t lane) {
+void SharedMacroAssemblerBase::F32x4ExtractLane(FloatRegister dst,
                                                XMMRegister src, uint8_t lane) {
   ASM_CODE_COMMENT(this);
   DCHECK_LT(lane, 4);
   // These instructions are shorter than insertps, but will leave junk in
@@ -376,8 +378,8 @@ void SharedTurboAssembler::F32x4ExtractLane(FloatRegister dst, XMMRegister src,
   }
 }
 
-void SharedTurboAssembler::S128Store32Lane(Operand dst, XMMRegister src,
+void SharedMacroAssemblerBase::S128Store32Lane(Operand dst, XMMRegister src,
                                            uint8_t laneidx) {
   ASM_CODE_COMMENT(this);
   if (laneidx == 0) {
     Movss(dst, src);
@@ -388,8 +390,8 @@ void SharedTurboAssembler::S128Store32Lane(Operand dst, XMMRegister src,
 }
 
 template <typename Op>
-void SharedTurboAssembler::I8x16SplatPreAvx2(XMMRegister dst, Op src,
+void SharedMacroAssemblerBase::I8x16SplatPreAvx2(XMMRegister dst, Op src,
                                              XMMRegister scratch) {
   ASM_CODE_COMMENT(this);
   DCHECK(!CpuFeatures::IsSupported(AVX2));
   CpuFeatureScope ssse3_scope(this, SSSE3);
@@ -398,8 +400,8 @@ void SharedTurboAssembler::I8x16SplatPreAvx2(XMMRegister dst, Op src,
   Pshufb(dst, scratch);
 }
 
-void SharedTurboAssembler::I8x16Splat(XMMRegister dst, Register src,
+void SharedMacroAssemblerBase::I8x16Splat(XMMRegister dst, Register src,
                                       XMMRegister scratch) {
   ASM_CODE_COMMENT(this);
   if (CpuFeatures::IsSupported(AVX2)) {
     CpuFeatureScope avx2_scope(this, AVX2);
@@ -410,8 +412,8 @@ void SharedTurboAssembler::I8x16Splat(XMMRegister dst, Register src,
   }
 }
 
-void SharedTurboAssembler::I8x16Splat(XMMRegister dst, Operand src,
+void SharedMacroAssemblerBase::I8x16Splat(XMMRegister dst, Operand src,
                                       XMMRegister scratch) {
   ASM_CODE_COMMENT(this);
   DCHECK_OPERAND_IS_NOT_REG(src);
   if (CpuFeatures::IsSupported(AVX2)) {
@@ -422,9 +424,9 @@ void SharedTurboAssembler::I8x16Splat(XMMRegister dst, Operand src,
   }
 }
 
-void SharedTurboAssembler::I8x16Shl(XMMRegister dst, XMMRegister src1,
+void SharedMacroAssemblerBase::I8x16Shl(XMMRegister dst, XMMRegister src1,
                                     uint8_t src2, Register tmp1,
                                     XMMRegister tmp2) {
   ASM_CODE_COMMENT(this);
   DCHECK_NE(dst, tmp2);
   // Perform 16-bit shift, then mask away low bits.
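
A scalar model of the 16-bit-shift-plus-mask trick for one byte pair (illustrative; SSE has no 8-bit shift, so psllw shifts byte pairs and pand clears the bits that leak across the byte boundary):

#include <cstdint>

uint16_t ShlBytePair(uint16_t lane, int shift) {
  uint16_t shifted = static_cast<uint16_t>(lane << shift);     // psllw
  uint8_t byte_mask = static_cast<uint8_t>(0xFF << shift);
  uint16_t mask = static_cast<uint16_t>(byte_mask * 0x0101u);  // splat per byte
  return shifted & mask;                                       // pand
}
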
@@ -444,9 +446,9 @@ void SharedTurboAssembler::I8x16Shl(XMMRegister dst, XMMRegister src1,
   Pand(dst, tmp2);
 }
 
-void SharedTurboAssembler::I8x16Shl(XMMRegister dst, XMMRegister src1,
+void SharedMacroAssemblerBase::I8x16Shl(XMMRegister dst, XMMRegister src1,
                                     Register src2, Register tmp1,
                                     XMMRegister tmp2, XMMRegister tmp3) {
   ASM_CODE_COMMENT(this);
   DCHECK(!AreAliased(dst, tmp2, tmp3));
   DCHECK(!AreAliased(src1, tmp2, tmp3));
@@ -471,8 +473,8 @@ void SharedTurboAssembler::I8x16Shl(XMMRegister dst, XMMRegister src1,
   Psllw(dst, dst, tmp3);
 }
 
-void SharedTurboAssembler::I8x16ShrS(XMMRegister dst, XMMRegister src1,
+void SharedMacroAssemblerBase::I8x16ShrS(XMMRegister dst, XMMRegister src1,
                                      uint8_t src2, XMMRegister tmp) {
   ASM_CODE_COMMENT(this);
   // Unpack bytes into words, do word (16-bit) shifts, and repack.
   DCHECK_NE(dst, tmp);
@@ -485,9 +487,9 @@ void SharedTurboAssembler::I8x16ShrS(XMMRegister dst, XMMRegister src1,
   Packsswb(dst, tmp);
 }
 
-void SharedTurboAssembler::I8x16ShrS(XMMRegister dst, XMMRegister src1,
+void SharedMacroAssemblerBase::I8x16ShrS(XMMRegister dst, XMMRegister src1,
                                      Register src2, Register tmp1,
                                      XMMRegister tmp2, XMMRegister tmp3) {
   ASM_CODE_COMMENT(this);
   DCHECK(!AreAliased(dst, tmp2, tmp3));
   DCHECK_NE(src1, tmp2);
@@ -506,9 +508,9 @@ void SharedTurboAssembler::I8x16ShrS(XMMRegister dst, XMMRegister src1,
   Packsswb(dst, tmp2);
 }
 
-void SharedTurboAssembler::I8x16ShrU(XMMRegister dst, XMMRegister src1,
+void SharedMacroAssemblerBase::I8x16ShrU(XMMRegister dst, XMMRegister src1,
                                      uint8_t src2, Register tmp1,
                                      XMMRegister tmp2) {
   ASM_CODE_COMMENT(this);
   DCHECK_NE(dst, tmp2);
   if (!CpuFeatures::IsSupported(AVX) && (dst != src1)) {
@@ -528,9 +530,9 @@ void SharedTurboAssembler::I8x16ShrU(XMMRegister dst, XMMRegister src1,
   Pand(dst, tmp2);
 }
 
-void SharedTurboAssembler::I8x16ShrU(XMMRegister dst, XMMRegister src1,
+void SharedMacroAssemblerBase::I8x16ShrU(XMMRegister dst, XMMRegister src1,
                                      Register src2, Register tmp1,
                                      XMMRegister tmp2, XMMRegister tmp3) {
   ASM_CODE_COMMENT(this);
   DCHECK(!AreAliased(dst, tmp2, tmp3));
   DCHECK_NE(src1, tmp2);
@@ -550,14 +552,14 @@ void SharedTurboAssembler::I8x16ShrU(XMMRegister dst, XMMRegister src1,
 }
 
 template <typename Op>
-void SharedTurboAssembler::I16x8SplatPreAvx2(XMMRegister dst, Op src) {
+void SharedMacroAssemblerBase::I16x8SplatPreAvx2(XMMRegister dst, Op src) {
   DCHECK(!CpuFeatures::IsSupported(AVX2));
   Movd(dst, src);
   Pshuflw(dst, dst, uint8_t{0x0});
   Punpcklqdq(dst, dst);
 }
 
-void SharedTurboAssembler::I16x8Splat(XMMRegister dst, Register src) {
+void SharedMacroAssemblerBase::I16x8Splat(XMMRegister dst, Register src) {
   ASM_CODE_COMMENT(this);
   if (CpuFeatures::IsSupported(AVX2)) {
     CpuFeatureScope avx2_scope(this, AVX2);
@@ -568,7 +570,7 @@ void SharedTurboAssembler::I16x8Splat(XMMRegister dst, Register src) {
   }
 }
 
-void SharedTurboAssembler::I16x8Splat(XMMRegister dst, Operand src) {
+void SharedMacroAssemblerBase::I16x8Splat(XMMRegister dst, Operand src) {
   ASM_CODE_COMMENT(this);
   DCHECK_OPERAND_IS_NOT_REG(src);
   if (CpuFeatures::IsSupported(AVX2)) {
@@ -579,18 +581,20 @@ void SharedTurboAssembler::I16x8Splat(XMMRegister dst, Operand src) {
   }
 }
 
-void SharedTurboAssembler::I16x8ExtMulLow(XMMRegister dst, XMMRegister src1,
-                                          XMMRegister src2, XMMRegister scratch,
-                                          bool is_signed) {
+void SharedMacroAssemblerBase::I16x8ExtMulLow(XMMRegister dst, XMMRegister src1,
+                                              XMMRegister src2,
+                                              XMMRegister scratch,
+                                              bool is_signed) {
   ASM_CODE_COMMENT(this);
   is_signed ? Pmovsxbw(scratch, src1) : Pmovzxbw(scratch, src1);
   is_signed ? Pmovsxbw(dst, src2) : Pmovzxbw(dst, src2);
   Pmullw(dst, scratch);
 }
 
-void SharedTurboAssembler::I16x8ExtMulHighS(XMMRegister dst, XMMRegister src1,
-                                            XMMRegister src2,
-                                            XMMRegister scratch) {
+void SharedMacroAssemblerBase::I16x8ExtMulHighS(XMMRegister dst,
+                                                XMMRegister src1,
+                                                XMMRegister src2,
+                                                XMMRegister scratch) {
   ASM_CODE_COMMENT(this);
   if (CpuFeatures::IsSupported(AVX)) {
     CpuFeatureScope avx_scope(this, AVX);
@@ -612,9 +616,10 @@ void SharedTurboAssembler::I16x8ExtMulHighS(XMMRegister dst, XMMRegister src1,
   }
 }
 
-void SharedTurboAssembler::I16x8ExtMulHighU(XMMRegister dst, XMMRegister src1,
-                                            XMMRegister src2,
-                                            XMMRegister scratch) {
+void SharedMacroAssemblerBase::I16x8ExtMulHighU(XMMRegister dst,
+                                                XMMRegister src1,
+                                                XMMRegister src2,
+                                                XMMRegister scratch) {
   ASM_CODE_COMMENT(this);
   // The logic here is slightly complicated to handle all the cases of register
   // aliasing. This allows flexibility for callers in TurboFan and Liftoff.
@@ -662,8 +667,8 @@ void SharedTurboAssembler::I16x8ExtMulHighU(XMMRegister dst, XMMRegister src1,
   }
 }
 
-void SharedTurboAssembler::I16x8SConvertI8x16High(XMMRegister dst,
+void SharedMacroAssemblerBase::I16x8SConvertI8x16High(XMMRegister dst,
                                                   XMMRegister src) {
   ASM_CODE_COMMENT(this);
   if (CpuFeatures::IsSupported(AVX)) {
     CpuFeatureScope avx_scope(this, AVX);
@@ -685,9 +690,9 @@ void SharedTurboAssembler::I16x8SConvertI8x16High(XMMRegister dst,
   }
 }
 
-void SharedTurboAssembler::I16x8UConvertI8x16High(XMMRegister dst,
+void SharedMacroAssemblerBase::I16x8UConvertI8x16High(XMMRegister dst,
                                                   XMMRegister src,
                                                   XMMRegister scratch) {
   ASM_CODE_COMMENT(this);
   if (CpuFeatures::IsSupported(AVX)) {
     CpuFeatureScope avx_scope(this, AVX);
@@ -711,9 +716,10 @@ void SharedTurboAssembler::I16x8UConvertI8x16High(XMMRegister dst,
   }
 }
 
-void SharedTurboAssembler::I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1,
-                                            XMMRegister src2,
-                                            XMMRegister scratch) {
+void SharedMacroAssemblerBase::I16x8Q15MulRSatS(XMMRegister dst,
+                                                XMMRegister src1,
+                                                XMMRegister src2,
+                                                XMMRegister scratch) {
   ASM_CODE_COMMENT(this);
   // k = i16x8.splat(0x8000)
   Pcmpeqd(scratch, scratch);
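
For reference, a scalar sketch of the i16x8.q15mulr_sat_s lane operation being built here (Q15 fixed-point multiply with rounding; the 0x8000 splat guards the single overflowing case, -1.0 * -1.0):

#include <cstdint>

int16_t Q15MulRoundSat(int16_t a, int16_t b) {
  int32_t p = (int32_t{a} * int32_t{b} + (1 << 14)) >> 15;  // multiply, round
  if (p > INT16_MAX) p = INT16_MAX;  // only -32768 * -32768 overflows
  return static_cast<int16_t>(p);
}
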
@@ -729,9 +735,9 @@ void SharedTurboAssembler::I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1,
   Pxor(dst, scratch);
 }
 
-void SharedTurboAssembler::I16x8DotI8x16I7x16S(XMMRegister dst,
+void SharedMacroAssemblerBase::I16x8DotI8x16I7x16S(XMMRegister dst,
                                                XMMRegister src1,
                                                XMMRegister src2) {
   ASM_CODE_COMMENT(this);
   if (CpuFeatures::IsSupported(AVX)) {
     CpuFeatureScope avx_scope(this, AVX);
@@ -744,7 +750,7 @@ void SharedTurboAssembler::I16x8DotI8x16I7x16S(XMMRegister dst,
   }
 }
 
-void SharedTurboAssembler::I32x4DotI8x16I7x16AddS(
+void SharedMacroAssemblerBase::I32x4DotI8x16I7x16AddS(
     XMMRegister dst, XMMRegister src1, XMMRegister src2, XMMRegister src3,
     XMMRegister scratch, XMMRegister splat_reg) {
   ASM_CODE_COMMENT(this);
@@ -768,9 +774,9 @@ void SharedTurboAssembler::I32x4DotI8x16I7x16AddS(
   }
 }
 
-void SharedTurboAssembler::I32x4ExtAddPairwiseI16x8U(XMMRegister dst,
+void SharedMacroAssemblerBase::I32x4ExtAddPairwiseI16x8U(XMMRegister dst,
                                                      XMMRegister src,
                                                      XMMRegister tmp) {
   ASM_CODE_COMMENT(this);
   if (CpuFeatures::IsSupported(AVX)) {
     CpuFeatureScope avx_scope(this, AVX);
@@ -812,9 +818,10 @@ void SharedTurboAssembler::I32x4ExtAddPairwiseI16x8U(XMMRegister dst,
 // 1. Multiply low word into scratch.
 // 2. Multiply high word (can be signed or unsigned) into dst.
 // 3. Unpack and interleave scratch and dst into dst.
-void SharedTurboAssembler::I32x4ExtMul(XMMRegister dst, XMMRegister src1,
-                                       XMMRegister src2, XMMRegister scratch,
-                                       bool low, bool is_signed) {
+void SharedMacroAssemblerBase::I32x4ExtMul(XMMRegister dst, XMMRegister src1,
+                                           XMMRegister src2,
+                                           XMMRegister scratch, bool low,
+                                           bool is_signed) {
   ASM_CODE_COMMENT(this);
   if (CpuFeatures::IsSupported(AVX)) {
     CpuFeatureScope avx_scope(this, AVX);
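
A scalar model of steps 1-3 for one lane pair (illustrative): pmullw yields the low 16 bits of each 16x16 product, pmulhw/pmulhuw the high 16 bits, and the unpack interleaves them into the widened 32-bit result.

#include <cstdint>

int32_t ExtMulLane(int16_t a, int16_t b) {
  int32_t full = int32_t{a} * int32_t{b};
  uint16_t lo = static_cast<uint16_t>(full);        // pmullw lane
  uint16_t hi = static_cast<uint16_t>(full >> 16);  // pmulhw lane
  // punpcklwd/punpckhwd: interleave the low and high halves back together.
  return static_cast<int32_t>(uint32_t{lo} | (uint32_t{hi} << 16));
}
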
@@ -830,8 +837,8 @@ void SharedTurboAssembler::I32x4ExtMul(XMMRegister dst, XMMRegister src1,
   }
 }
 
-void SharedTurboAssembler::I32x4SConvertI16x8High(XMMRegister dst,
+void SharedMacroAssemblerBase::I32x4SConvertI16x8High(XMMRegister dst,
                                                   XMMRegister src) {
   ASM_CODE_COMMENT(this);
   if (CpuFeatures::IsSupported(AVX)) {
     CpuFeatureScope avx_scope(this, AVX);
@@ -853,9 +860,9 @@ void SharedTurboAssembler::I32x4SConvertI16x8High(XMMRegister dst,
   }
 }
 
-void SharedTurboAssembler::I32x4UConvertI16x8High(XMMRegister dst,
+void SharedMacroAssemblerBase::I32x4UConvertI16x8High(XMMRegister dst,
                                                   XMMRegister src,
                                                   XMMRegister scratch) {
   ASM_CODE_COMMENT(this);
   if (CpuFeatures::IsSupported(AVX)) {
     CpuFeatureScope avx_scope(this, AVX);
@@ -879,8 +886,8 @@ void SharedTurboAssembler::I32x4UConvertI16x8High(XMMRegister dst,
   }
 }
 
-void SharedTurboAssembler::I64x2Neg(XMMRegister dst, XMMRegister src,
+void SharedMacroAssemblerBase::I64x2Neg(XMMRegister dst, XMMRegister src,
                                     XMMRegister scratch) {
   ASM_CODE_COMMENT(this);
   if (CpuFeatures::IsSupported(AVX)) {
     CpuFeatureScope scope(this, AVX);
@@ -896,8 +903,8 @@ void SharedTurboAssembler::I64x2Neg(XMMRegister dst, XMMRegister src,
   }
 }
 
-void SharedTurboAssembler::I64x2Abs(XMMRegister dst, XMMRegister src,
+void SharedMacroAssemblerBase::I64x2Abs(XMMRegister dst, XMMRegister src,
                                     XMMRegister scratch) {
   ASM_CODE_COMMENT(this);
   if (CpuFeatures::IsSupported(AVX)) {
     CpuFeatureScope avx_scope(this, AVX);
@@ -917,8 +924,8 @@ void SharedTurboAssembler::I64x2Abs(XMMRegister dst, XMMRegister src,
   }
 }
 
-void SharedTurboAssembler::I64x2GtS(XMMRegister dst, XMMRegister src0,
+void SharedMacroAssemblerBase::I64x2GtS(XMMRegister dst, XMMRegister src0,
                                     XMMRegister src1, XMMRegister scratch) {
   ASM_CODE_COMMENT(this);
   if (CpuFeatures::IsSupported(AVX)) {
     CpuFeatureScope avx_scope(this, AVX);
@@ -951,8 +958,8 @@ void SharedTurboAssembler::I64x2GtS(XMMRegister dst, XMMRegister src0,
   }
 }
 
-void SharedTurboAssembler::I64x2GeS(XMMRegister dst, XMMRegister src0,
+void SharedMacroAssemblerBase::I64x2GeS(XMMRegister dst, XMMRegister src0,
                                     XMMRegister src1, XMMRegister scratch) {
   ASM_CODE_COMMENT(this);
   if (CpuFeatures::IsSupported(AVX)) {
     CpuFeatureScope avx_scope(this, AVX);
@@ -986,8 +993,8 @@ void SharedTurboAssembler::I64x2GeS(XMMRegister dst, XMMRegister src0,
   }
 }
 
-void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src,
+void SharedMacroAssemblerBase::I64x2ShrS(XMMRegister dst, XMMRegister src,
                                      uint8_t shift, XMMRegister xmm_tmp) {
   ASM_CODE_COMMENT(this);
   DCHECK_GT(64, shift);
   DCHECK_NE(xmm_tmp, dst);
@@ -1019,10 +1026,10 @@ void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src,
   Psubq(dst, xmm_tmp);
 }
 
-void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src,
+void SharedMacroAssemblerBase::I64x2ShrS(XMMRegister dst, XMMRegister src,
                                      Register shift, XMMRegister xmm_tmp,
                                      XMMRegister xmm_shift,
                                      Register tmp_shift) {
   ASM_CODE_COMMENT(this);
   DCHECK_NE(xmm_tmp, dst);
   DCHECK_NE(xmm_tmp, src);
@@ -1049,9 +1056,9 @@ void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src,
   Psubq(dst, xmm_tmp);
 }
 
-void SharedTurboAssembler::I64x2Mul(XMMRegister dst, XMMRegister lhs,
+void SharedMacroAssemblerBase::I64x2Mul(XMMRegister dst, XMMRegister lhs,
                                     XMMRegister rhs, XMMRegister tmp1,
                                     XMMRegister tmp2) {
   ASM_CODE_COMMENT(this);
   DCHECK(!AreAliased(dst, tmp1, tmp2));
   DCHECK(!AreAliased(lhs, tmp1, tmp2));
@@ -1099,9 +1106,10 @@ void SharedTurboAssembler::I64x2Mul(XMMRegister dst, XMMRegister lhs,
 // 2. Unpack src1, src0 into even-number elements of dst.
 // 3. Multiply 1. with 2.
 // For non-AVX, use non-destructive pshufd instead of punpckldq/punpckhdq.
-void SharedTurboAssembler::I64x2ExtMul(XMMRegister dst, XMMRegister src1,
-                                       XMMRegister src2, XMMRegister scratch,
-                                       bool low, bool is_signed) {
+void SharedMacroAssemblerBase::I64x2ExtMul(XMMRegister dst, XMMRegister src1,
+                                           XMMRegister src2,
+                                           XMMRegister scratch, bool low,
+                                           bool is_signed) {
   ASM_CODE_COMMENT(this);
   if (CpuFeatures::IsSupported(AVX)) {
     CpuFeatureScope avx_scope(this, AVX);
@@ -1130,8 +1138,8 @@ void SharedTurboAssembler::I64x2ExtMul(XMMRegister dst, XMMRegister src1,
   }
 }
 
-void SharedTurboAssembler::I64x2SConvertI32x4High(XMMRegister dst,
+void SharedMacroAssemblerBase::I64x2SConvertI32x4High(XMMRegister dst,
                                                   XMMRegister src) {
   ASM_CODE_COMMENT(this);
   if (CpuFeatures::IsSupported(AVX)) {
     CpuFeatureScope avx_scope(this, AVX);
@@ -1148,9 +1156,9 @@ void SharedTurboAssembler::I64x2SConvertI32x4High(XMMRegister dst,
   }
 }
 
-void SharedTurboAssembler::I64x2UConvertI32x4High(XMMRegister dst,
+void SharedMacroAssemblerBase::I64x2UConvertI32x4High(XMMRegister dst,
                                                   XMMRegister src,
                                                   XMMRegister scratch) {
   ASM_CODE_COMMENT(this);
   if (CpuFeatures::IsSupported(AVX)) {
     CpuFeatureScope avx_scope(this, AVX);
@@ -1170,8 +1178,8 @@ void SharedTurboAssembler::I64x2UConvertI32x4High(XMMRegister dst,
   }
 }
 
-void SharedTurboAssembler::S128Not(XMMRegister dst, XMMRegister src,
+void SharedMacroAssemblerBase::S128Not(XMMRegister dst, XMMRegister src,
                                    XMMRegister scratch) {
   ASM_CODE_COMMENT(this);
   if (dst == src) {
     Pcmpeqd(scratch, scratch);
@@ -1182,9 +1190,9 @@ void SharedTurboAssembler::S128Not(XMMRegister dst, XMMRegister src,
   }
 }
 
-void SharedTurboAssembler::S128Select(XMMRegister dst, XMMRegister mask,
+void SharedMacroAssemblerBase::S128Select(XMMRegister dst, XMMRegister mask,
                                       XMMRegister src1, XMMRegister src2,
                                       XMMRegister scratch) {
   ASM_CODE_COMMENT(this);
   // v128.select = v128.or(v128.and(v1, c), v128.andnot(v2, c)).
   // pandn(x, y) = !x & y, so we have to flip the mask and input.
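
The same identity in scalar form (illustrative):

#include <cstdint>

// v128.select, one 64-bit chunk at a time: the bits of `mask` choose between
// v1 and v2. pandn(x, y) computes ~x & y, hence the flipped operand order.
uint64_t Select(uint64_t mask, uint64_t v1, uint64_t v2) {
  return (v1 & mask) | (v2 & ~mask);
}
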
@@ -1203,8 +1211,8 @@ void SharedTurboAssembler::S128Select(XMMRegister dst, XMMRegister mask,
   }
 }
 
-void SharedTurboAssembler::S128Load8Splat(XMMRegister dst, Operand src,
+void SharedMacroAssemblerBase::S128Load8Splat(XMMRegister dst, Operand src,
                                           XMMRegister scratch) {
   ASM_CODE_COMMENT(this);
   // The trap handler uses the current pc to creating a landing, so that it can
   // determine if a trap occured in Wasm code due to a OOB load. Make sure the
@@ -1226,8 +1234,8 @@ void SharedTurboAssembler::S128Load8Splat(XMMRegister dst, Operand src,
   }
 }
 
-void SharedTurboAssembler::S128Load16Splat(XMMRegister dst, Operand src,
+void SharedMacroAssemblerBase::S128Load16Splat(XMMRegister dst, Operand src,
                                            XMMRegister scratch) {
   ASM_CODE_COMMENT(this);
   // The trap handler uses the current pc to creating a landing, so that it can
   // determine if a trap occured in Wasm code due to a OOB load. Make sure the
@@ -1248,7 +1256,7 @@ void SharedTurboAssembler::S128Load16Splat(XMMRegister dst, Operand src,
   }
 }
 
-void SharedTurboAssembler::S128Load32Splat(XMMRegister dst, Operand src) {
+void SharedMacroAssemblerBase::S128Load32Splat(XMMRegister dst, Operand src) {
   ASM_CODE_COMMENT(this);
   // The trap handler uses the current pc to creating a landing, so that it can
   // determine if a trap occured in Wasm code due to a OOB load. Make sure the
@@ -1262,8 +1270,8 @@ void SharedTurboAssembler::S128Load32Splat(XMMRegister dst, Operand src) {
   }
 }
 
-void SharedTurboAssembler::S128Store64Lane(Operand dst, XMMRegister src,
+void SharedMacroAssemblerBase::S128Store64Lane(Operand dst, XMMRegister src,
                                            uint8_t laneidx) {
   ASM_CODE_COMMENT(this);
   if (laneidx == 0) {
     Movlps(dst, src);
@@ -1342,27 +1350,27 @@ void SharedTurboAssembler::S128Store64Lane(Operand dst, XMMRegister src,
     sub##ps_or_pd(dst, tmp);                      \
   }
 
-void SharedTurboAssembler::F32x4Qfma(XMMRegister dst, XMMRegister src1,
+void SharedMacroAssemblerBase::F32x4Qfma(XMMRegister dst, XMMRegister src1,
                                      XMMRegister src2, XMMRegister src3,
                                      XMMRegister tmp) {
   QFMA(ps)
 }
 
-void SharedTurboAssembler::F32x4Qfms(XMMRegister dst, XMMRegister src1,
+void SharedMacroAssemblerBase::F32x4Qfms(XMMRegister dst, XMMRegister src1,
                                      XMMRegister src2, XMMRegister src3,
                                      XMMRegister tmp) {
   QFMS(ps)
 }
 
-void SharedTurboAssembler::F64x2Qfma(XMMRegister dst, XMMRegister src1,
+void SharedMacroAssemblerBase::F64x2Qfma(XMMRegister dst, XMMRegister src1,
                                      XMMRegister src2, XMMRegister src3,
                                      XMMRegister tmp) {
   QFMA(pd);
 }
 
-void SharedTurboAssembler::F64x2Qfms(XMMRegister dst, XMMRegister src1,
+void SharedMacroAssemblerBase::F64x2Qfms(XMMRegister dst, XMMRegister src1,
                                      XMMRegister src2, XMMRegister src3,
                                      XMMRegister tmp) {
   QFMS(pd);
 }
 
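
Scalar shape of the relaxed-SIMD qfma/qfms lanes (illustrative, assuming the qfms lane computes c - a*b, as the subps fallback in the QFMS macro suggests): with FMA3 the result is fused, one rounding; the fallback multiplies then adds or subtracts, two roundings, and the relaxed semantics permit either.

#include <cmath>

float Qfma(float a, float b, float c, bool has_fma3) {
  return has_fma3 ? std::fma(a, b, c)   // fused: single rounding
                  : a * b + c;          // mul + add fallback: two roundings
}

float Qfms(float a, float b, float c, bool has_fma3) {
  return has_fma3 ? std::fma(-a, b, c)  // fused c - a*b
                  : c - a * b;          // mul + sub fallback
}
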
@@ -8,7 +8,7 @@
 #include "src/base/macros.h"
 #include "src/codegen/cpu-features.h"
 #include "src/codegen/external-reference.h"
-#include "src/codegen/turbo-assembler.h"
+#include "src/codegen/macro-assembler-base.h"
 
 #if V8_TARGET_ARCH_IA32
 #include "src/codegen/ia32/register-ia32.h"
@@ -30,15 +30,15 @@ constexpr int kStackSavedSavedFPSize = 2 * kDoubleSize;
 constexpr int kStackSavedSavedFPSize = kDoubleSize;
 #endif  // V8_ENABLE_WEBASSEMBLY
 
-// Base class for SharedTurboAssemblerBase. This class contains macro-assembler
+// Base class for SharedMacroAssembler. This class contains macro-assembler
 // functions that can be shared across ia32 and x64 without any template
 // machinery, i.e. does not require the CRTP pattern that
-// SharedTurboAssemblerBase exposes. This allows us to keep the bulk of
+// SharedMacroAssembler exposes. This allows us to keep the bulk of
 // definition inside a separate source file, rather than putting everything
 // inside this header.
-class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
+class V8_EXPORT_PRIVATE SharedMacroAssemblerBase : public MacroAssemblerBase {
  public:
-  using TurboAssemblerBase::TurboAssemblerBase;
+  using MacroAssemblerBase::MacroAssemblerBase;
 
   void Move(Register dst, uint32_t src);
   // Move if registers are not identical.
@ -530,41 +530,41 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
|
|||||||
void I16x8SplatPreAvx2(XMMRegister dst, Op src);
|
void I16x8SplatPreAvx2(XMMRegister dst, Op src);
 };
 
-// Common base class template shared by ia32 and x64 TurboAssembler. This uses
+// Common base class template shared by ia32 and x64 MacroAssembler. This uses
 // the Curiously Recurring Template Pattern (CRTP), where Impl is the actual
-// class (subclass of SharedTurboAssemblerBase instantiated with the actual
+// class (subclass of SharedMacroAssembler instantiated with the actual
 // class). This allows static polymorphism, where member functions can be moved
-// into SharedTurboAssembler, and we can also call into member functions
-// defined in ia32 or x64 specific TurboAssembler from within this template
+// into SharedMacroAssemblerBase, and we can also call into member functions
+// defined in ia32 or x64 specific MacroAssembler from within this template
 // class, via Impl.
 //
 // Note: all member functions must be defined in this header file so that the
 // compiler can generate code for the function definitions. See
 // https://isocpp.org/wiki/faq/templates#templates-defn-vs-decl for rationale.
-// If a function does not need polymorphism, move it into SharedTurboAssembler,
-// and define it outside of this header.
+// If a function does not need polymorphism, move it into
+// SharedMacroAssemblerBase, and define it outside of this header.
 template <typename Impl>
-class V8_EXPORT_PRIVATE SharedTurboAssemblerBase : public SharedTurboAssembler {
-  using SharedTurboAssembler::SharedTurboAssembler;
+class V8_EXPORT_PRIVATE SharedMacroAssembler : public SharedMacroAssemblerBase {
+  using SharedMacroAssemblerBase::SharedMacroAssemblerBase;
 
  public:
   void Abspd(XMMRegister dst, XMMRegister src, Register tmp) {
-    FloatUnop(dst, src, tmp, &SharedTurboAssembler::Andps,
+    FloatUnop(dst, src, tmp, &SharedMacroAssemblerBase::Andps,
               ExternalReference::address_of_double_abs_constant());
   }
 
   void Absps(XMMRegister dst, XMMRegister src, Register tmp) {
-    FloatUnop(dst, src, tmp, &SharedTurboAssembler::Andps,
+    FloatUnop(dst, src, tmp, &SharedMacroAssemblerBase::Andps,
               ExternalReference::address_of_float_abs_constant());
   }
 
   void Negpd(XMMRegister dst, XMMRegister src, Register tmp) {
-    FloatUnop(dst, src, tmp, &SharedTurboAssembler::Xorps,
+    FloatUnop(dst, src, tmp, &SharedMacroAssemblerBase::Xorps,
               ExternalReference::address_of_double_neg_constant());
   }
 
   void Negps(XMMRegister dst, XMMRegister src, Register tmp) {
-    FloatUnop(dst, src, tmp, &SharedTurboAssembler::Xorps,
+    FloatUnop(dst, src, tmp, &SharedMacroAssemblerBase::Xorps,
               ExternalReference::address_of_float_neg_constant());
   }
 #undef FLOAT_UNOP
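The CRTP wiring above is easier to see in isolation. A minimal sketch of the pattern (illustrative names only, not V8 code); because impl() is a static_cast rather than a virtual call, the dispatch is resolved at compile time:

// Minimal CRTP sketch: the base template calls into the concrete subclass
// via the Impl parameter, so there is no virtual dispatch at runtime.
#include <iostream>

template <typename Impl>
class AssemblerBase {
 public:
  // Shared helper that calls an Impl-specific member statically.
  void EmitPrologue() { impl()->EmitPlatformPrologue(); }

 private:
  Impl* impl() { return static_cast<Impl*>(this); }
};

class X64Assembler : public AssemblerBase<X64Assembler> {
 public:
  void EmitPlatformPrologue() { std::cout << "push rbp\n"; }
};

int main() {
  X64Assembler masm;
  masm.EmitPrologue();  // Prints "push rbp"; resolved at compile time.
}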
@@ -975,15 +975,16 @@ class V8_EXPORT_PRIVATE SharedTurboAssemblerBase : public SharedTurboAssembler {
     return impl()->ExternalReferenceAsOperand(reference, scratch);
   }
 
-  using FloatInstruction = void (SharedTurboAssembler::*)(XMMRegister,
-                                                          XMMRegister, Operand);
+  using FloatInstruction = void (SharedMacroAssemblerBase::*)(XMMRegister,
+                                                              XMMRegister,
+                                                              Operand);
   void FloatUnop(XMMRegister dst, XMMRegister src, Register tmp,
                  FloatInstruction op, ExternalReference ext) {
     if (!CpuFeatures::IsSupported(AVX) && (dst != src)) {
       movaps(dst, src);
       src = dst;
     }
-    SharedTurboAssembler* assm = this;
+    SharedMacroAssemblerBase* assm = this;
     (assm->*op)(dst, src, ExternalReferenceAsOperand(ext, tmp));
   }
 };
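FloatUnop above dispatches through a pointer-to-member function. A self-contained sketch of that mechanism (illustrative types and names, not V8 code):

// Pointer-to-member-function dispatch, as used by FloatUnop.
#include <iostream>

struct Assembler {
  void Andps(int dst, int src) { std::cout << "andps " << dst << ", " << src << "\n"; }
  void Xorps(int dst, int src) { std::cout << "xorps " << dst << ", " << src << "\n"; }
};

// Alias for "pointer to an Assembler member taking (int, int)".
using FloatInstruction = void (Assembler::*)(int, int);

void FloatUnop(Assembler* assm, FloatInstruction op, int dst, int src) {
  (assm->*op)(dst, src);  // Invoke the selected instruction on assm.
}

int main() {
  FloatUnop(&(Assembler{}), &Assembler::Andps, 0, 1);  // andps 0, 1
  Assembler a;
  FloatUnop(&a, &Assembler::Xorps, 2, 3);              // xorps 2, 3
}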
@@ -79,10 +79,10 @@ std::vector<SourcePositionInfo> SourcePosition::InliningStack(Isolate* isolate,
 }
 
 SourcePositionInfo SourcePosition::FirstInfo(Isolate* isolate,
-                                             Handle<Code> code) const {
+                                             Code code) const {
   DisallowGarbageCollection no_gc;
   DeoptimizationData deopt_data =
-      DeoptimizationData::cast(code->deoptimization_data());
+      DeoptimizationData::cast(code.deoptimization_data());
   SourcePosition pos = *this;
   if (pos.isInlined()) {
     InliningPosition inl = deopt_data.InliningPositions().get(pos.InliningId());
@@ -83,7 +83,7 @@ class SourcePosition final {
                                                Code code) const;
   std::vector<SourcePositionInfo> InliningStack(
       OptimizedCompilationInfo* cinfo) const;
-  SourcePositionInfo FirstInfo(Isolate* isolate, Handle<Code> code) const;
+  SourcePositionInfo FirstInfo(Isolate* isolate, Code code) const;
 
   void Print(std::ostream& out, InstructionStream code) const;
   void PrintJson(std::ostream& out) const;
@@ -283,8 +283,8 @@ HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) {
   if (IsCompressedEmbeddedObject(rmode_)) {
     Tagged_t compressed = ReadUnalignedValue<Tagged_t>(pc_);
     DCHECK(!HAS_SMI_TAG(compressed));
-    Object obj(V8HeapCompressionScheme::DecompressTaggedPointer(cage_base,
-                                                                compressed));
+    Object obj(
+        V8HeapCompressionScheme::DecompressTagged(cage_base, compressed));
     // Embedding of compressed InstructionStream objects must not happen when
     // external code space is enabled, because Codes must be used
     // instead.
[File diff suppressed because it is too large]
@@ -55,10 +55,10 @@ class StackArgumentsAccessor {
   DISALLOW_IMPLICIT_CONSTRUCTORS(StackArgumentsAccessor);
 };
 
-class V8_EXPORT_PRIVATE TurboAssembler
-    : public SharedTurboAssemblerBase<TurboAssembler> {
+class V8_EXPORT_PRIVATE MacroAssembler
+    : public SharedMacroAssembler<MacroAssembler> {
  public:
-  using SharedTurboAssemblerBase<TurboAssembler>::SharedTurboAssemblerBase;
+  using SharedMacroAssembler<MacroAssembler>::SharedMacroAssembler;
 
   void PushReturnAddressFrom(Register src) { pushq(src); }
   void PopReturnAddressTo(Register dst) { popq(dst); }
@@ -583,35 +583,21 @@ class V8_EXPORT_PRIVATE TurboAssembler
   // ---------------------------------------------------------------------------
   // Pointer compression support
 
-  // Loads a field containing a HeapObject and decompresses it if pointer
-  // compression is enabled.
-  void LoadTaggedPointerField(Register destination, Operand field_operand);
+  // Loads a field containing any tagged value and decompresses it if necessary.
+  void LoadTaggedField(Register destination, Operand field_operand);
 
-  // Loads a field containing a HeapObject but does not decompress it when
+  // Loads a field containing any tagged value but does not decompress it when
   // pointer compression is enabled.
-  void LoadTaggedPointerField(TaggedRegister destination,
-                              Operand field_operand);
+  void LoadTaggedField(TaggedRegister destination, Operand field_operand);
 
   // Loads a field containing a Smi and decompresses it if pointer compression
   // is enabled.
   void LoadTaggedSignedField(Register destination, Operand field_operand);
 
-  // Loads a field containing any tagged value and decompresses it if necessary.
-  void LoadAnyTaggedField(Register destination, Operand field_operand);
-
-  // Loads a field containing any tagged value but does not decompress it when
-  // pointer compression is enabled.
-  void LoadAnyTaggedField(TaggedRegister destination, Operand field_operand);
-
-  // Loads a field containing a HeapObject, decompresses it if necessary and
-  // pushes full pointer to the stack. When pointer compression is enabled,
-  // uses |scratch| to decompress the value.
-  void PushTaggedPointerField(Operand field_operand, Register scratch);
-
   // Loads a field containing any tagged value, decompresses it if necessary and
   // pushes the full pointer to the stack. When pointer compression is enabled,
   // uses |scratch| to decompress the value.
-  void PushTaggedAnyField(Operand field_operand, Register scratch);
+  void PushTaggedField(Operand field_operand, Register scratch);
 
   // Loads a field containing smi value and untags it.
   void SmiUntagField(Register dst, Operand src);
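The net effect on call sites is that the former Pointer/Any load variants collapse into one entry point. A hedged before/after sketch (registers and field offsets are illustrative, not taken from the commit):

// Hypothetical call-site migration implied by this hunk:
//
//   Before:
//     masm.LoadTaggedPointerField(rax, FieldOperand(rbx, HeapObject::kMapOffset));
//     masm.LoadAnyTaggedField(rcx, FieldOperand(rbx, JSObject::kElementsOffset));
//
//   After: one entry point handles both strong pointers and arbitrary
//   tagged values (including Smis).
//     masm.LoadTaggedField(rax, FieldOperand(rbx, HeapObject::kMapOffset));
//     masm.LoadTaggedField(rcx, FieldOperand(rbx, JSObject::kElementsOffset));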
@@ -626,10 +612,9 @@ class V8_EXPORT_PRIVATE TurboAssembler
 
   // The following macros work even when pointer compression is not enabled.
   void DecompressTaggedSigned(Register destination, Operand field_operand);
-  void DecompressTaggedPointer(Register destination, Operand field_operand);
-  void DecompressTaggedPointer(Register destination, Register source);
-  void DecompressTaggedPointer(Register destination, Tagged_t immediate);
-  void DecompressAnyTagged(Register destination, Operand field_operand);
+  void DecompressTagged(Register destination, Operand field_operand);
+  void DecompressTagged(Register destination, Register source);
+  void DecompressTagged(Register destination, Tagged_t immediate);
 
   // ---------------------------------------------------------------------------
   // V8 Sandbox support
@@ -653,23 +638,6 @@ class V8_EXPORT_PRIVATE TurboAssembler
       IsolateRootLocation isolateRootLocation =
           IsolateRootLocation::kInRootRegister);
 
- protected:
-  static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
-
-  // Returns a register holding the smi value. The register MUST NOT be
-  // modified. It may be the "smi 1 constant" register.
-  Register GetSmiConstant(Smi value);
-
-  // Drops arguments assuming that the return address was already popped.
-  void DropArguments(Register count, ArgumentsCountType type = kCountIsInteger,
-                     ArgumentsCountMode mode = kCountExcludesReceiver);
-};
-
-// MacroAssembler implements a collection of frequently used macros.
-class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
- public:
-  using TurboAssembler::TurboAssembler;
-
   // Loads and stores the value of an external reference.
   // Special case code for load and store to take advantage of
   // load_rax/store_rax if possible/necessary.
@@ -781,7 +749,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
   // ---------------------------------------------------------------------------
   // Macro instructions.
 
-  using TurboAssembler::Cmp;
   void Cmp(Register dst, Handle<Object> source);
   void Cmp(Operand dst, Handle<Object> source);
 
@@ -945,6 +912,17 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
   // In-place weak references.
   void LoadWeakValue(Register in_out, Label* target_if_cleared);
 
+ protected:
+  static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
+
+  // Returns a register holding the smi value. The register MUST NOT be
+  // modified. It may be the "smi 1 constant" register.
+  Register GetSmiConstant(Smi value);
+
+  // Drops arguments assuming that the return address was already popped.
+  void DropArguments(Register count, ArgumentsCountType type = kCountIsInteger,
+                     ArgumentsCountMode mode = kCountExcludesReceiver);
+
  private:
   // Helper functions for generating invokes.
   void InvokePrologue(Register expected_parameter_count,
@@ -2035,7 +2035,8 @@ enum IsolateAddressId {
   V(TrapNullDereference)          \
   V(TrapIllegalCast)              \
   V(TrapArrayOutOfBounds)         \
-  V(TrapArrayTooLarge)
+  V(TrapArrayTooLarge)            \
+  V(TrapStringOffsetOutOfBounds)
 
 enum KeyedAccessLoadMode {
   STANDARD_LOAD,
@@ -69,8 +69,8 @@ Address V8HeapCompressionScheme::DecompressTaggedSigned(Tagged_t raw_value) {
 
 // static
 template <typename TOnHeapAddress>
-Address V8HeapCompressionScheme::DecompressTaggedPointer(
-    TOnHeapAddress on_heap_addr, Tagged_t raw_value) {
+Address V8HeapCompressionScheme::DecompressTagged(TOnHeapAddress on_heap_addr,
+                                                  Tagged_t raw_value) {
 #if defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE) && \
     !defined(V8_COMPRESS_POINTERS_DONT_USE_GLOBAL_BASE)
   V8_ASSUME((base_ & kPtrComprCageBaseMask) == base_);
@@ -79,19 +79,15 @@ Address V8HeapCompressionScheme::DecompressTaggedPointer(
   // For V8_ASSUME_ALIGNED to be considered for optimizations the following
   // addition has to happen on a pointer type.
   Address result = reinterpret_cast<Address>(cage_base + raw_value);
-  V8_ASSUME(static_cast<uint32_t>(result) == raw_value);
-  return result;
 #else
   Address cage_base = GetPtrComprCageBaseAddress(on_heap_addr);
-  return cage_base + static_cast<Address>(raw_value);
+  Address result = cage_base + static_cast<Address>(raw_value);
 #endif
-}
-
-// static
-template <typename TOnHeapAddress>
-Address V8HeapCompressionScheme::DecompressTaggedAny(
-    TOnHeapAddress on_heap_addr, Tagged_t raw_value) {
-  return DecompressTaggedPointer(on_heap_addr, raw_value);
+  // Allows to remove compress(decompress(...))
+  V8_ASSUME(static_cast<uint32_t>(result) == raw_value);
+  // Allows to remove SMI checks when the result is compared against a constant.
+  V8_ASSUME(HAS_SMI_TAG(result) == HAS_SMI_TAG(raw_value));
+  return result;
 }
 
 // static
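Decompression is a cage-base add on the low 32 bits, and the two V8_ASSUME lines assert invariants of that add. A standalone model (illustrative constants, not V8 code):

// Model of pointer decompression: a compressed tagged value is the low
// 32 bits of the full pointer; decompression re-attaches the cage base,
// and the tag bits in the low bits survive unchanged.
#include <cassert>
#include <cstdint>

using Address = uint64_t;
using Tagged_t = uint32_t;

constexpr Address kCageBase = 0x0000'0040'0000'0000;  // 4GB-aligned (assumed).

Address DecompressTagged(Address cage_base, Tagged_t raw_value) {
  Address result = cage_base + static_cast<Address>(raw_value);
  // Mirrors V8_ASSUME(static_cast<uint32_t>(result) == raw_value): the add
  // never carries into the low 32 bits because the base is 4GB-aligned.
  assert(static_cast<uint32_t>(result) == raw_value);
  // Mirrors the HAS_SMI_TAG assumption: the low tag bit is preserved.
  assert((result & 1) == (raw_value & 1));
  return result;
}

int main() {
  Tagged_t compressed = 0x12345671;  // Low bit set: heap-object tag.
  Address full = DecompressTagged(kCageBase, compressed);
  assert(full == kCageBase + 0x12345671);
}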
@@ -102,10 +98,10 @@ void V8HeapCompressionScheme::ProcessIntermediatePointers(
   // If pointer compression is enabled, we may have random compressed pointers
   // on the stack that may be used for subsequent operations.
   // Extract, decompress and trace both halfwords.
-  Address decompressed_low = V8HeapCompressionScheme::DecompressTaggedPointer(
+  Address decompressed_low = V8HeapCompressionScheme::DecompressTagged(
       cage_base, static_cast<Tagged_t>(raw_value));
   callback(decompressed_low);
-  Address decompressed_high = V8HeapCompressionScheme::DecompressTaggedPointer(
+  Address decompressed_high = V8HeapCompressionScheme::DecompressTagged(
       cage_base,
       static_cast<Tagged_t>(raw_value >> (sizeof(Tagged_t) * CHAR_BIT)));
   callback(decompressed_high);
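A sketch of the halfword extraction used above (not V8 code): a 64-bit stack slot may hold two independent 32-bit compressed pointers, so both halves are decompressed and reported to the visitor callback:

// Split one 64-bit stack word into two compressed pointers and visit both.
#include <climits>
#include <cstdint>
#include <iostream>

using Address = uint64_t;
using Tagged_t = uint32_t;

template <typename Callback>
void ProcessIntermediatePointers(Address cage_base, Address raw_value,
                                 Callback callback) {
  Tagged_t low = static_cast<Tagged_t>(raw_value);
  Tagged_t high =
      static_cast<Tagged_t>(raw_value >> (sizeof(Tagged_t) * CHAR_BIT));
  callback(cage_base + low);   // Decompress the low halfword.
  callback(cage_base + high);  // Decompress the high halfword.
}

int main() {
  Address cage_base = 0x0000'0040'0000'0000;  // Illustrative.
  Address slot = (Address{0xDEADBEE0} << 32) | 0xCAFEBAB4;
  ProcessIntermediatePointers(cage_base, slot, [](Address a) {
    std::cout << std::hex << a << "\n";
  });
}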
@@ -162,7 +158,7 @@ Address ExternalCodeCompressionScheme::DecompressTaggedSigned(
 
 // static
 template <typename TOnHeapAddress>
-Address ExternalCodeCompressionScheme::DecompressTaggedPointer(
+Address ExternalCodeCompressionScheme::DecompressTagged(
     TOnHeapAddress on_heap_addr, Tagged_t raw_value) {
 #if defined(V8_COMPRESS_POINTERS_IN_SHARED_CAGE) && \
     !defined(V8_COMPRESS_POINTERS_DONT_USE_GLOBAL_BASE)
@@ -172,19 +168,15 @@ Address ExternalCodeCompressionScheme::DecompressTaggedPointer(
   // For V8_ASSUME_ALIGNED to be considered for optimizations the following
   // addition has to happen on a pointer type.
   Address result = reinterpret_cast<Address>(cage_base + raw_value);
-  V8_ASSUME(static_cast<uint32_t>(result) == raw_value);
-  return result;
 #else
   Address cage_base = GetPtrComprCageBaseAddress(on_heap_addr);
-  return cage_base + static_cast<Address>(raw_value);
+  Address result = cage_base + static_cast<Address>(raw_value);
 #endif
-}
-
-// static
-template <typename TOnHeapAddress>
-Address ExternalCodeCompressionScheme::DecompressTaggedAny(
-    TOnHeapAddress on_heap_addr, Tagged_t raw_value) {
-  return DecompressTaggedPointer(on_heap_addr, raw_value);
+  // Allows to remove compress(decompress(...))
+  V8_ASSUME(static_cast<uint32_t>(result) == raw_value);
+  // Allows to remove SMI checks when the result is compared against a constant.
+  V8_ASSUME(HAS_SMI_TAG(result) == HAS_SMI_TAG(raw_value));
+  return result;
 }
 
 #endif  // V8_EXTERNAL_CODE_SPACE
@@ -223,15 +215,8 @@ Address V8HeapCompressionScheme::DecompressTaggedSigned(Tagged_t raw_value) {
 
 // static
 template <typename TOnHeapAddress>
-Address V8HeapCompressionScheme::DecompressTaggedPointer(
-    TOnHeapAddress on_heap_addr, Tagged_t raw_value) {
-  UNREACHABLE();
-}
-
-// static
-template <typename TOnHeapAddress>
-Address V8HeapCompressionScheme::DecompressTaggedAny(
-    TOnHeapAddress on_heap_addr, Tagged_t raw_value) {
+Address V8HeapCompressionScheme::DecompressTagged(TOnHeapAddress on_heap_addr,
+                                                  Tagged_t raw_value) {
   UNREACHABLE();
 }
 
@@ -29,15 +29,10 @@ class V8HeapCompressionScheme {
   // Decompresses smi value.
   V8_INLINE static Address DecompressTaggedSigned(Tagged_t raw_value);
 
-  // Decompresses weak or strong heap object pointer or forwarding pointer,
-  // preserving both weak- and smi- tags.
-  template <typename TOnHeapAddress>
-  V8_INLINE static Address DecompressTaggedPointer(TOnHeapAddress on_heap_addr,
-                                                   Tagged_t raw_value);
   // Decompresses any tagged value, preserving both weak- and smi- tags.
   template <typename TOnHeapAddress>
-  V8_INLINE static Address DecompressTaggedAny(TOnHeapAddress on_heap_addr,
-                                               Tagged_t raw_value);
+  V8_INLINE static Address DecompressTagged(TOnHeapAddress on_heap_addr,
+                                            Tagged_t raw_value);
 
   // Given a 64bit raw value, found on the stack, calls the callback function
   // with all possible pointers that may be "contained" in compressed form in
@@ -82,15 +77,10 @@ class ExternalCodeCompressionScheme {
   // Decompresses smi value.
   V8_INLINE static Address DecompressTaggedSigned(Tagged_t raw_value);
 
-  // Decompresses weak or strong heap object pointer or forwarding pointer,
-  // preserving both weak- and smi- tags.
-  template <typename TOnHeapAddress>
-  V8_INLINE static Address DecompressTaggedPointer(TOnHeapAddress on_heap_addr,
-                                                   Tagged_t raw_value);
   // Decompresses any tagged value, preserving both weak- and smi- tags.
   template <typename TOnHeapAddress>
-  V8_INLINE static Address DecompressTaggedAny(TOnHeapAddress on_heap_addr,
-                                               Tagged_t raw_value);
+  V8_INLINE static Address DecompressTagged(TOnHeapAddress on_heap_addr,
+                                            Tagged_t raw_value);
 
 #ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
   // Process-wide cage base value used for decompression.
@@ -29,7 +29,7 @@ namespace v8 {
 namespace internal {
 namespace compiler {
 
-#define __ tasm()->
+#define __ masm()->
 
 // Adds Arm-specific methods to convert InstructionOperands.
 class ArmOperandConverter final : public InstructionOperandConverter {
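The "__" shorthand being renamed here is a plain macro convention: every "__ op(...)" in the code generator expands to a call on the current assembler. A minimal model (illustrative, not V8 code):

// How the "__" convention works: the macro makes assembly-emitting code
// read like an instruction listing.
#include <iostream>

struct MacroAssembler {
  void mov(const char* dst, const char* src) {
    std::cout << "mov " << dst << ", " << src << "\n";
  }
};

MacroAssembler* masm_instance() {
  static MacroAssembler m;
  return &m;
}

#define masm() masm_instance()
#define __ masm()->

int main() {
  __ mov("r0", "r1");  // Expands to masm()->mov("r0", "r1");
}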
@@ -415,7 +415,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
   do { \
     /* TODO(bmeurer): We should really get rid of this special instruction, */ \
     /* and generate a CallAddress instruction instead. */ \
-    FrameScope scope(tasm(), StackFrame::MANUAL); \
+    FrameScope scope(masm(), StackFrame::MANUAL); \
    __ PrepareCallCFunction(0, 2); \
    __ MovToFloatParameters(i.InputDoubleRegister(0), \
                            i.InputDoubleRegister(1)); \
@@ -429,7 +429,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
   do { \
     /* TODO(bmeurer): We should really get rid of this special instruction, */ \
     /* and generate a CallAddress instruction instead. */ \
-    FrameScope scope(tasm(), StackFrame::MANUAL); \
+    FrameScope scope(masm(), StackFrame::MANUAL); \
    __ PrepareCallCFunction(0, 1); \
    __ MovToFloatParameter(i.InputDoubleRegister(0)); \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
@@ -473,7 +473,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
     if (instr->InputAt(1)->IsImmediate()) { \
       __ asm_imm(dt, dst, src, i.InputInt##width(1)); \
     } else { \
-      UseScratchRegisterScope temps(tasm()); \
+      UseScratchRegisterScope temps(masm()); \
       Simd128Register tmp = temps.AcquireQ(); \
       Register shift = temps.Acquire(); \
       constexpr int mask = (1 << width) - 1; \
@@ -493,7 +493,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
     if (instr->InputAt(1)->IsImmediate()) { \
       __ asm_imm(dt, dst, src, i.InputInt##width(1)); \
     } else { \
-      UseScratchRegisterScope temps(tasm()); \
+      UseScratchRegisterScope temps(masm()); \
       Simd128Register tmp = temps.AcquireQ(); \
       Register shift = temps.Acquire(); \
       constexpr int mask = (1 << width) - 1; \
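UseScratchRegisterScope, which these hunks re-point at masm(), is an RAII guard over the assembler's scratch-register pool. A toy model of the pattern (illustrative, not V8 code):

// Registers acquired inside the scope return to the pool when the scope is
// destroyed, so nested code generation cannot leak scratch registers.
#include <bitset>
#include <cassert>

struct MacroAssembler {
  std::bitset<16> available{0b0011};  // Registers 0 and 1 are scratch (assumed).
};

class UseScratchRegisterScope {
 public:
  explicit UseScratchRegisterScope(MacroAssembler* masm)
      : masm_(masm), saved_(masm->available) {}
  ~UseScratchRegisterScope() { masm_->available = saved_; }  // Release all.

  int Acquire() {
    for (int r = 0; r < 16; ++r) {
      if (masm_->available[r]) {
        masm_->available[r] = false;  // Mark as in use.
        return r;
      }
    }
    assert(false && "no scratch register available");
    return -1;
  }

 private:
  MacroAssembler* masm_;
  std::bitset<16> saved_;
};

int main() {
  MacroAssembler masm;
  {
    UseScratchRegisterScope temps(&masm);
    assert(temps.Acquire() == 0);
  }  // Scope ends: register 0 is available again.
  assert(masm.available[0]);
}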
@@ -518,20 +518,20 @@ void CodeGenerator::AssemblePrepareTailCall() {
 
 namespace {
 
-void FlushPendingPushRegisters(TurboAssembler* tasm,
+void FlushPendingPushRegisters(MacroAssembler* masm,
                                FrameAccessState* frame_access_state,
                                ZoneVector<Register>* pending_pushes) {
   switch (pending_pushes->size()) {
     case 0:
      break;
     case 1:
-      tasm->push((*pending_pushes)[0]);
+      masm->push((*pending_pushes)[0]);
       break;
     case 2:
-      tasm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
+      masm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
       break;
     case 3:
-      tasm->Push((*pending_pushes)[0], (*pending_pushes)[1],
+      masm->Push((*pending_pushes)[0], (*pending_pushes)[1],
                  (*pending_pushes)[2]);
       break;
     default:
@@ -542,7 +542,7 @@ void FlushPendingPushRegisters(TurboAssembler* tasm,
 }
 
 void AdjustStackPointerForTailCall(
-    TurboAssembler* tasm, FrameAccessState* state, int new_slot_above_sp,
+    MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp,
     ZoneVector<Register>* pending_pushes = nullptr,
     bool allow_shrinkage = true) {
   int current_sp_offset = state->GetSPToFPSlotCount() +
@@ -550,15 +550,15 @@ void AdjustStackPointerForTailCall(
   int stack_slot_delta = new_slot_above_sp - current_sp_offset;
   if (stack_slot_delta > 0) {
     if (pending_pushes != nullptr) {
-      FlushPendingPushRegisters(tasm, state, pending_pushes);
+      FlushPendingPushRegisters(masm, state, pending_pushes);
     }
-    tasm->AllocateStackSpace(stack_slot_delta * kSystemPointerSize);
+    masm->AllocateStackSpace(stack_slot_delta * kSystemPointerSize);
     state->IncreaseSPDelta(stack_slot_delta);
   } else if (allow_shrinkage && stack_slot_delta < 0) {
     if (pending_pushes != nullptr) {
-      FlushPendingPushRegisters(tasm, state, pending_pushes);
+      FlushPendingPushRegisters(masm, state, pending_pushes);
     }
-    tasm->add(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize));
+    masm->add(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize));
     state->IncreaseSPDelta(stack_slot_delta);
   }
 }
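The sign convention of stack_slot_delta above is easy to trip over; a small worked model (illustrative numbers, not V8 code):

// A positive delta grows the stack; a negative delta shrinks it.
#include <cassert>

int StackSlotDelta(int current_sp_offset, int new_slot_above_sp) {
  return new_slot_above_sp - current_sp_offset;
}

int main() {
  // Need 5 slots above sp but only 3 exist: allocate 2 more slots
  // (the AllocateStackSpace branch; the stack grows downward).
  assert(StackSlotDelta(3, 5) == 2);
  // Need 1 slot but 3 exist: delta is -2, so sp moves back up by two slots
  // (the "add sp, sp, Operand(-stack_slot_delta * ...)" branch).
  assert(StackSlotDelta(3, 1) == -2);
}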
@@ -601,7 +601,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
             LocationOperand::cast(move->destination()));
         InstructionOperand source(move->source());
         AdjustStackPointerForTailCall(
-            tasm(), frame_access_state(),
+            masm(), frame_access_state(),
             destination_location.index() - pending_pushes.size(),
             &pending_pushes);
         // Pushes of non-register data types are not supported.
@@ -611,26 +611,26 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
         // TODO(arm): We can push more than 3 registers at once. Add support in
         // the macro-assembler for pushing a list of registers.
         if (pending_pushes.size() == 3) {
-          FlushPendingPushRegisters(tasm(), frame_access_state(),
+          FlushPendingPushRegisters(masm(), frame_access_state(),
                                     &pending_pushes);
         }
         move->Eliminate();
       }
-      FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes);
+      FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes);
     }
-  AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                 first_unused_slot_offset, nullptr, false);
 }
 
 void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
                                              int first_unused_slot_offset) {
-  AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                 first_unused_slot_offset);
 }
 
 // Check that {kJavaScriptCallCodeStartRegister} is correct.
 void CodeGenerator::AssembleCodeStartRegisterCheck() {
-  UseScratchRegisterScope temps(tasm());
+  UseScratchRegisterScope temps(masm());
   Register scratch = temps.Acquire();
   __ ComputeCodeStartAddress(scratch);
   __ cmp(scratch, kJavaScriptCallCodeStartRegister);
@@ -645,7 +645,7 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
 //    2. test kMarkedForDeoptimizationBit in those flags; and
 //    3. if it is not zero then it jumps to the builtin.
 void CodeGenerator::BailoutIfDeoptimized() {
-  UseScratchRegisterScope temps(tasm());
+  UseScratchRegisterScope temps(masm());
   Register scratch = temps.Acquire();
   int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
   __ ldr(scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset));
@@ -747,7 +747,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kArchCallJSFunction: {
       Register func = i.InputRegister(0);
       if (v8_flags.debug_code) {
-        UseScratchRegisterScope temps(tasm());
+        UseScratchRegisterScope temps(masm());
         Register scratch = temps.Acquire();
         // Check the function's context matches the context argument.
         __ ldr(scratch, FieldMemOperand(func, JSFunction::kContextOffset));
@@ -858,7 +858,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       {
         // We don't actually want to generate a pile of code for this, so just
        // claim there is a stack frame, without generating one.
-        FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+        FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE);
        __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
                RelocInfo::CODE_TARGET);
      }
@@ -1069,7 +1069,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
                i.InputRegister(2), i.OutputSBit());
       break;
     case kArmMls: {
-      CpuFeatureScope scope(tasm(), ARMv7);
+      CpuFeatureScope scope(masm(), ARMv7);
       __ mls(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
              i.InputRegister(2));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -1093,13 +1093,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
              i.InputRegister(1), i.OutputSBit());
       break;
     case kArmSdiv: {
-      CpuFeatureScope scope(tasm(), SUDIV);
+      CpuFeatureScope scope(masm(), SUDIV);
       __ sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
     case kArmUdiv: {
-      CpuFeatureScope scope(tasm(), SUDIV);
+      CpuFeatureScope scope(masm(), SUDIV);
       __ udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
@@ -1127,20 +1127,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
                i.OutputSBit());
       break;
     case kArmBfc: {
-      CpuFeatureScope scope(tasm(), ARMv7);
+      CpuFeatureScope scope(masm(), ARMv7);
       __ bfc(i.OutputRegister(), i.InputInt8(1), i.InputInt8(2));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
     case kArmUbfx: {
-      CpuFeatureScope scope(tasm(), ARMv7);
+      CpuFeatureScope scope(masm(), ARMv7);
       __ ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
               i.InputInt8(2));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
     case kArmSbfx: {
-      CpuFeatureScope scope(tasm(), ARMv7);
+      CpuFeatureScope scope(masm(), ARMv7);
       __ sbfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
               i.InputInt8(2));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -1183,7 +1183,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArmRbit: {
-      CpuFeatureScope scope(tasm(), ARMv7);
+      CpuFeatureScope scope(masm(), ARMv7);
       __ rbit(i.OutputRegister(), i.InputRegister(0));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
@@ -1378,7 +1378,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kArmVmodF64: {
       // TODO(bmeurer): We should really get rid of this special instruction,
       // and generate a CallAddress instruction instead.
-      FrameScope scope(tasm(), StackFrame::MANUAL);
+      FrameScope scope(masm(), StackFrame::MANUAL);
       __ PrepareCallCFunction(0, 2);
       __ MovToFloatParameters(i.InputDoubleRegister(0),
                               i.InputDoubleRegister(1));
@@ -1398,7 +1398,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
     case kArmVrintmF32: {
-      CpuFeatureScope scope(tasm(), ARMv8);
+      CpuFeatureScope scope(masm(), ARMv8);
       if (instr->InputAt(0)->IsSimd128Register()) {
         __ vrintm(NeonS32, i.OutputSimd128Register(),
                   i.InputSimd128Register(0));
@@ -1408,12 +1408,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArmVrintmF64: {
-      CpuFeatureScope scope(tasm(), ARMv8);
+      CpuFeatureScope scope(masm(), ARMv8);
       __ vrintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
     }
     case kArmVrintpF32: {
-      CpuFeatureScope scope(tasm(), ARMv8);
+      CpuFeatureScope scope(masm(), ARMv8);
       if (instr->InputAt(0)->IsSimd128Register()) {
         __ vrintp(NeonS32, i.OutputSimd128Register(),
                   i.InputSimd128Register(0));
@@ -1423,12 +1423,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArmVrintpF64: {
-      CpuFeatureScope scope(tasm(), ARMv8);
+      CpuFeatureScope scope(masm(), ARMv8);
       __ vrintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
     }
     case kArmVrintzF32: {
-      CpuFeatureScope scope(tasm(), ARMv8);
+      CpuFeatureScope scope(masm(), ARMv8);
       if (instr->InputAt(0)->IsSimd128Register()) {
         __ vrintz(NeonS32, i.OutputSimd128Register(),
                   i.InputSimd128Register(0));
@@ -1438,17 +1438,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArmVrintzF64: {
-      CpuFeatureScope scope(tasm(), ARMv8);
+      CpuFeatureScope scope(masm(), ARMv8);
       __ vrintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
     }
     case kArmVrintaF64: {
-      CpuFeatureScope scope(tasm(), ARMv8);
+      CpuFeatureScope scope(masm(), ARMv8);
       __ vrinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
     }
     case kArmVrintnF32: {
-      CpuFeatureScope scope(tasm(), ARMv8);
+      CpuFeatureScope scope(masm(), ARMv8);
       if (instr->InputAt(0)->IsSimd128Register()) {
         __ vrintn(NeonS32, i.OutputSimd128Register(),
                   i.InputSimd128Register(0));
@@ -1458,7 +1458,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArmVrintnF64: {
-      CpuFeatureScope scope(tasm(), ARMv8);
+      CpuFeatureScope scope(masm(), ARMv8);
       __ vrintn(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
     }
@@ -1473,7 +1473,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArmVcvtF32S32: {
-      UseScratchRegisterScope temps(tasm());
+      UseScratchRegisterScope temps(masm());
       SwVfpRegister scratch = temps.AcquireS();
       __ vmov(scratch, i.InputRegister(0));
       __ vcvt_f32_s32(i.OutputFloatRegister(), scratch);
@@ -1481,7 +1481,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArmVcvtF32U32: {
-      UseScratchRegisterScope temps(tasm());
+      UseScratchRegisterScope temps(masm());
       SwVfpRegister scratch = temps.AcquireS();
       __ vmov(scratch, i.InputRegister(0));
       __ vcvt_f32_u32(i.OutputFloatRegister(), scratch);
@@ -1489,7 +1489,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArmVcvtF64S32: {
-      UseScratchRegisterScope temps(tasm());
+      UseScratchRegisterScope temps(masm());
       SwVfpRegister scratch = temps.AcquireS();
       __ vmov(scratch, i.InputRegister(0));
       __ vcvt_f64_s32(i.OutputDoubleRegister(), scratch);
@@ -1497,7 +1497,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArmVcvtF64U32: {
-      UseScratchRegisterScope temps(tasm());
+      UseScratchRegisterScope temps(masm());
       SwVfpRegister scratch = temps.AcquireS();
       __ vmov(scratch, i.InputRegister(0));
       __ vcvt_f64_u32(i.OutputDoubleRegister(), scratch);
@@ -1505,7 +1505,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArmVcvtS32F32: {
-      UseScratchRegisterScope temps(tasm());
+      UseScratchRegisterScope temps(masm());
       SwVfpRegister scratch = temps.AcquireS();
       __ vcvt_s32_f32(scratch, i.InputFloatRegister(0));
       __ vmov(i.OutputRegister(), scratch);
@@ -1520,7 +1520,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArmVcvtU32F32: {
-      UseScratchRegisterScope temps(tasm());
+      UseScratchRegisterScope temps(masm());
       SwVfpRegister scratch = temps.AcquireS();
       __ vcvt_u32_f32(scratch, i.InputFloatRegister(0));
       __ vmov(i.OutputRegister(), scratch);
@@ -1535,7 +1535,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArmVcvtS32F64: {
-      UseScratchRegisterScope temps(tasm());
+      UseScratchRegisterScope temps(masm());
       SwVfpRegister scratch = temps.AcquireS();
       __ vcvt_s32_f64(scratch, i.InputDoubleRegister(0));
       __ vmov(i.OutputRegister(), scratch);
@@ -1543,7 +1543,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArmVcvtU32F64: {
-      UseScratchRegisterScope temps(tasm());
+      UseScratchRegisterScope temps(masm());
       SwVfpRegister scratch = temps.AcquireS();
       __ vcvt_u32_f64(scratch, i.InputDoubleRegister(0));
       __ vmov(i.OutputRegister(), scratch);
@@ -1762,7 +1762,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
         __ vldr(i.OutputFloatRegister(), MemOperand(fp, offset));
       } else {
         DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
-        UseScratchRegisterScope temps(tasm());
+        UseScratchRegisterScope temps(masm());
         Register scratch = temps.Acquire();
         __ add(scratch, fp, Operand(offset));
         __ vld1(Neon8, NeonListOperand(i.OutputSimd128Register()),
@@ -1899,7 +1899,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
 #undef ASSEMBLE_F64X2_ARITHMETIC_BINOP
     case kArmF64x2Eq: {
-      UseScratchRegisterScope temps(tasm());
+      UseScratchRegisterScope temps(masm());
       Register scratch = temps.Acquire();
       __ mov(scratch, Operand(0));
       __ VFPCompareAndSetFlags(i.InputSimd128Register(0).low(),
@@ -1915,7 +1915,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArmF64x2Ne: {
-      UseScratchRegisterScope temps(tasm());
+      UseScratchRegisterScope temps(masm());
       Register scratch = temps.Acquire();
       __ mov(scratch, Operand(0));
       __ VFPCompareAndSetFlags(i.InputSimd128Register(0).low(),
@@ -1931,7 +1931,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArmF64x2Lt: {
-      UseScratchRegisterScope temps(tasm());
+      UseScratchRegisterScope temps(masm());
       Register scratch = temps.Acquire();
       __ VFPCompareAndSetFlags(i.InputSimd128Register(0).low(),
                                i.InputSimd128Register(1).low());
@@ -1947,7 +1947,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArmF64x2Le: {
-      UseScratchRegisterScope temps(tasm());
+      UseScratchRegisterScope temps(masm());
       Register scratch = temps.Acquire();
       __ VFPCompareAndSetFlags(i.InputSimd128Register(0).low(),
                                i.InputSimd128Register(1).low());
@@ -1989,7 +1989,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArmF64x2Ceil: {
-      CpuFeatureScope scope(tasm(), ARMv8);
+      CpuFeatureScope scope(masm(), ARMv8);
       Simd128Register dst = i.OutputSimd128Register();
       Simd128Register src = i.InputSimd128Register(0);
       __ vrintp(dst.low(), src.low());
@@ -1997,7 +1997,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArmF64x2Floor: {
-      CpuFeatureScope scope(tasm(), ARMv8);
+      CpuFeatureScope scope(masm(), ARMv8);
       Simd128Register dst = i.OutputSimd128Register();
       Simd128Register src = i.InputSimd128Register(0);
       __ vrintm(dst.low(), src.low());
@@ -2005,7 +2005,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArmF64x2Trunc: {
-      CpuFeatureScope scope(tasm(), ARMv8);
+      CpuFeatureScope scope(masm(), ARMv8);
       Simd128Register dst = i.OutputSimd128Register();
       Simd128Register src = i.InputSimd128Register(0);
       __ vrintz(dst.low(), src.low());
@@ -2013,7 +2013,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArmF64x2NearestInt: {
-      CpuFeatureScope scope(tasm(), ARMv8);
+      CpuFeatureScope scope(masm(), ARMv8);
       Simd128Register dst = i.OutputSimd128Register();
       Simd128Register src = i.InputSimd128Register(0);
       __ vrintn(dst.low(), src.low());
@@ -2060,7 +2060,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArmI64x2Mul: {
-      UseScratchRegisterScope temps(tasm());
+      UseScratchRegisterScope temps(masm());
       QwNeonRegister dst = i.OutputSimd128Register();
       QwNeonRegister left = i.InputSimd128Register(0);
       QwNeonRegister right = i.InputSimd128Register(1);
@@ -2447,7 +2447,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kArmI32x4BitMask: {
       Register dst = i.OutputRegister();
-      UseScratchRegisterScope temps(tasm());
+      UseScratchRegisterScope temps(masm());
       Simd128Register src = i.InputSimd128Register(0);
       Simd128Register tmp = temps.AcquireQ();
       Simd128Register mask = i.TempSimd128Register(0);
@@ -2468,7 +2468,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       Simd128Register lhs = i.InputSimd128Register(0);
       Simd128Register rhs = i.InputSimd128Register(1);
       Simd128Register tmp1 = i.TempSimd128Register(0);
-      UseScratchRegisterScope temps(tasm());
+      UseScratchRegisterScope temps(masm());
       Simd128Register scratch = temps.AcquireQ();
       __ vmull(NeonS16, tmp1, lhs.low(), rhs.low());
       __ vmull(NeonS16, scratch, lhs.high(), rhs.high());
@@ -2650,7 +2650,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArmI16x8BitMask: {
-      UseScratchRegisterScope temps(tasm());
+      UseScratchRegisterScope temps(masm());
       Register dst = i.OutputRegister();
       Simd128Register src = i.InputSimd128Register(0);
       Simd128Register tmp = temps.AcquireQ();
@@ -2805,7 +2805,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArmI8x16BitMask: {
-      UseScratchRegisterScope temps(tasm());
+      UseScratchRegisterScope temps(masm());
       Register dst = i.OutputRegister();
       Simd128Register src = i.InputSimd128Register(0);
       Simd128Register tmp = temps.AcquireQ();
@@ -2906,7 +2906,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       Simd128Register dst = i.OutputSimd128Register(),
                       src1 = i.InputSimd128Register(1);
       DCHECK(dst == i.InputSimd128Register(0));
-      UseScratchRegisterScope temps(tasm());
+      UseScratchRegisterScope temps(masm());
       Simd128Register scratch = temps.AcquireQ();
       // src0 = [0, 1, 2, 3], src1 = [4, 5, 6, 7]
       __ vmov(scratch, src1);
@@ -2917,7 +2917,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       Simd128Register dst = i.OutputSimd128Register(),
                       src1 = i.InputSimd128Register(1);
       DCHECK(dst == i.InputSimd128Register(0));
-      UseScratchRegisterScope temps(tasm());
+      UseScratchRegisterScope temps(masm());
       Simd128Register scratch = temps.AcquireQ();
       // src0 = [4, 5, 6, 7], src1 = [0, 1, 2, 3] (flipped from UnzipLeft).
       __ vmov(scratch, src1);
@@ -2928,7 +2928,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       Simd128Register dst = i.OutputSimd128Register(),
                       src1 = i.InputSimd128Register(1);
       DCHECK(dst == i.InputSimd128Register(0));
-      UseScratchRegisterScope temps(tasm());
+      UseScratchRegisterScope temps(masm());
       Simd128Register scratch = temps.AcquireQ();
       // src0 = [0, 1, 2, 3], src1 = [4, 5, 6, 7]
       __ vmov(scratch, src1);
@@ -2961,7 +2961,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kArmS32x4TransposeRight: {
       Simd128Register dst = i.OutputSimd128Register(),
                       src1 = i.InputSimd128Register(1);
-      UseScratchRegisterScope temps(tasm());
+      UseScratchRegisterScope temps(masm());
       Simd128Register scratch = temps.AcquireQ();
       DCHECK(dst == i.InputSimd128Register(0));
       // src0 = [4, 5, 6, 7], src1 = [0, 1, 2, 3] (flipped from TransposeLeft).
@@ -2990,7 +2990,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kArmS16x8UnzipLeft: {
       Simd128Register dst = i.OutputSimd128Register(),
                       src1 = i.InputSimd128Register(1);
-      UseScratchRegisterScope temps(tasm());
+      UseScratchRegisterScope temps(masm());
       Simd128Register scratch = temps.AcquireQ();
       DCHECK(dst == i.InputSimd128Register(0));
       // src0 = [0, 1, 2, 3, ... 7], src1 = [8, 9, 10, 11, ... 15]
@@ -3001,7 +3001,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kArmS16x8UnzipRight: {
       Simd128Register dst = i.OutputSimd128Register(),
                       src1 = i.InputSimd128Register(1);
-      UseScratchRegisterScope temps(tasm());
+      UseScratchRegisterScope temps(masm());
       Simd128Register scratch = temps.AcquireQ();
       DCHECK(dst == i.InputSimd128Register(0));
       // src0 = [8, 9, 10, 11, ... 15], src1 = [0, 1, 2, 3, ... 7] (flipped).
@@ -3012,7 +3012,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kArmS16x8TransposeLeft: {
       Simd128Register dst = i.OutputSimd128Register(),
                       src1 = i.InputSimd128Register(1);
|
||||||
UseScratchRegisterScope temps(tasm());
|
UseScratchRegisterScope temps(masm());
|
||||||
Simd128Register scratch = temps.AcquireQ();
|
Simd128Register scratch = temps.AcquireQ();
|
||||||
DCHECK(dst == i.InputSimd128Register(0));
|
DCHECK(dst == i.InputSimd128Register(0));
|
||||||
// src0 = [0, 1, 2, 3, ... 7], src1 = [8, 9, 10, 11, ... 15]
|
// src0 = [0, 1, 2, 3, ... 7], src1 = [8, 9, 10, 11, ... 15]
|
||||||
@ -3023,7 +3023,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
|||||||
case kArmS16x8TransposeRight: {
|
case kArmS16x8TransposeRight: {
|
||||||
Simd128Register dst = i.OutputSimd128Register(),
|
Simd128Register dst = i.OutputSimd128Register(),
|
||||||
src1 = i.InputSimd128Register(1);
|
src1 = i.InputSimd128Register(1);
|
||||||
UseScratchRegisterScope temps(tasm());
|
UseScratchRegisterScope temps(masm());
|
||||||
Simd128Register scratch = temps.AcquireQ();
|
Simd128Register scratch = temps.AcquireQ();
|
||||||
DCHECK(dst == i.InputSimd128Register(0));
|
DCHECK(dst == i.InputSimd128Register(0));
|
||||||
// src0 = [8, 9, 10, 11, ... 15], src1 = [0, 1, 2, 3, ... 7] (flipped).
|
// src0 = [8, 9, 10, 11, ... 15], src1 = [0, 1, 2, 3, ... 7] (flipped).
|
||||||
@ -3052,7 +3052,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
|||||||
case kArmS8x16UnzipLeft: {
|
case kArmS8x16UnzipLeft: {
|
||||||
Simd128Register dst = i.OutputSimd128Register(),
|
Simd128Register dst = i.OutputSimd128Register(),
|
||||||
src1 = i.InputSimd128Register(1);
|
src1 = i.InputSimd128Register(1);
|
||||||
UseScratchRegisterScope temps(tasm());
|
UseScratchRegisterScope temps(masm());
|
||||||
Simd128Register scratch = temps.AcquireQ();
|
Simd128Register scratch = temps.AcquireQ();
|
||||||
DCHECK(dst == i.InputSimd128Register(0));
|
DCHECK(dst == i.InputSimd128Register(0));
|
||||||
// src0 = [0, 1, 2, 3, ... 15], src1 = [16, 17, 18, 19, ... 31]
|
// src0 = [0, 1, 2, 3, ... 15], src1 = [16, 17, 18, 19, ... 31]
|
||||||
@ -3063,7 +3063,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
|||||||
case kArmS8x16UnzipRight: {
|
case kArmS8x16UnzipRight: {
|
||||||
Simd128Register dst = i.OutputSimd128Register(),
|
Simd128Register dst = i.OutputSimd128Register(),
|
||||||
src1 = i.InputSimd128Register(1);
|
src1 = i.InputSimd128Register(1);
|
||||||
UseScratchRegisterScope temps(tasm());
|
UseScratchRegisterScope temps(masm());
|
||||||
Simd128Register scratch = temps.AcquireQ();
|
Simd128Register scratch = temps.AcquireQ();
|
||||||
DCHECK(dst == i.InputSimd128Register(0));
|
DCHECK(dst == i.InputSimd128Register(0));
|
||||||
// src0 = [16, 17, 18, 19, ... 31], src1 = [0, 1, 2, 3, ... 15] (flipped).
|
// src0 = [16, 17, 18, 19, ... 31], src1 = [0, 1, 2, 3, ... 15] (flipped).
|
||||||
@ -3074,7 +3074,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
|||||||
case kArmS8x16TransposeLeft: {
|
case kArmS8x16TransposeLeft: {
|
||||||
Simd128Register dst = i.OutputSimd128Register(),
|
Simd128Register dst = i.OutputSimd128Register(),
|
||||||
src1 = i.InputSimd128Register(1);
|
src1 = i.InputSimd128Register(1);
|
||||||
UseScratchRegisterScope temps(tasm());
|
UseScratchRegisterScope temps(masm());
|
||||||
Simd128Register scratch = temps.AcquireQ();
|
Simd128Register scratch = temps.AcquireQ();
|
||||||
DCHECK(dst == i.InputSimd128Register(0));
|
DCHECK(dst == i.InputSimd128Register(0));
|
||||||
// src0 = [0, 1, 2, 3, ... 15], src1 = [16, 17, 18, 19, ... 31]
|
// src0 = [0, 1, 2, 3, ... 15], src1 = [16, 17, 18, 19, ... 31]
|
||||||
@ -3085,7 +3085,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
|||||||
case kArmS8x16TransposeRight: {
|
case kArmS8x16TransposeRight: {
|
||||||
Simd128Register dst = i.OutputSimd128Register(),
|
Simd128Register dst = i.OutputSimd128Register(),
|
||||||
src1 = i.InputSimd128Register(1);
|
src1 = i.InputSimd128Register(1);
|
||||||
UseScratchRegisterScope temps(tasm());
|
UseScratchRegisterScope temps(masm());
|
||||||
Simd128Register scratch = temps.AcquireQ();
|
Simd128Register scratch = temps.AcquireQ();
|
||||||
DCHECK(dst == i.InputSimd128Register(0));
|
DCHECK(dst == i.InputSimd128Register(0));
|
||||||
// src0 = [16, 17, 18, 19, ... 31], src1 = [0, 1, 2, 3, ... 15] (flipped).
|
// src0 = [16, 17, 18, 19, ... 31], src1 = [0, 1, 2, 3, ... 15] (flipped).
|
||||||
@ -3112,7 +3112,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
|||||||
src0 = i.InputSimd128Register(0),
|
src0 = i.InputSimd128Register(0),
|
||||||
src1 = i.InputSimd128Register(1);
|
src1 = i.InputSimd128Register(1);
|
||||||
DwVfpRegister table_base = src0.low();
|
DwVfpRegister table_base = src0.low();
|
||||||
UseScratchRegisterScope temps(tasm());
|
UseScratchRegisterScope temps(masm());
|
||||||
Simd128Register scratch = temps.AcquireQ();
|
Simd128Register scratch = temps.AcquireQ();
|
||||||
// If unary shuffle, table is src0 (2 d-registers), otherwise src0 and
|
// If unary shuffle, table is src0 (2 d-registers), otherwise src0 and
|
||||||
// src1. They must be consecutive.
|
// src1. They must be consecutive.
|
||||||
@ -3163,7 +3163,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
|||||||
}
|
}
|
||||||
case kArmV128AnyTrue: {
|
case kArmV128AnyTrue: {
|
||||||
const QwNeonRegister& src = i.InputSimd128Register(0);
|
const QwNeonRegister& src = i.InputSimd128Register(0);
|
||||||
UseScratchRegisterScope temps(tasm());
|
UseScratchRegisterScope temps(masm());
|
||||||
DwVfpRegister scratch = temps.AcquireD();
|
DwVfpRegister scratch = temps.AcquireD();
|
||||||
__ vpmax(NeonU32, scratch, src.low(), src.high());
|
__ vpmax(NeonU32, scratch, src.low(), src.high());
|
||||||
__ vpmax(NeonU32, scratch, scratch, scratch);
|
__ vpmax(NeonU32, scratch, scratch, scratch);
|
||||||
@ -3178,7 +3178,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
|||||||
}
|
}
|
||||||
case kArmI32x4AllTrue: {
|
case kArmI32x4AllTrue: {
|
||||||
const QwNeonRegister& src = i.InputSimd128Register(0);
|
const QwNeonRegister& src = i.InputSimd128Register(0);
|
||||||
UseScratchRegisterScope temps(tasm());
|
UseScratchRegisterScope temps(masm());
|
||||||
DwVfpRegister scratch = temps.AcquireD();
|
DwVfpRegister scratch = temps.AcquireD();
|
||||||
__ vpmin(NeonU32, scratch, src.low(), src.high());
|
__ vpmin(NeonU32, scratch, src.low(), src.high());
|
||||||
__ vpmin(NeonU32, scratch, scratch, scratch);
|
__ vpmin(NeonU32, scratch, scratch, scratch);
|
||||||
@ -3189,7 +3189,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
|||||||
}
|
}
|
||||||
case kArmI16x8AllTrue: {
|
case kArmI16x8AllTrue: {
|
||||||
const QwNeonRegister& src = i.InputSimd128Register(0);
|
const QwNeonRegister& src = i.InputSimd128Register(0);
|
||||||
UseScratchRegisterScope temps(tasm());
|
UseScratchRegisterScope temps(masm());
|
||||||
DwVfpRegister scratch = temps.AcquireD();
|
DwVfpRegister scratch = temps.AcquireD();
|
||||||
__ vpmin(NeonU16, scratch, src.low(), src.high());
|
__ vpmin(NeonU16, scratch, src.low(), src.high());
|
||||||
__ vpmin(NeonU16, scratch, scratch, scratch);
|
__ vpmin(NeonU16, scratch, scratch, scratch);
|
||||||
@ -3201,7 +3201,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
|
|||||||
}
|
}
|
||||||
case kArmI8x16AllTrue: {
|
case kArmI8x16AllTrue: {
|
||||||
const QwNeonRegister& src = i.InputSimd128Register(0);
|
const QwNeonRegister& src = i.InputSimd128Register(0);
|
||||||
UseScratchRegisterScope temps(tasm());
|
UseScratchRegisterScope temps(masm());
|
||||||
DwVfpRegister scratch = temps.AcquireD();
|
DwVfpRegister scratch = temps.AcquireD();
|
||||||
__ vpmin(NeonU8, scratch, src.low(), src.high());
|
__ vpmin(NeonU8, scratch, src.low(), src.high());
|
||||||
__ vpmin(NeonU8, scratch, scratch, scratch);
|
__ vpmin(NeonU8, scratch, scratch, scratch);
|
||||||
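The four AnyTrue/AllTrue hunks above share one idiom: instead of an expensive horizontal reduction across lanes, the vector is folded in halves with pairwise max (vpmax) or pairwise min (vpmin), and the surviving lane is tested against zero. A minimal scalar model of that reduction for the 4 x uint32 case, written in plain C++ purely for illustration (the array layout is an assumption, not V8 code):

#include <algorithm>
#include <array>
#include <cstdint>

// AnyTrue: pairwise max folds four lanes into one; a nonzero result means
// at least one lane was nonzero (mirrors the two vpmax steps above).
bool AnyTrue4x32(const std::array<uint32_t, 4>& v) {
  uint32_t m01 = std::max(v[0], v[1]);  // first fold: low half vs high half
  uint32_t m23 = std::max(v[2], v[3]);
  return std::max(m01, m23) != 0;       // second fold, then test
}

// AllTrue: pairwise min instead; a nonzero result means no lane was zero
// (mirrors the two vpmin steps above).
bool AllTrue4x32(const std::array<uint32_t, 4>& v) {
  uint32_t m01 = std::min(v[0], v[1]);
  uint32_t m23 = std::min(v[2], v[3]);
  return std::min(m01, m23) != 0;
}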
@@ -3747,7 +3747,7 @@ void CodeGenerator::AssembleConstructFrame() {
         // exception unconditionally. Thereby we can avoid the integer overflow
         // check in the condition code.
         if (required_slots * kSystemPointerSize < v8_flags.stack_size * KB) {
-          UseScratchRegisterScope temps(tasm());
+          UseScratchRegisterScope temps(masm());
           Register scratch = temps.Acquire();
           __ ldr(scratch, FieldMemOperand(
                               kWasmInstanceRegister,
@@ -3873,8 +3873,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
       __ cmp(argc_reg, Operand(parameter_slots));
       __ mov(argc_reg, Operand(parameter_slots), LeaveCC, lt);
     }
-    __ DropArguments(argc_reg, TurboAssembler::kCountIsInteger,
-                     TurboAssembler::kCountIncludesReceiver);
+    __ DropArguments(argc_reg, MacroAssembler::kCountIsInteger,
+                     MacroAssembler::kCountIncludesReceiver);
   } else if (additional_pop_count->IsImmediate()) {
     DCHECK_EQ(Constant::kInt32, g.ToConstant(additional_pop_count).type());
     int additional_count = g.ToConstant(additional_pop_count).ToInt32();
@@ -3944,7 +3944,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
      } else if (source->IsDoubleRegister()) {
        __ vstr(g.ToDoubleRegister(source), dst);
      } else {
-       UseScratchRegisterScope temps(tasm());
+       UseScratchRegisterScope temps(masm());
        Register temp = temps.Acquire();
        QwNeonRegister src = g.ToSimd128Register(source);
        __ add(temp, dst.rn(), Operand(dst.offset()));
@@ -3965,7 +3965,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
      } else if (source->IsDoubleStackSlot()) {
        __ vldr(g.ToDoubleRegister(destination), src);
      } else {
-       UseScratchRegisterScope temps(tasm());
+       UseScratchRegisterScope temps(masm());
        Register temp = temps.Acquire();
        QwNeonRegister dst = g.ToSimd128Register(destination);
        __ add(temp, src.rn(), Operand(src.offset()));
@@ -3976,7 +3976,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
    case MoveType::kStackToStack: {
      MemOperand src = g.ToMemOperand(source);
      MemOperand dst = g.ToMemOperand(destination);
-     UseScratchRegisterScope temps(tasm());
+     UseScratchRegisterScope temps(masm());
      if (source->IsStackSlot() || source->IsFloatStackSlot()) {
        SwVfpRegister temp = temps.AcquireS();
        __ vldr(temp, src);
@@ -4014,27 +4014,27 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
      Constant src = g.ToConstant(source);
      MemOperand dst = g.ToMemOperand(destination);
      if (destination->IsStackSlot()) {
-       UseScratchRegisterScope temps(tasm());
+       UseScratchRegisterScope temps(masm());
        // Acquire a S register instead of a general purpose register in case
        // `vstr` needs one to compute the address of `dst`.
        SwVfpRegister s_temp = temps.AcquireS();
        {
          // TODO(arm): This sequence could be optimized further if necessary by
          // writing the constant directly into `s_temp`.
-         UseScratchRegisterScope temps(tasm());
+         UseScratchRegisterScope temps(masm());
          Register temp = temps.Acquire();
          MoveConstantToRegister(temp, src);
          __ vmov(s_temp, temp);
        }
        __ vstr(s_temp, dst);
      } else if (destination->IsFloatStackSlot()) {
-       UseScratchRegisterScope temps(tasm());
+       UseScratchRegisterScope temps(masm());
        SwVfpRegister temp = temps.AcquireS();
        __ vmov(temp, Float32::FromBits(src.ToFloat32AsInt()));
        __ vstr(temp, dst);
      } else {
        DCHECK(destination->IsDoubleStackSlot());
-       UseScratchRegisterScope temps(tasm());
+       UseScratchRegisterScope temps(masm());
        DwVfpRegister temp = temps.AcquireD();
        // TODO(arm): Look into optimizing this further if possible. Supporting
        // the NEON version of VMOV may help.
@@ -4060,7 +4060,7 @@ AllocatedOperand CodeGenerator::Push(InstructionOperand* source) {
    __ push(g.ToRegister(source));
    frame_access_state()->IncreaseSPDelta(new_slots);
  } else if (source->IsStackSlot()) {
-   UseScratchRegisterScope temps(tasm());
+   UseScratchRegisterScope temps(masm());
    Register scratch = temps.Acquire();
    __ ldr(scratch, g.ToMemOperand(source));
    __ push(scratch);
@@ -4083,7 +4083,7 @@ void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) {
  if (dest->IsRegister()) {
    __ pop(g.ToRegister(dest));
  } else if (dest->IsStackSlot()) {
-   UseScratchRegisterScope temps(tasm());
+   UseScratchRegisterScope temps(masm());
    Register scratch = temps.Acquire();
    __ pop(scratch);
    __ str(scratch, g.ToMemOperand(dest));
@@ -4110,7 +4110,7 @@ void CodeGenerator::PopTempStackSlots() {
 void CodeGenerator::MoveToTempLocation(InstructionOperand* source,
                                        MachineRepresentation rep) {
   // Must be kept in sync with {MoveTempLocationTo}.
-  move_cycle_.temps.emplace(tasm());
+  move_cycle_.temps.emplace(masm());
   auto& temps = *move_cycle_.temps;
   // Temporarily exclude the reserved scratch registers while we pick a
   // location to resolve the cycle. Re-include them immediately afterwards so
@@ -4184,7 +4184,7 @@ void CodeGenerator::SetPendingMove(MoveOperands* move) {
   InstructionOperand& destination = move->destination();
   MoveType::Type move_type =
       MoveType::InferMove(&move->source(), &move->destination());
-  UseScratchRegisterScope temps(tasm());
+  UseScratchRegisterScope temps(masm());
   if (move_type == MoveType::kStackToStack) {
     if (source.IsStackSlot() || source.IsFloatStackSlot()) {
       SwVfpRegister temp = temps.AcquireS();
@@ -4224,7 +4224,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
      DCHECK(destination->IsFloatRegister());
      // GapResolver may give us reg codes that don't map to actual
      // s-registers. Generate code to work around those cases.
-     UseScratchRegisterScope temps(tasm());
+     UseScratchRegisterScope temps(masm());
      LowDwVfpRegister temp = temps.AcquireLowD();
      int src_code = LocationOperand::cast(source)->register_code();
      int dst_code = LocationOperand::cast(destination)->register_code();
@@ -4241,20 +4241,20 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
     MemOperand dst = g.ToMemOperand(destination);
     if (source->IsRegister()) {
       Register src = g.ToRegister(source);
-      UseScratchRegisterScope temps(tasm());
+      UseScratchRegisterScope temps(masm());
       SwVfpRegister temp = temps.AcquireS();
       __ vmov(temp, src);
       __ ldr(src, dst);
       __ vstr(temp, dst);
     } else if (source->IsFloatRegister()) {
       int src_code = LocationOperand::cast(source)->register_code();
-      UseScratchRegisterScope temps(tasm());
+      UseScratchRegisterScope temps(masm());
       LowDwVfpRegister temp = temps.AcquireLowD();
       __ VmovExtended(temp.low().code(), src_code);
       __ VmovExtended(src_code, dst);
       __ vstr(temp.low(), dst);
     } else if (source->IsDoubleRegister()) {
-      UseScratchRegisterScope temps(tasm());
+      UseScratchRegisterScope temps(masm());
       DwVfpRegister temp = temps.AcquireD();
       DwVfpRegister src = g.ToDoubleRegister(source);
       __ Move(temp, src);
@@ -4262,7 +4262,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
       __ vstr(temp, dst);
     } else {
       QwNeonRegister src = g.ToSimd128Register(source);
-      UseScratchRegisterScope temps(tasm());
+      UseScratchRegisterScope temps(masm());
       Register temp = temps.Acquire();
       QwNeonRegister temp_q = temps.AcquireQ();
       __ Move(temp_q, src);
@@ -4276,7 +4276,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
     MemOperand src = g.ToMemOperand(source);
     MemOperand dst = g.ToMemOperand(destination);
     if (source->IsStackSlot() || source->IsFloatStackSlot()) {
-      UseScratchRegisterScope temps(tasm());
+      UseScratchRegisterScope temps(masm());
      SwVfpRegister temp_0 = temps.AcquireS();
      SwVfpRegister temp_1 = temps.AcquireS();
      __ vldr(temp_0, dst);
@@ -4284,7 +4284,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
      __ vstr(temp_0, src);
      __ vstr(temp_1, dst);
    } else if (source->IsDoubleStackSlot()) {
-     UseScratchRegisterScope temps(tasm());
+     UseScratchRegisterScope temps(masm());
      LowDwVfpRegister temp = temps.AcquireLowD();
      if (temps.CanAcquireD()) {
        DwVfpRegister temp_0 = temp;
@@ -4317,7 +4317,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
      MemOperand dst0 = dst;
      MemOperand src1(src.rn(), src.offset() + kDoubleSize);
      MemOperand dst1(dst.rn(), dst.offset() + kDoubleSize);
-     UseScratchRegisterScope temps(tasm());
+     UseScratchRegisterScope temps(masm());
      DwVfpRegister temp_0 = temps.AcquireD();
      DwVfpRegister temp_1 = temps.AcquireD();
      __ vldr(temp_0, dst0);
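Every hunk in this file routes UseScratchRegisterScope through the renamed masm() accessor. The scope itself is an RAII borrow from the assembler's scratch-register pool: registers acquired inside the scope become unavailable to nested scopes and are returned automatically when the scope is destroyed. A simplified model of that mechanism, with invented names (Assembler, ScratchScope) purely for illustration and no claim to match V8's actual implementation:

#include <bitset>
#include <cassert>

struct Assembler {
  // Bit set = register available as scratch; e.g. two scratch regs free.
  std::bitset<16> available{0b0000000000000011};
};

class ScratchScope {
 public:
  explicit ScratchScope(Assembler* masm)
      : masm_(masm), saved_(masm->available) {}
  ~ScratchScope() { masm_->available = saved_; }  // release on scope exit

  int Acquire() {
    for (int r = 0; r < 16; ++r) {
      if (masm_->available.test(r)) {
        masm_->available.reset(r);  // mark as in use for nested scopes
        return r;
      }
    }
    assert(false && "no scratch register free");
    return -1;
  }

 private:
  Assembler* masm_;
  std::bitset<16> saved_;
};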
@@ -397,7 +397,7 @@ void EmitLoad(InstructionSelector* selector, InstructionCode opcode,
     if (int_matcher.HasResolvedValue()) {
       ptrdiff_t const delta =
           int_matcher.ResolvedValue() +
-          TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+          MacroAssemblerBase::RootRegisterOffsetForExternalReference(
               selector->isolate(), m.ResolvedValue());
       input_count = 1;
       inputs[0] = g.UseImmediate(static_cast<int32_t>(delta));
@@ -753,7 +753,7 @@ void VisitStoreCommon(InstructionSelector* selector, Node* node,
     if (int_matcher.HasResolvedValue()) {
       ptrdiff_t const delta =
           int_matcher.ResolvedValue() +
-          TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+          MacroAssemblerBase::RootRegisterOffsetForExternalReference(
               selector->isolate(), m.ResolvedValue());
       int input_count = 2;
       InstructionOperand inputs[2];
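Both hunks fold an external-reference access into a single root-register-relative immediate: when the matcher resolves a constant index, the final displacement is that constant plus the reference's fixed offset from the root register, so the load or store needs only one immediate input. A sketch of the arithmetic, assuming the offset has already been looked up (RootRegisterOffsetForExternalReference is the real API seen above; everything else here is illustrative scaffolding):

#include <cstdint>
#include <limits>

// delta = constant index + (address of reference - value of root register).
// The selector emits UseImmediate(static_cast<int32_t>(delta)) only when the
// result fits in 32 bits, as modeled by the fits check below.
int64_t FoldRootRelative(int64_t index_constant, int64_t root_offset_of_ref) {
  return index_constant + root_offset_of_ref;
}

bool FitsInImmediate(int64_t delta) {
  return delta >= std::numeric_limits<int32_t>::min() &&
         delta <= std::numeric_limits<int32_t>::max();
}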
@@ -24,7 +24,7 @@ namespace v8 {
 namespace internal {
 namespace compiler {

-#define __ tasm()->
+#define __ masm()->

 // Adds Arm64-specific methods to convert InstructionOperands.
 class Arm64OperandConverter final : public InstructionOperandConverter {
@@ -238,13 +238,13 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
     UNREACHABLE();
   }

-  MemOperand ToMemOperand(InstructionOperand* op, TurboAssembler* tasm) const {
+  MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
     DCHECK_NOT_NULL(op);
     DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
-    return SlotToMemOperand(AllocatedOperand::cast(op)->index(), tasm);
+    return SlotToMemOperand(AllocatedOperand::cast(op)->index(), masm);
   }

-  MemOperand SlotToMemOperand(int slot, TurboAssembler* tasm) const {
+  MemOperand SlotToMemOperand(int slot, MacroAssembler* masm) const {
     FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
     if (offset.from_frame_pointer()) {
       int from_sp = offset.offset() + frame_access_state()->GetSPToFPOffset();
@@ -284,7 +284,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {

   void Generate() final {
     if (COMPRESS_POINTERS_BOOL) {
-      __ DecompressTaggedPointer(value_, value_);
+      __ DecompressTagged(value_, value_);
     }
     __ CheckPageFlag(
         value_, MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask,
@@ -294,7 +294,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
             : SaveFPRegsMode::kIgnore;
     if (must_save_lr_) {
       // We need to save and restore lr if the frame was elided.
-      __ Push<TurboAssembler::kSignLR>(lr, padreg);
+      __ Push<MacroAssembler::kSignLR>(lr, padreg);
       unwinding_info_writer_->MarkLinkRegisterOnTopOfStack(__ pc_offset(), sp);
     }
     if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {
@@ -311,7 +311,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
       __ CallRecordWriteStubSaveRegisters(object_, offset_, save_fp_mode);
     }
     if (must_save_lr_) {
-      __ Pop<TurboAssembler::kAuthLR>(padreg, lr);
+      __ Pop<MacroAssembler::kAuthLR>(padreg, lr);
       unwinding_info_writer_->MarkPopLinkRegisterFromTopOfStack(__ pc_offset());
     }
   }
@@ -459,14 +459,14 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,

 // Handles unary ops that work for float (scalar), double (scalar), or NEON.
 template <typename Fn>
-void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr,
+void EmitFpOrNeonUnop(MacroAssembler* masm, Fn fn, Instruction* instr,
                       Arm64OperandConverter i, VectorFormat scalar,
                       VectorFormat vector) {
   VectorFormat f = instr->InputAt(0)->IsSimd128Register() ? vector : scalar;

   VRegister output = VRegister::Create(i.OutputDoubleRegister().code(), f);
   VRegister input = VRegister::Create(i.InputDoubleRegister(0).code(), f);
-  (tasm->*fn)(output, input);
+  (masm->*fn)(output, input);
 }

 }  // namespace
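EmitFpOrNeonUnop above relies on C++ pointer-to-member dispatch: the caller passes something like &MacroAssembler::Frintm, and the helper invokes it with (masm->*fn)(output, input), so a single template covers every scalar and NEON rounding instruction without a switch. A self-contained toy version showing the same dispatch technique (Masm and its methods are stand-ins, not V8 types):

#include <iostream>

struct Masm {
  void Frintm(int dst, int src) {
    std::cout << "frintm v" << dst << ", v" << src << "\n";
  }
  void Frintp(int dst, int src) {
    std::cout << "frintp v" << dst << ", v" << src << "\n";
  }
};

// Fn is deduced as a pointer-to-member-function of Masm.
template <typename Fn>
void EmitUnop(Masm* masm, Fn fn, int dst, int src) {
  (masm->*fn)(dst, src);  // dispatch through the member-function pointer
}

int main() {
  Masm m;
  EmitUnop(&m, &Masm::Frintm, 0, 1);  // round toward -infinity
  EmitUnop(&m, &Masm::Frintp, 2, 3);  // round toward +infinity
}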
@@ -539,13 +539,13 @@ void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr,

 #define ASSEMBLE_IEEE754_BINOP(name)                                        \
   do {                                                                      \
-    FrameScope scope(tasm(), StackFrame::MANUAL);                           \
+    FrameScope scope(masm(), StackFrame::MANUAL);                           \
     __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \
   } while (0)

 #define ASSEMBLE_IEEE754_UNOP(name)                                         \
   do {                                                                      \
-    FrameScope scope(tasm(), StackFrame::MANUAL);                           \
+    FrameScope scope(masm(), StackFrame::MANUAL);                           \
     __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
   } while (0)

@@ -558,7 +558,7 @@ void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr,
      __ asm_imm(i.OutputSimd128Register().format(),                          \
                 i.InputSimd128Register(0).format(), i.InputInt##width(1));   \
    } else {                                                                  \
-     UseScratchRegisterScope temps(tasm());                                  \
+     UseScratchRegisterScope temps(masm());                                  \
      VRegister tmp = temps.AcquireQ();                                       \
      Register shift = temps.Acquire##gp();                                   \
      constexpr int mask = (1 << width) - 1;                                  \
@@ -578,7 +578,7 @@ void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr,
      __ asm_imm(i.OutputSimd128Register().format(),                          \
                 i.InputSimd128Register(0).format(), i.InputInt##width(1));   \
    } else {                                                                  \
-     UseScratchRegisterScope temps(tasm());                                  \
+     UseScratchRegisterScope temps(masm());                                  \
      VRegister tmp = temps.AcquireQ();                                       \
      Register shift = temps.Acquire##gp();                                   \
      constexpr int mask = (1 << width) - 1;                                  \
@@ -592,7 +592,7 @@ void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr,

 void CodeGenerator::AssembleDeconstructFrame() {
   __ Mov(sp, fp);
-  __ Pop<TurboAssembler::kAuthLR>(fp, lr);
+  __ Pop<MacroAssembler::kAuthLR>(fp, lr);

   unwinding_info_writer_.MarkFrameDeconstructed(__ pc_offset());
 }
@@ -606,7 +606,7 @@ void CodeGenerator::AssemblePrepareTailCall() {

 namespace {

-void AdjustStackPointerForTailCall(TurboAssembler* tasm,
+void AdjustStackPointerForTailCall(MacroAssembler* masm,
                                    FrameAccessState* state,
                                    int new_slot_above_sp,
                                    bool allow_shrinkage = true) {
@@ -615,10 +615,10 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
   int stack_slot_delta = new_slot_above_sp - current_sp_offset;
   DCHECK_EQ(stack_slot_delta % 2, 0);
   if (stack_slot_delta > 0) {
-    tasm->Claim(stack_slot_delta);
+    masm->Claim(stack_slot_delta);
     state->IncreaseSPDelta(stack_slot_delta);
   } else if (allow_shrinkage && stack_slot_delta < 0) {
-    tasm->Drop(-stack_slot_delta);
+    masm->Drop(-stack_slot_delta);
     state->IncreaseSPDelta(stack_slot_delta);
   }
 }
@@ -627,14 +627,14 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,

 void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
                                               int first_unused_slot_offset) {
-  AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                 first_unused_slot_offset, false);
 }

 void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
                                              int first_unused_slot_offset) {
   DCHECK_EQ(first_unused_slot_offset % 2, 0);
-  AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                 first_unused_slot_offset);
   DCHECK(instr->IsTailCall());
   InstructionOperandConverter g(this, instr);
@@ -646,7 +646,7 @@ void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,

 // Check that {kJavaScriptCallCodeStartRegister} is correct.
 void CodeGenerator::AssembleCodeStartRegisterCheck() {
-  UseScratchRegisterScope temps(tasm());
+  UseScratchRegisterScope temps(masm());
   Register scratch = temps.AcquireX();
   __ ComputeCodeStartAddress(scratch);
   __ cmp(scratch, kJavaScriptCallCodeStartRegister);
@@ -705,7 +705,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
        __ Jump(wasm_code, constant.rmode());
      } else {
        Register target = i.InputRegister(0);
-       UseScratchRegisterScope temps(tasm());
+       UseScratchRegisterScope temps(masm());
        temps.Exclude(x17);
        __ Mov(x17, target);
        __ Jump(x17);
@@ -737,7 +737,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      DCHECK_IMPLIES(
          instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
          reg == kJavaScriptCallCodeStartRegister);
-     UseScratchRegisterScope temps(tasm());
+     UseScratchRegisterScope temps(masm());
      temps.Exclude(x17);
      __ Mov(x17, reg);
      __ Jump(x17);
@@ -750,16 +750,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      Register func = i.InputRegister(0);
      if (v8_flags.debug_code) {
        // Check the function's context matches the context argument.
-       UseScratchRegisterScope scope(tasm());
+       UseScratchRegisterScope scope(masm());
        Register temp = scope.AcquireX();
-       __ LoadTaggedPointerField(
-           temp, FieldMemOperand(func, JSFunction::kContextOffset));
+       __ LoadTaggedField(temp,
+                          FieldMemOperand(func, JSFunction::kContextOffset));
        __ cmp(cp, temp);
        __ Assert(eq, AbortReason::kWrongFunctionContext);
      }
      static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
-     __ LoadTaggedPointerField(x2,
-                               FieldMemOperand(func, JSFunction::kCodeOffset));
+     __ LoadTaggedField(x2, FieldMemOperand(func, JSFunction::kCodeOffset));
      __ CallCodeObject(x2);
      RecordCallPosition(instr);
      frame_access_state()->ClearSPDelta();
@@ -860,7 +859,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      {
        // We don't actually want to generate a pile of code for this, so just
        // claim there is a stack frame, without generating one.
-       FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+       FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE);
        __ Call(BUILTIN_CODE(isolate(), AbortCSADcheck),
                RelocInfo::CODE_TARGET);
      }
@@ -1051,39 +1050,39 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      ASSEMBLE_IEEE754_UNOP(tanh);
      break;
    case kArm64Float32RoundDown:
-     EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintm, instr, i, kFormatS,
+     EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintm, instr, i, kFormatS,
                       kFormat4S);
      break;
    case kArm64Float64RoundDown:
-     EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintm, instr, i, kFormatD,
+     EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintm, instr, i, kFormatD,
                       kFormat2D);
      break;
    case kArm64Float32RoundUp:
-     EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintp, instr, i, kFormatS,
+     EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintp, instr, i, kFormatS,
                       kFormat4S);
      break;
    case kArm64Float64RoundUp:
-     EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintp, instr, i, kFormatD,
+     EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintp, instr, i, kFormatD,
                       kFormat2D);
      break;
    case kArm64Float64RoundTiesAway:
-     EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frinta, instr, i, kFormatD,
+     EmitFpOrNeonUnop(masm(), &MacroAssembler::Frinta, instr, i, kFormatD,
                       kFormat2D);
      break;
    case kArm64Float32RoundTruncate:
-     EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintz, instr, i, kFormatS,
+     EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintz, instr, i, kFormatS,
                       kFormat4S);
      break;
    case kArm64Float64RoundTruncate:
-     EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintz, instr, i, kFormatD,
+     EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintz, instr, i, kFormatD,
                       kFormat2D);
      break;
    case kArm64Float32RoundTiesEven:
-     EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintn, instr, i, kFormatS,
+     EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintn, instr, i, kFormatS,
                       kFormat4S);
      break;
    case kArm64Float64RoundTiesEven:
-     EmitFpOrNeonUnop(tasm(), &TurboAssembler::Frintn, instr, i, kFormatD,
+     EmitFpOrNeonUnop(masm(), &MacroAssembler::Frintn, instr, i, kFormatD,
                       kFormat2D);
      break;
    case kArm64Add:
@@ -1314,14 +1313,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      __ Udiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Imod: {
-     UseScratchRegisterScope scope(tasm());
+     UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireX();
      __ Sdiv(temp, i.InputRegister(0), i.InputRegister(1));
      __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
      break;
    }
    case kArm64Imod32: {
-     UseScratchRegisterScope scope(tasm());
+     UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireW();
      __ Sdiv(temp, i.InputRegister32(0), i.InputRegister32(1));
      __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
@@ -1329,14 +1328,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      break;
    }
    case kArm64Umod: {
-     UseScratchRegisterScope scope(tasm());
+     UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireX();
      __ Udiv(temp, i.InputRegister(0), i.InputRegister(1));
      __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
      break;
    }
    case kArm64Umod32: {
-     UseScratchRegisterScope scope(tasm());
+     UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireW();
      __ Udiv(temp, i.InputRegister32(0), i.InputRegister32(1));
      __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
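The Imod/Umod cases above synthesize a remainder from two instructions, since ARM64 has no hardware modulo: Sdiv (or Udiv) produces the truncated quotient, then Msub computes lhs - quotient * rhs in a single multiply-subtract. A scalar check of that identity in plain C++ (illustrative only):

#include <cassert>
#include <cstdint>

int64_t ModViaDivMsub(int64_t lhs, int64_t rhs) {
  int64_t quotient = lhs / rhs;   // Sdiv: truncated toward zero
  return lhs - quotient * rhs;    // Msub: multiply-subtract
}

int main() {
  assert(ModViaDivMsub(7, 3) == 1);
  assert(ModViaDivMsub(-7, 3) == -1);  // sign follows the dividend
  return 0;
}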
@@ -1650,7 +1649,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      break;
    case kArm64Float64Mod: {
      // TODO(turbofan): implement directly.
-     FrameScope scope(tasm(), StackFrame::MANUAL);
+     FrameScope scope(masm(), StackFrame::MANUAL);
      DCHECK_EQ(d0, i.InputDoubleRegister(0));
      DCHECK_EQ(d1, i.InputDoubleRegister(1));
      DCHECK_EQ(d0, i.OutputDoubleRegister());
@@ -1890,23 +1889,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
    case kArm64LdrDecompressTaggedSigned:
      __ DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand());
      break;
-   case kArm64LdrDecompressTaggedPointer:
-     __ DecompressTaggedPointer(i.OutputRegister(), i.MemoryOperand());
-     break;
-   case kArm64LdrDecompressAnyTagged:
-     __ DecompressAnyTagged(i.OutputRegister(), i.MemoryOperand());
+   case kArm64LdrDecompressTagged:
+     __ DecompressTagged(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64LdarDecompressTaggedSigned:
      __ AtomicDecompressTaggedSigned(i.OutputRegister(), i.InputRegister(0),
                                      i.InputRegister(1), i.TempRegister(0));
      break;
-   case kArm64LdarDecompressTaggedPointer:
-     __ AtomicDecompressTaggedPointer(i.OutputRegister(), i.InputRegister(0),
-                                      i.InputRegister(1), i.TempRegister(0));
-     break;
-   case kArm64LdarDecompressAnyTagged:
-     __ AtomicDecompressAnyTagged(i.OutputRegister(), i.InputRegister(0),
-                                  i.InputRegister(1), i.TempRegister(0));
+   case kArm64LdarDecompressTagged:
+     __ AtomicDecompressTagged(i.OutputRegister(), i.InputRegister(0),
+                               i.InputRegister(1), i.TempRegister(0));
      break;
    case kArm64LdrDecodeSandboxedPointer:
      __ LoadSandboxedPointerField(i.OutputRegister(), i.MemoryOperand());
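The merged kArm64LdrDecompressTagged case replaces the separate TaggedPointer/AnyTagged variants with a single decompression path. Under V8 pointer compression the operation is, roughly, to zero-extend the 32-bit compressed value and add the heap cage base; the sketch below models that arithmetic and is an assumption about the mechanism, not the actual DecompressTagged implementation:

#include <cstdint>

// Hypothetical model: cage_base stands in for the aligned heap base kept in
// a dedicated register; compressed is the 32-bit tagged value loaded from
// memory. Real V8 code differs in detail (tagging, verification, etc.).
uint64_t DecompressTaggedModel(uint64_t cage_base, uint32_t compressed) {
  return cage_base + static_cast<uint64_t>(compressed);  // zero-extend + add
}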
@@ -2369,7 +2361,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      SIMD_BINOP_LANE_SIZE_CASE(kArm64IAdd, Add);
      SIMD_BINOP_LANE_SIZE_CASE(kArm64ISub, Sub);
    case kArm64I64x2Mul: {
-     UseScratchRegisterScope scope(tasm());
+     UseScratchRegisterScope scope(masm());
      VRegister dst = i.OutputSimd128Register();
      VRegister src1 = i.InputSimd128Register(0);
      VRegister src2 = i.InputSimd128Register(1);
@@ -2470,7 +2462,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      SIMD_BINOP_LANE_SIZE_CASE(kArm64IGtU, Cmhi);
      SIMD_BINOP_LANE_SIZE_CASE(kArm64IGeU, Cmhs);
    case kArm64I32x4BitMask: {
-     UseScratchRegisterScope scope(tasm());
+     UseScratchRegisterScope scope(masm());
      Register dst = i.OutputRegister32();
      VRegister src = i.InputSimd128Register(0);
      VRegister tmp = scope.AcquireQ();
@@ -2486,7 +2478,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      break;
    }
    case kArm64I32x4DotI16x8S: {
-     UseScratchRegisterScope scope(tasm());
+     UseScratchRegisterScope scope(masm());
      VRegister lhs = i.InputSimd128Register(0);
      VRegister rhs = i.InputSimd128Register(1);
      VRegister tmp1 = scope.AcquireV(kFormat4S);
@@ -2497,7 +2489,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      break;
    }
    case kArm64I16x8DotI8x16S: {
-     UseScratchRegisterScope scope(tasm());
+     UseScratchRegisterScope scope(masm());
      VRegister lhs = i.InputSimd128Register(0);
      VRegister rhs = i.InputSimd128Register(1);
      VRegister tmp1 = scope.AcquireV(kFormat8H);
@@ -2515,7 +2507,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
                 i.InputSimd128Register(1).V16B());

      } else {
-       UseScratchRegisterScope scope(tasm());
+       UseScratchRegisterScope scope(masm());
        VRegister lhs = i.InputSimd128Register(0);
        VRegister rhs = i.InputSimd128Register(1);
        VRegister tmp1 = scope.AcquireV(kFormat8H);
@@ -2553,7 +2545,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      VRegister dst = i.OutputSimd128Register(),
                src0 = i.InputSimd128Register(0),
                src1 = i.InputSimd128Register(1);
-     UseScratchRegisterScope scope(tasm());
+     UseScratchRegisterScope scope(masm());
      VRegister temp = scope.AcquireV(kFormat4S);
      if (dst == src1) {
        __ Mov(temp, src1.V4S());
@@ -2574,7 +2566,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      VRegister dst = i.OutputSimd128Register(),
                src0 = i.InputSimd128Register(0),
                src1 = i.InputSimd128Register(1);
-     UseScratchRegisterScope scope(tasm());
+     UseScratchRegisterScope scope(masm());
      VRegister temp = scope.AcquireV(kFormat4S);
      if (dst == src1) {
        __ Mov(temp, src1.V4S());
@@ -2588,7 +2580,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      SIMD_BINOP_LANE_SIZE_CASE(kArm64ISubSatU, Uqsub);
      SIMD_BINOP_CASE(kArm64I16x8Q15MulRSatS, Sqrdmulh, 8H);
    case kArm64I16x8BitMask: {
-     UseScratchRegisterScope scope(tasm());
+     UseScratchRegisterScope scope(masm());
      Register dst = i.OutputRegister32();
      VRegister src = i.InputSimd128Register(0);
      VRegister tmp = scope.AcquireQ();
@@ -2615,7 +2607,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      VRegister dst = i.OutputSimd128Register(),
                src0 = i.InputSimd128Register(0),
                src1 = i.InputSimd128Register(1);
-     UseScratchRegisterScope scope(tasm());
+     UseScratchRegisterScope scope(masm());
      VRegister temp = scope.AcquireV(kFormat8H);
      if (dst == src1) {
        __ Mov(temp, src1.V8H());
@@ -2633,7 +2625,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      VRegister dst = i.OutputSimd128Register(),
                src0 = i.InputSimd128Register(0),
                src1 = i.InputSimd128Register(1);
-     UseScratchRegisterScope scope(tasm());
+     UseScratchRegisterScope scope(masm());
      VRegister temp = scope.AcquireV(kFormat8H);
      if (dst == src1) {
        __ Mov(temp, src1.V8H());
@@ -2644,7 +2636,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      break;
    }
    case kArm64I8x16BitMask: {
-     UseScratchRegisterScope scope(tasm());
+     UseScratchRegisterScope scope(masm());
      Register dst = i.OutputRegister32();
      VRegister src = i.InputSimd128Register(0);
      VRegister tmp = scope.AcquireQ();
@@ -2733,7 +2725,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
                src1 = i.InputSimd128Register(1).V4S();
      // Check for in-place shuffles.
      // If dst == src0 == src1, then the shuffle is unary and we only use src0.
-     UseScratchRegisterScope scope(tasm());
+     UseScratchRegisterScope scope(masm());
      VRegister temp = scope.AcquireV(kFormat4S);
      if (dst == src0) {
        __ Mov(temp, src0);
@@ -2799,7 +2791,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      DCHECK_EQ(0, (imm1 | imm2) & (src0 == src1 ? 0xF0F0F0F0F0F0F0F0
                                                 : 0xE0E0E0E0E0E0E0E0));

-     UseScratchRegisterScope scope(tasm());
+     UseScratchRegisterScope scope(masm());
      VRegister temp = scope.AcquireV(kFormat16B);
      __ Movi(temp, imm2, imm1);

@@ -2878,7 +2870,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      break;
    }
    case kArm64V128AnyTrue: {
-     UseScratchRegisterScope scope(tasm());
+     UseScratchRegisterScope scope(masm());
      // For AnyTrue, the format does not matter; also, we would like to avoid
      // an expensive horizontal reduction.
      VRegister temp = scope.AcquireV(kFormat4S);
@@ -2891,7 +2883,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
    }
 #define SIMD_REDUCE_OP_CASE(Op, Instr, format, FORMAT)     \
   case Op: {                                               \
-    UseScratchRegisterScope scope(tasm());                 \
+    UseScratchRegisterScope scope(masm());                 \
     VRegister temp = scope.AcquireV(format);               \
     __ Instr(temp, i.InputSimd128Register(0).V##FORMAT()); \
     __ Umov(i.OutputRegister32(), temp, 0);                \
@@ -3045,7 +3037,7 @@ void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {

 void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
   Arm64OperandConverter i(this, instr);
-  UseScratchRegisterScope scope(tasm());
+  UseScratchRegisterScope scope(masm());
   Register input = i.InputRegister32(0);
   Register temp = scope.AcquireX();
   size_t const case_count = instr->InputCount() - 2;
@@ -3066,7 +3058,7 @@ void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
   {
     const size_t instruction_count =
         case_count * instructions_per_case + instructions_per_jump_target;
|
||||||
TurboAssembler::BlockPoolsScope block_pools(tasm(),
|
MacroAssembler::BlockPoolsScope block_pools(masm(),
|
||||||
instruction_count * kInstrSize);
|
instruction_count * kInstrSize);
|
||||||
__ Bind(&table);
|
__ Bind(&table);
|
||||||
for (size_t index = 0; index < case_count; ++index) {
|
for (size_t index = 0; index < case_count; ++index) {
|
||||||
@ -3125,10 +3117,10 @@ void CodeGenerator::AssembleConstructFrame() {
|
|||||||
DCHECK_EQ(required_slots % 2, 1);
|
DCHECK_EQ(required_slots % 2, 1);
|
||||||
__ Prologue();
|
__ Prologue();
|
||||||
// Update required_slots count since we have just claimed one extra slot.
|
// Update required_slots count since we have just claimed one extra slot.
|
||||||
static_assert(TurboAssembler::kExtraSlotClaimedByPrologue == 1);
|
static_assert(MacroAssembler::kExtraSlotClaimedByPrologue == 1);
|
||||||
required_slots -= TurboAssembler::kExtraSlotClaimedByPrologue;
|
required_slots -= MacroAssembler::kExtraSlotClaimedByPrologue;
|
||||||
} else {
|
} else {
|
||||||
__ Push<TurboAssembler::kSignLR>(lr, fp);
|
__ Push<MacroAssembler::kSignLR>(lr, fp);
|
||||||
__ Mov(fp, sp);
|
__ Mov(fp, sp);
|
||||||
}
|
}
|
||||||
unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
|
unwinding_info_writer_.MarkFrameConstructed(__ pc_offset());
|
||||||
@ -3151,7 +3143,7 @@ void CodeGenerator::AssembleConstructFrame() {
|
|||||||
// One unoptimized frame slot has already been claimed when the actual
|
// One unoptimized frame slot has already been claimed when the actual
|
||||||
// arguments count was pushed.
|
// arguments count was pushed.
|
||||||
required_slots -=
|
required_slots -=
|
||||||
unoptimized_frame_slots - TurboAssembler::kExtraSlotClaimedByPrologue;
|
unoptimized_frame_slots - MacroAssembler::kExtraSlotClaimedByPrologue;
|
||||||
}
|
}
|
||||||
|
|
||||||
#if V8_ENABLE_WEBASSEMBLY
|
#if V8_ENABLE_WEBASSEMBLY
|
||||||
@ -3165,7 +3157,7 @@ void CodeGenerator::AssembleConstructFrame() {
|
|||||||
// exception unconditionally. Thereby we can avoid the integer overflow
|
// exception unconditionally. Thereby we can avoid the integer overflow
|
||||||
// check in the condition code.
|
// check in the condition code.
|
||||||
if (required_slots * kSystemPointerSize < v8_flags.stack_size * KB) {
|
if (required_slots * kSystemPointerSize < v8_flags.stack_size * KB) {
|
||||||
UseScratchRegisterScope scope(tasm());
|
UseScratchRegisterScope scope(masm());
|
||||||
Register scratch = scope.AcquireX();
|
Register scratch = scope.AcquireX();
|
||||||
__ Ldr(scratch, FieldMemOperand(
|
__ Ldr(scratch, FieldMemOperand(
|
||||||
kWasmInstanceRegister,
|
kWasmInstanceRegister,
|
||||||
@ -3178,7 +3170,7 @@ void CodeGenerator::AssembleConstructFrame() {
|
|||||||
|
|
||||||
{
|
{
|
||||||
// Finish the frame that hasn't been fully built yet.
|
// Finish the frame that hasn't been fully built yet.
|
||||||
UseScratchRegisterScope temps(tasm());
|
UseScratchRegisterScope temps(masm());
|
||||||
Register scratch = temps.AcquireX();
|
Register scratch = temps.AcquireX();
|
||||||
__ Mov(scratch,
|
__ Mov(scratch,
|
||||||
StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
|
StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
|
||||||
@ -3209,7 +3201,7 @@ void CodeGenerator::AssembleConstructFrame() {
|
|||||||
__ Claim(required_slots);
|
__ Claim(required_slots);
|
||||||
break;
|
break;
|
||||||
case CallDescriptor::kCallCodeObject: {
|
case CallDescriptor::kCallCodeObject: {
|
||||||
UseScratchRegisterScope temps(tasm());
|
UseScratchRegisterScope temps(masm());
|
||||||
Register scratch = temps.AcquireX();
|
Register scratch = temps.AcquireX();
|
||||||
__ Mov(scratch,
|
__ Mov(scratch,
|
||||||
StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
|
StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
|
||||||
@ -3225,7 +3217,7 @@ void CodeGenerator::AssembleConstructFrame() {
|
|||||||
}
|
}
|
||||||
#if V8_ENABLE_WEBASSEMBLY
|
#if V8_ENABLE_WEBASSEMBLY
|
||||||
case CallDescriptor::kCallWasmFunction: {
|
case CallDescriptor::kCallWasmFunction: {
|
||||||
UseScratchRegisterScope temps(tasm());
|
UseScratchRegisterScope temps(masm());
|
||||||
Register scratch = temps.AcquireX();
|
Register scratch = temps.AcquireX();
|
||||||
__ Mov(scratch,
|
__ Mov(scratch,
|
||||||
StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
|
StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
|
||||||
@ -3235,7 +3227,7 @@ void CodeGenerator::AssembleConstructFrame() {
|
|||||||
}
|
}
|
||||||
case CallDescriptor::kCallWasmImportWrapper:
|
case CallDescriptor::kCallWasmImportWrapper:
|
||||||
case CallDescriptor::kCallWasmCapiFunction: {
|
case CallDescriptor::kCallWasmCapiFunction: {
|
||||||
UseScratchRegisterScope temps(tasm());
|
UseScratchRegisterScope temps(masm());
|
||||||
Register scratch = temps.AcquireX();
|
Register scratch = temps.AcquireX();
|
||||||
__ Mov(scratch,
|
__ Mov(scratch,
|
||||||
StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
|
StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
|
||||||
@ -3254,7 +3246,7 @@ void CodeGenerator::AssembleConstructFrame() {
|
|||||||
case CallDescriptor::kCallAddress:
|
case CallDescriptor::kCallAddress:
|
||||||
#if V8_ENABLE_WEBASSEMBLY
|
#if V8_ENABLE_WEBASSEMBLY
|
||||||
if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
|
if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
|
||||||
UseScratchRegisterScope temps(tasm());
|
UseScratchRegisterScope temps(masm());
|
||||||
Register scratch = temps.AcquireX();
|
Register scratch = temps.AcquireX();
|
||||||
__ Mov(scratch, StackFrame::TypeToMarker(StackFrame::C_WASM_ENTRY));
|
__ Mov(scratch, StackFrame::TypeToMarker(StackFrame::C_WASM_ENTRY));
|
||||||
__ Push(scratch, padreg);
|
__ Push(scratch, padreg);
|
||||||
@ -3392,7 +3384,7 @@ void CodeGenerator::PrepareForDeoptimizationExits(
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Emit the jumps to deoptimization entries.
|
// Emit the jumps to deoptimization entries.
|
||||||
UseScratchRegisterScope scope(tasm());
|
UseScratchRegisterScope scope(masm());
|
||||||
Register scratch = scope.AcquireX();
|
Register scratch = scope.AcquireX();
|
||||||
static_assert(static_cast<int>(kFirstDeoptimizeKind) == 0);
|
static_assert(static_cast<int>(kFirstDeoptimizeKind) == 0);
|
||||||
for (int i = 0; i < kDeoptimizeKindCount; i++) {
|
for (int i = 0; i < kDeoptimizeKindCount; i++) {
|
||||||
@ -3417,9 +3409,9 @@ AllocatedOperand CodeGenerator::Push(InstructionOperand* source) {
|
|||||||
__ Push(padreg, g.ToRegister(source));
|
__ Push(padreg, g.ToRegister(source));
|
||||||
frame_access_state()->IncreaseSPDelta(new_slots);
|
frame_access_state()->IncreaseSPDelta(new_slots);
|
||||||
} else if (source->IsStackSlot()) {
|
} else if (source->IsStackSlot()) {
|
||||||
UseScratchRegisterScope temps(tasm());
|
UseScratchRegisterScope temps(masm());
|
||||||
Register scratch = temps.AcquireX();
|
Register scratch = temps.AcquireX();
|
||||||
__ Ldr(scratch, g.ToMemOperand(source, tasm()));
|
__ Ldr(scratch, g.ToMemOperand(source, masm()));
|
||||||
__ Push(padreg, scratch);
|
__ Push(padreg, scratch);
|
||||||
frame_access_state()->IncreaseSPDelta(new_slots);
|
frame_access_state()->IncreaseSPDelta(new_slots);
|
||||||
} else {
|
} else {
|
||||||
@ -3440,10 +3432,10 @@ void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) {
|
|||||||
if (dest->IsRegister()) {
|
if (dest->IsRegister()) {
|
||||||
__ Pop(g.ToRegister(dest), padreg);
|
__ Pop(g.ToRegister(dest), padreg);
|
||||||
} else if (dest->IsStackSlot()) {
|
} else if (dest->IsStackSlot()) {
|
||||||
UseScratchRegisterScope temps(tasm());
|
UseScratchRegisterScope temps(masm());
|
||||||
Register scratch = temps.AcquireX();
|
Register scratch = temps.AcquireX();
|
||||||
__ Pop(scratch, padreg);
|
__ Pop(scratch, padreg);
|
||||||
__ Str(scratch, g.ToMemOperand(dest, tasm()));
|
__ Str(scratch, g.ToMemOperand(dest, masm()));
|
||||||
} else {
|
} else {
|
||||||
int last_frame_slot_id =
|
int last_frame_slot_id =
|
||||||
frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
|
frame_access_state_->frame()->GetTotalFrameSlotCount() - 1;
|
||||||
@ -3468,7 +3460,7 @@ void CodeGenerator::MoveToTempLocation(InstructionOperand* source,
|
|||||||
MachineRepresentation rep) {
|
MachineRepresentation rep) {
|
||||||
// Must be kept in sync with {MoveTempLocationTo}.
|
// Must be kept in sync with {MoveTempLocationTo}.
|
||||||
DCHECK(!source->IsImmediate());
|
DCHECK(!source->IsImmediate());
|
||||||
move_cycle_.temps.emplace(tasm());
|
move_cycle_.temps.emplace(masm());
|
||||||
auto& temps = *move_cycle_.temps;
|
auto& temps = *move_cycle_.temps;
|
||||||
// Temporarily exclude the reserved scratch registers while we pick one to
|
// Temporarily exclude the reserved scratch registers while we pick one to
|
||||||
// resolve the move cycle. Re-include them immediately afterwards as they
|
// resolve the move cycle. Re-include them immediately afterwards as they
|
||||||
@ -3506,7 +3498,7 @@ void CodeGenerator::MoveToTempLocation(InstructionOperand* source,
|
|||||||
scratch_reg.code());
|
scratch_reg.code());
|
||||||
Arm64OperandConverter g(this, nullptr);
|
Arm64OperandConverter g(this, nullptr);
|
||||||
if (source->IsStackSlot()) {
|
if (source->IsStackSlot()) {
|
||||||
__ Ldr(g.ToDoubleRegister(&scratch), g.ToMemOperand(source, tasm()));
|
__ Ldr(g.ToDoubleRegister(&scratch), g.ToMemOperand(source, masm()));
|
||||||
} else {
|
} else {
|
||||||
DCHECK(source->IsRegister());
|
DCHECK(source->IsRegister());
|
||||||
__ fmov(g.ToDoubleRegister(&scratch), g.ToRegister(source));
|
__ fmov(g.ToDoubleRegister(&scratch), g.ToRegister(source));
|
||||||
@ -3535,7 +3527,7 @@ void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest,
|
|||||||
move_cycle_.scratch_reg->code());
|
move_cycle_.scratch_reg->code());
|
||||||
Arm64OperandConverter g(this, nullptr);
|
Arm64OperandConverter g(this, nullptr);
|
||||||
if (dest->IsStackSlot()) {
|
if (dest->IsStackSlot()) {
|
||||||
__ Str(g.ToDoubleRegister(&scratch), g.ToMemOperand(dest, tasm()));
|
__ Str(g.ToDoubleRegister(&scratch), g.ToMemOperand(dest, masm()));
|
||||||
} else {
|
} else {
|
||||||
DCHECK(dest->IsRegister());
|
DCHECK(dest->IsRegister());
|
||||||
__ fmov(g.ToRegister(dest), g.ToDoubleRegister(&scratch));
|
__ fmov(g.ToRegister(dest), g.ToDoubleRegister(&scratch));
|
||||||
@ -3557,9 +3549,9 @@ void CodeGenerator::SetPendingMove(MoveOperands* move) {
|
|||||||
auto move_type = MoveType::InferMove(&move->source(), &move->destination());
|
auto move_type = MoveType::InferMove(&move->source(), &move->destination());
|
||||||
if (move_type == MoveType::kStackToStack) {
|
if (move_type == MoveType::kStackToStack) {
|
||||||
Arm64OperandConverter g(this, nullptr);
|
Arm64OperandConverter g(this, nullptr);
|
||||||
MemOperand src = g.ToMemOperand(&move->source(), tasm());
|
MemOperand src = g.ToMemOperand(&move->source(), masm());
|
||||||
MemOperand dst = g.ToMemOperand(&move->destination(), tasm());
|
MemOperand dst = g.ToMemOperand(&move->destination(), masm());
|
||||||
UseScratchRegisterScope temps(tasm());
|
UseScratchRegisterScope temps(masm());
|
||||||
if (move->source().IsSimd128StackSlot()) {
|
if (move->source().IsSimd128StackSlot()) {
|
||||||
VRegister temp = temps.AcquireQ();
|
VRegister temp = temps.AcquireQ();
|
||||||
move_cycle_.scratch_fp_regs.set(temp);
|
move_cycle_.scratch_fp_regs.set(temp);
|
||||||
@ -3574,11 +3566,11 @@ void CodeGenerator::SetPendingMove(MoveOperands* move) {
|
|||||||
// Offset doesn't fit into the immediate field so the assembler will emit
|
// Offset doesn't fit into the immediate field so the assembler will emit
|
||||||
// two instructions and use a second temp register.
|
// two instructions and use a second temp register.
|
||||||
if ((src.IsImmediateOffset() &&
|
if ((src.IsImmediateOffset() &&
|
||||||
!tasm()->IsImmLSScaled(src_offset, src_size) &&
|
!masm()->IsImmLSScaled(src_offset, src_size) &&
|
||||||
!tasm()->IsImmLSUnscaled(src_offset)) ||
|
!masm()->IsImmLSUnscaled(src_offset)) ||
|
||||||
(dst.IsImmediateOffset() &&
|
(dst.IsImmediateOffset() &&
|
||||||
!tasm()->IsImmLSScaled(dst_offset, dst_size) &&
|
!masm()->IsImmLSScaled(dst_offset, dst_size) &&
|
||||||
!tasm()->IsImmLSUnscaled(dst_offset))) {
|
!masm()->IsImmLSUnscaled(dst_offset))) {
|
||||||
Register temp = temps.AcquireX();
|
Register temp = temps.AcquireX();
|
||||||
move_cycle_.scratch_regs.set(temp);
|
move_cycle_.scratch_regs.set(temp);
|
||||||
}
|
}
|
||||||
@ -3627,7 +3619,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
|
|||||||
}
|
}
|
||||||
return;
|
return;
|
||||||
case MoveType::kRegisterToStack: {
|
case MoveType::kRegisterToStack: {
|
||||||
MemOperand dst = g.ToMemOperand(destination, tasm());
|
MemOperand dst = g.ToMemOperand(destination, masm());
|
||||||
if (source->IsRegister()) {
|
if (source->IsRegister()) {
|
||||||
__ Str(g.ToRegister(source), dst);
|
__ Str(g.ToRegister(source), dst);
|
||||||
} else {
|
} else {
|
||||||
@ -3642,7 +3634,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
case MoveType::kStackToRegister: {
|
case MoveType::kStackToRegister: {
|
||||||
MemOperand src = g.ToMemOperand(source, tasm());
|
MemOperand src = g.ToMemOperand(source, masm());
|
||||||
if (destination->IsRegister()) {
|
if (destination->IsRegister()) {
|
||||||
__ Ldr(g.ToRegister(destination), src);
|
__ Ldr(g.ToRegister(destination), src);
|
||||||
} else {
|
} else {
|
||||||
@ -3657,15 +3649,15 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
case MoveType::kStackToStack: {
|
case MoveType::kStackToStack: {
|
||||||
MemOperand src = g.ToMemOperand(source, tasm());
|
MemOperand src = g.ToMemOperand(source, masm());
|
||||||
MemOperand dst = g.ToMemOperand(destination, tasm());
|
MemOperand dst = g.ToMemOperand(destination, masm());
|
||||||
if (source->IsSimd128StackSlot()) {
|
if (source->IsSimd128StackSlot()) {
|
||||||
UseScratchRegisterScope scope(tasm());
|
UseScratchRegisterScope scope(masm());
|
||||||
VRegister temp = scope.AcquireQ();
|
VRegister temp = scope.AcquireQ();
|
||||||
__ Ldr(temp, src);
|
__ Ldr(temp, src);
|
||||||
__ Str(temp, dst);
|
__ Str(temp, dst);
|
||||||
} else {
|
} else {
|
||||||
UseScratchRegisterScope scope(tasm());
|
UseScratchRegisterScope scope(masm());
|
||||||
Register temp = scope.AcquireX();
|
Register temp = scope.AcquireX();
|
||||||
__ Ldr(temp, src);
|
__ Ldr(temp, src);
|
||||||
__ Str(temp, dst);
|
__ Str(temp, dst);
|
||||||
@ -3689,9 +3681,9 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
|
|||||||
}
|
}
|
||||||
case MoveType::kConstantToStack: {
|
case MoveType::kConstantToStack: {
|
||||||
Constant src = g.ToConstant(source);
|
Constant src = g.ToConstant(source);
|
||||||
MemOperand dst = g.ToMemOperand(destination, tasm());
|
MemOperand dst = g.ToMemOperand(destination, masm());
|
||||||
if (destination->IsStackSlot()) {
|
if (destination->IsStackSlot()) {
|
||||||
UseScratchRegisterScope scope(tasm());
|
UseScratchRegisterScope scope(masm());
|
||||||
Register temp = scope.AcquireX();
|
Register temp = scope.AcquireX();
|
||||||
MoveConstantToRegister(temp, src);
|
MoveConstantToRegister(temp, src);
|
||||||
__ Str(temp, dst);
|
__ Str(temp, dst);
|
||||||
@ -3699,7 +3691,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
|
|||||||
if (base::bit_cast<int32_t>(src.ToFloat32()) == 0) {
|
if (base::bit_cast<int32_t>(src.ToFloat32()) == 0) {
|
||||||
__ Str(wzr, dst);
|
__ Str(wzr, dst);
|
||||||
} else {
|
} else {
|
||||||
UseScratchRegisterScope scope(tasm());
|
UseScratchRegisterScope scope(masm());
|
||||||
VRegister temp = scope.AcquireS();
|
VRegister temp = scope.AcquireS();
|
||||||
__ Fmov(temp, src.ToFloat32());
|
__ Fmov(temp, src.ToFloat32());
|
||||||
__ Str(temp, dst);
|
__ Str(temp, dst);
|
||||||
@ -3709,7 +3701,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
|
|||||||
if (src.ToFloat64().AsUint64() == 0) {
|
if (src.ToFloat64().AsUint64() == 0) {
|
||||||
__ Str(xzr, dst);
|
__ Str(xzr, dst);
|
||||||
} else {
|
} else {
|
||||||
UseScratchRegisterScope scope(tasm());
|
UseScratchRegisterScope scope(masm());
|
||||||
VRegister temp = scope.AcquireD();
|
VRegister temp = scope.AcquireD();
|
||||||
__ Fmov(temp, src.ToFloat64().value());
|
__ Fmov(temp, src.ToFloat64().value());
|
||||||
__ Str(temp, dst);
|
__ Str(temp, dst);
|
||||||
@ -3740,8 +3732,8 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
|
|||||||
}
|
}
|
||||||
return;
|
return;
|
||||||
case MoveType::kRegisterToStack: {
|
case MoveType::kRegisterToStack: {
|
||||||
UseScratchRegisterScope scope(tasm());
|
UseScratchRegisterScope scope(masm());
|
||||||
MemOperand dst = g.ToMemOperand(destination, tasm());
|
MemOperand dst = g.ToMemOperand(destination, masm());
|
||||||
if (source->IsRegister()) {
|
if (source->IsRegister()) {
|
||||||
Register temp = scope.AcquireX();
|
Register temp = scope.AcquireX();
|
||||||
Register src = g.ToRegister(source);
|
Register src = g.ToRegister(source);
|
||||||
@ -3749,7 +3741,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
|
|||||||
__ Ldr(src, dst);
|
__ Ldr(src, dst);
|
||||||
__ Str(temp, dst);
|
__ Str(temp, dst);
|
||||||
} else {
|
} else {
|
||||||
UseScratchRegisterScope scope(tasm());
|
UseScratchRegisterScope scope(masm());
|
||||||
VRegister src = g.ToDoubleRegister(source);
|
VRegister src = g.ToDoubleRegister(source);
|
||||||
if (source->IsFloatRegister() || source->IsDoubleRegister()) {
|
if (source->IsFloatRegister() || source->IsDoubleRegister()) {
|
||||||
VRegister temp = scope.AcquireD();
|
VRegister temp = scope.AcquireD();
|
||||||
@ -3767,9 +3759,9 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
case MoveType::kStackToStack: {
|
case MoveType::kStackToStack: {
|
||||||
UseScratchRegisterScope scope(tasm());
|
UseScratchRegisterScope scope(masm());
|
||||||
MemOperand src = g.ToMemOperand(source, tasm());
|
MemOperand src = g.ToMemOperand(source, masm());
|
||||||
MemOperand dst = g.ToMemOperand(destination, tasm());
|
MemOperand dst = g.ToMemOperand(destination, masm());
|
||||||
VRegister temp_0 = scope.AcquireD();
|
VRegister temp_0 = scope.AcquireD();
|
||||||
VRegister temp_1 = scope.AcquireD();
|
VRegister temp_1 = scope.AcquireD();
|
||||||
if (source->IsSimd128StackSlot()) {
|
if (source->IsSimd128StackSlot()) {
|
||||||

@@ -199,11 +199,9 @@ namespace compiler {
   V(Arm64Float64MoveU64)               \
   V(Arm64U64MoveFloat64)               \
   V(Arm64LdrDecompressTaggedSigned)    \
-  V(Arm64LdrDecompressTaggedPointer)   \
-  V(Arm64LdrDecompressAnyTagged)       \
+  V(Arm64LdrDecompressTagged)          \
   V(Arm64LdarDecompressTaggedSigned)   \
-  V(Arm64LdarDecompressTaggedPointer)  \
-  V(Arm64LdarDecompressAnyTagged)      \
+  V(Arm64LdarDecompressTagged)         \
   V(Arm64StrCompressTagged)            \
   V(Arm64StlrCompressTagged)           \
   V(Arm64LdrDecodeSandboxedPointer)    \

@@ -315,11 +315,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kArm64LdrW:
     case kArm64Ldr:
     case kArm64LdrDecompressTaggedSigned:
-    case kArm64LdrDecompressTaggedPointer:
-    case kArm64LdrDecompressAnyTagged:
+    case kArm64LdrDecompressTagged:
     case kArm64LdarDecompressTaggedSigned:
-    case kArm64LdarDecompressTaggedPointer:
-    case kArm64LdarDecompressAnyTagged:
+    case kArm64LdarDecompressTagged:
     case kArm64LdrDecodeSandboxedPointer:
     case kArm64Peek:
     case kArm64LoadSplat:
@@ -431,8 +429,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
       return 1;

    case kArm64LdrDecompressTaggedSigned:
-    case kArm64LdrDecompressTaggedPointer:
-    case kArm64LdrDecompressAnyTagged:
+    case kArm64LdrDecompressTagged:
     case kArm64Ldr:
     case kArm64LdrD:
     case kArm64LdrS:

@@ -623,7 +623,7 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
             selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
    ptrdiff_t const delta =
        g.GetIntegerConstantValue(index) +
-        TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+        MacroAssemblerBase::RootRegisterOffsetForExternalReference(
            selector->isolate(), m.ResolvedValue());
    input_count = 1;
    // Check that the delta is a 32-bit integer due to the limitations of
@@ -843,11 +843,8 @@ void InstructionSelector::VisitLoad(Node* node) {
       immediate_mode = kLoadStoreImm32;
       break;
     case MachineRepresentation::kTaggedPointer:
-      opcode = kArm64LdrDecompressTaggedPointer;
-      immediate_mode = kLoadStoreImm32;
-      break;
     case MachineRepresentation::kTagged:
-      opcode = kArm64LdrDecompressAnyTagged;
+      opcode = kArm64LdrDecompressTagged;
       immediate_mode = kLoadStoreImm32;
       break;
 #else
@@ -988,7 +985,7 @@ void InstructionSelector::VisitStore(Node* node) {
             CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
    ptrdiff_t const delta =
        g.GetIntegerConstantValue(index) +
-        TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+        MacroAssemblerBase::RootRegisterOffsetForExternalReference(
            isolate(), m.ResolvedValue());
    if (is_int32(delta)) {
      input_count = 2;
@@ -2773,10 +2770,10 @@ void VisitAtomicLoad(InstructionSelector* selector, Node* node,
       code = kArm64LdarDecompressTaggedSigned;
       break;
     case MachineRepresentation::kTaggedPointer:
-      code = kArm64LdarDecompressTaggedPointer;
+      code = kArm64LdarDecompressTagged;
       break;
     case MachineRepresentation::kTagged:
-      code = kArm64LdarDecompressAnyTagged;
+      code = kArm64LdarDecompressTagged;
       break;
 #else
     case MachineRepresentation::kTaggedSigned:  // Fall through.

@@ -266,14 +266,14 @@ class OutOfLineCode : public ZoneObject {
   Label* entry() { return &entry_; }
   Label* exit() { return &exit_; }
   const Frame* frame() const { return frame_; }
-  TurboAssembler* tasm() { return tasm_; }
+  MacroAssembler* masm() { return masm_; }
   OutOfLineCode* next() const { return next_; }

  private:
   Label entry_;
   Label exit_;
   const Frame* const frame_;
-  TurboAssembler* const tasm_;
+  MacroAssembler* const masm_;
   OutOfLineCode* const next_;
 };


@@ -64,7 +64,7 @@ CodeGenerator::CodeGenerator(
       current_block_(RpoNumber::Invalid()),
       start_source_position_(start_source_position),
       current_source_position_(SourcePosition::Unknown()),
-      tasm_(isolate, options, CodeObjectRequired::kNo,
+      masm_(isolate, options, CodeObjectRequired::kNo,
 #if V8_ENABLE_WEBASSEMBLY
             buffer_cache ? buffer_cache->GetAssemblerBuffer(
                                AssemblerBase::kDefaultBufferSize)
@@ -98,15 +98,15 @@ CodeGenerator::CodeGenerator(
   }
   CreateFrameAccessState(frame);
   CHECK_EQ(info->is_osr(), osr_helper_.has_value());
-  tasm_.set_jump_optimization_info(jump_opt);
+  masm_.set_jump_optimization_info(jump_opt);
   CodeKind code_kind = info->code_kind();
   if (code_kind == CodeKind::WASM_FUNCTION ||
       code_kind == CodeKind::WASM_TO_CAPI_FUNCTION ||
       code_kind == CodeKind::WASM_TO_JS_FUNCTION ||
       code_kind == CodeKind::JS_TO_WASM_FUNCTION) {
-    tasm_.set_abort_hard(true);
+    masm_.set_abort_hard(true);
   }
-  tasm_.set_builtin(builtin);
+  masm_.set_builtin(builtin);
 }

 bool CodeGenerator::wasm_runtime_exception_support() const {
@@ -173,19 +173,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
   Label* jump_deoptimization_entry_label =
       &jump_deoptimization_entry_labels_[static_cast<int>(deopt_kind)];
   if (info()->source_positions()) {
-    tasm()->RecordDeoptReason(deoptimization_reason, exit->node_id(),
+    masm()->RecordDeoptReason(deoptimization_reason, exit->node_id(),
                               exit->pos(), deoptimization_id);
   }

   if (deopt_kind == DeoptimizeKind::kLazy) {
     ++lazy_deopt_count_;
-    tasm()->BindExceptionHandler(exit->label());
+    masm()->BindExceptionHandler(exit->label());
   } else {
     ++eager_deopt_count_;
-    tasm()->bind(exit->label());
+    masm()->bind(exit->label());
   }
   Builtin target = Deoptimizer::GetDeoptimizationEntry(deopt_kind);
-  tasm()->CallForDeoptimization(target, deoptimization_id, exit->label(),
+  masm()->CallForDeoptimization(target, deoptimization_id, exit->label(),
                                 deopt_kind, exit->continue_label(),
                                 jump_deoptimization_entry_label);

@@ -195,7 +195,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
 }

 void CodeGenerator::MaybeEmitOutOfLineConstantPool() {
-  tasm()->MaybeEmitOutOfLineConstantPool();
+  masm()->MaybeEmitOutOfLineConstantPool();
 }

 void CodeGenerator::AssembleCode() {
@@ -204,27 +204,27 @@ void CodeGenerator::AssembleCode() {
   // Open a frame scope to indicate that there is a frame on the stack. The
   // MANUAL indicates that the scope shouldn't actually generate code to set up
   // the frame (that is done in AssemblePrologue).
-  FrameScope frame_scope(tasm(), StackFrame::MANUAL);
+  FrameScope frame_scope(masm(), StackFrame::MANUAL);

   if (info->source_positions()) {
     AssembleSourcePosition(start_source_position());
   }
-  offsets_info_.code_start_register_check = tasm()->pc_offset();
+  offsets_info_.code_start_register_check = masm()->pc_offset();

-  tasm()->CodeEntry();
+  masm()->CodeEntry();

   // Check that {kJavaScriptCallCodeStartRegister} has been set correctly.
   if (v8_flags.debug_code && info->called_with_code_start_register()) {
-    tasm()->RecordComment("-- Prologue: check code start register --");
+    masm()->RecordComment("-- Prologue: check code start register --");
     AssembleCodeStartRegisterCheck();
   }

-  offsets_info_.deopt_check = tasm()->pc_offset();
+  offsets_info_.deopt_check = masm()->pc_offset();
   // We want to bailout only from JS functions, which are the only ones
   // that are optimized.
   if (info->IsOptimizing()) {
     DCHECK(linkage()->GetIncomingDescriptor()->IsJSFunctionCall());
-    tasm()->RecordComment("-- Prologue: check for deoptimization --");
+    masm()->RecordComment("-- Prologue: check for deoptimization --");
     BailoutIfDeoptimized();
   }

@@ -258,22 +258,22 @@ void CodeGenerator::AssembleCode() {
     instr_starts_.assign(instructions()->instructions().size(), {});
   }
   // Assemble instructions in assembly order.
-  offsets_info_.blocks_start = tasm()->pc_offset();
+  offsets_info_.blocks_start = masm()->pc_offset();
   for (const InstructionBlock* block : instructions()->ao_blocks()) {
     // Align loop headers on vendor recommended boundaries.
-    if (!tasm()->jump_optimization_info()) {
+    if (!masm()->jump_optimization_info()) {
       if (block->ShouldAlignLoopHeader()) {
-        tasm()->LoopHeaderAlign();
+        masm()->LoopHeaderAlign();
       } else if (block->ShouldAlignCodeTarget()) {
-        tasm()->CodeTargetAlign();
+        masm()->CodeTargetAlign();
       }
     }
     if (info->trace_turbo_json()) {
-      block_starts_[block->rpo_number().ToInt()] = tasm()->pc_offset();
+      block_starts_[block->rpo_number().ToInt()] = masm()->pc_offset();
     }
     // Bind a label for a block.
     current_block_ = block->rpo_number();
-    unwinding_info_writer_.BeginInstructionBlock(tasm()->pc_offset(), block);
+    unwinding_info_writer_.BeginInstructionBlock(masm()->pc_offset(), block);
     if (v8_flags.code_comments) {
       std::ostringstream buffer;
       buffer << "-- B" << block->rpo_number().ToInt() << " start";
@@ -289,12 +289,12 @@ void CodeGenerator::AssembleCode() {
         buffer << " (in loop " << block->loop_header().ToInt() << ")";
       }
       buffer << " --";
-      tasm()->RecordComment(buffer.str().c_str());
+      masm()->RecordComment(buffer.str().c_str());
     }

     frame_access_state()->MarkHasFrame(block->needs_frame());

-    tasm()->bind(GetLabel(current_block_));
+    masm()->bind(GetLabel(current_block_));

     if (block->must_construct_frame()) {
       AssembleConstructFrame();
@@ -303,7 +303,7 @@ void CodeGenerator::AssembleCode() {
       // using the roots.
       // TODO(mtrofin): investigate how we can avoid doing this repeatedly.
       if (linkage()->GetIncomingDescriptor()->InitializeRootRegister()) {
-        tasm()->InitializeRootRegister();
+        masm()->InitializeRootRegister();
       }
     }
 #ifdef V8_TARGET_ARCH_RISCV64
@@ -312,10 +312,10 @@ void CodeGenerator::AssembleCode() {
     // back between blocks. the Rvv instruction may get an incorrect vtype. so
     // here VectorUnit needs to be cleared to ensure that the vtype is correct
     // within the block.
-    tasm()->VU.clear();
+    masm()->VU.clear();
 #endif
     if (V8_EMBEDDED_CONSTANT_POOL_BOOL && !block->needs_frame()) {
-      ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
+      ConstantPoolUnavailableScope constant_pool_unavailable(masm());
       result_ = AssembleBlock(block);
     } else {
       result_ = AssembleBlock(block);
@@ -325,29 +325,29 @@ void CodeGenerator::AssembleCode() {
   }

   // Assemble all out-of-line code.
-  offsets_info_.out_of_line_code = tasm()->pc_offset();
+  offsets_info_.out_of_line_code = masm()->pc_offset();
   if (ools_) {
-    tasm()->RecordComment("-- Out of line code --");
+    masm()->RecordComment("-- Out of line code --");
     for (OutOfLineCode* ool = ools_; ool; ool = ool->next()) {
-      tasm()->bind(ool->entry());
+      masm()->bind(ool->entry());
       ool->Generate();
-      if (ool->exit()->is_bound()) tasm()->jmp(ool->exit());
+      if (ool->exit()->is_bound()) masm()->jmp(ool->exit());
     }
   }

   // This nop operation is needed to ensure that the trampoline is not
   // confused with the pc of the call before deoptimization.
   // The test regress/regress-259 is an example of where we need it.
-  tasm()->nop();
+  masm()->nop();

   // For some targets, we must make sure that constant and veneer pools are
   // emitted before emitting the deoptimization exits.
   PrepareForDeoptimizationExits(&deoptimization_exits_);

-  deopt_exit_start_offset_ = tasm()->pc_offset();
+  deopt_exit_start_offset_ = masm()->pc_offset();

   // Assemble deoptimization exits.
-  offsets_info_.deoptimization_exits = tasm()->pc_offset();
+  offsets_info_.deoptimization_exits = masm()->pc_offset();
   int last_updated = 0;
   // We sort the deoptimization exits here so that the lazy ones will be visited
   // last. We need this as lazy deopts might need additional instructions.
@@ -367,7 +367,7 @@ void CodeGenerator::AssembleCode() {
   {
 #ifdef V8_TARGET_ARCH_PPC64
     v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
-        tasm());
+        masm());
 #endif
     for (DeoptimizationExit* exit : deoptimization_exits_) {
       if (exit->emitted()) continue;
@@ -388,19 +388,19 @@ void CodeGenerator::AssembleCode() {
     }
   }

-  offsets_info_.pools = tasm()->pc_offset();
+  offsets_info_.pools = masm()->pc_offset();
   // TODO(jgruber): Move all inlined metadata generation into a new,
   // architecture-independent version of FinishCode. Currently, this includes
   // the safepoint table, handler table, constant pool, and code comments, in
   // that order.
   FinishCode();

-  offsets_info_.jump_tables = tasm()->pc_offset();
+  offsets_info_.jump_tables = masm()->pc_offset();
   // Emit the jump tables.
   if (jump_tables_) {
-    tasm()->Align(kSystemPointerSize);
+    masm()->Align(kSystemPointerSize);
     for (JumpTable* table = jump_tables_; table; table = table->next()) {
-      tasm()->bind(table->label());
+      masm()->bind(table->label());
       AssembleJumpTable(table->targets(), table->target_count());
     }
   }
@@ -408,34 +408,35 @@ void CodeGenerator::AssembleCode() {
   // The LinuxPerfJitLogger logs code up until here, excluding the safepoint
   // table. Resolve the unwinding info now so it is aware of the same code
   // size as reported by perf.
-  unwinding_info_writer_.Finish(tasm()->pc_offset());
+  unwinding_info_writer_.Finish(masm()->pc_offset());

   // Final alignment before starting on the metadata section.
-  tasm()->Align(InstructionStream::kMetadataAlignment);
+  masm()->Align(InstructionStream::kMetadataAlignment);

-  safepoints()->Emit(tasm(), frame()->GetTotalFrameSlotCount());
+  safepoints()->Emit(masm(), frame()->GetTotalFrameSlotCount());

   // Emit the exception handler table.
   if (!handlers_.empty()) {
-    handler_table_offset_ = HandlerTable::EmitReturnTableStart(tasm());
+    handler_table_offset_ = HandlerTable::EmitReturnTableStart(masm());
     for (size_t i = 0; i < handlers_.size(); ++i) {
-      HandlerTable::EmitReturnEntry(tasm(), handlers_[i].pc_offset,
+      HandlerTable::EmitReturnEntry(masm(), handlers_[i].pc_offset,
                                     handlers_[i].handler->pos());
     }
   }

-  tasm()->MaybeEmitOutOfLineConstantPool();
-  tasm()->FinalizeJumpOptimizationInfo();
+  masm()->MaybeEmitOutOfLineConstantPool();
+  masm()->FinalizeJumpOptimizationInfo();

   result_ = kSuccess;
 }

+#ifndef V8_TARGET_ARCH_X64
 void CodeGenerator::AssembleArchBinarySearchSwitchRange(
     Register input, RpoNumber def_block, std::pair<int32_t, Label*>* begin,
     std::pair<int32_t, Label*>* end) {
   if (end - begin < kBinarySearchSwitchMinimalCases) {
     while (begin != end) {
-      tasm()->JumpIfEqual(input, begin->first, begin->second);
+      masm()->JumpIfEqual(input, begin->first, begin->second);
       ++begin;
     }
     AssembleArchJumpRegardlessOfAssemblyOrder(def_block);
@@ -443,11 +444,12 @@ void CodeGenerator::AssembleArchBinarySearchSwitchRange(
   }
   auto middle = begin + (end - begin) / 2;
   Label less_label;
-  tasm()->JumpIfLessThan(input, middle->first, &less_label);
+  masm()->JumpIfLessThan(input, middle->first, &less_label);
   AssembleArchBinarySearchSwitchRange(input, def_block, middle, end);
-  tasm()->bind(&less_label);
+  masm()->bind(&less_label);
   AssembleArchBinarySearchSwitchRange(input, def_block, begin, middle);
 }
+#endif  // V8_TARGET_ARCH_X64

 void CodeGenerator::AssembleArchJump(RpoNumber target) {
   if (!IsNextInAssemblyOrder(target))
@@ -469,7 +471,7 @@ base::OwnedVector<byte> CodeGenerator::GetProtectedInstructionsData() {

 MaybeHandle<Code> CodeGenerator::FinalizeCode() {
   if (result_ != kSuccess) {
-    tasm()->AbortedCodeGeneration();
+    masm()->AbortedCodeGeneration();
     return {};
   }

@@ -482,11 +484,11 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() {

   // Allocate and install the code.
   CodeDesc desc;
-  tasm()->GetCode(isolate(), &desc, safepoints(), handler_table_offset_);
+  masm()->GetCode(isolate(), &desc, safepoints(), handler_table_offset_);

 #if defined(V8_OS_WIN64)
   if (Builtins::IsBuiltinId(info_->builtin())) {
-    isolate_->SetBuiltinUnwindData(info_->builtin(), tasm()->GetUnwindInfo());
+    isolate_->SetBuiltinUnwindData(info_->builtin(), masm()->GetUnwindInfo());
   }
 #endif  // V8_OS_WIN64

@@ -508,7 +510,7 @@ MaybeHandle<Code> CodeGenerator::FinalizeCode() {

   Handle<Code> code;
   if (!maybe_code.ToHandle(&code)) {
-    tasm()->AbortedCodeGeneration();
+    masm()->AbortedCodeGeneration();
     return {};
   }

@@ -527,7 +529,7 @@ bool CodeGenerator::IsNextInAssemblyOrder(RpoNumber block) const {
 }

 void CodeGenerator::RecordSafepoint(ReferenceMap* references) {
-  auto safepoint = safepoints()->DefineSafepoint(tasm());
+  auto safepoint = safepoints()->DefineSafepoint(masm());
   int frame_header_offset = frame()->GetFixedSlotCount();
   for (const InstructionOperand& operand : references->reference_operands()) {
     if (operand.IsStackSlot()) {
@@ -558,7 +560,7 @@ bool CodeGenerator::IsMaterializableFromRoot(Handle<HeapObject> object,
 CodeGenerator::CodeGenResult CodeGenerator::AssembleBlock(
     const InstructionBlock* block) {
   if (block->IsHandler()) {
-    tasm()->ExceptionHandler();
+    masm()->ExceptionHandler();
   }
   for (int i = block->code_start(); i < block->code_end(); ++i) {
     CodeGenResult result = AssembleInstruction(i, block);
@@ -718,7 +720,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
     int instruction_index, const InstructionBlock* block) {
   Instruction* instr = instructions()->InstructionAt(instruction_index);
   if (info()->trace_turbo_json()) {
-    instr_starts_[instruction_index].gap_pc_offset = tasm()->pc_offset();
+    instr_starts_[instruction_index].gap_pc_offset = masm()->pc_offset();
   }
   int first_unused_stack_slot;
   FlagsMode mode = FlagsModeField::decode(instr->opcode());
@@ -738,14 +740,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
     AssembleDeconstructFrame();
   }
   if (info()->trace_turbo_json()) {
-    instr_starts_[instruction_index].arch_instr_pc_offset = tasm()->pc_offset();
+    instr_starts_[instruction_index].arch_instr_pc_offset = masm()->pc_offset();
   }
   // Assemble architecture-specific code for the instruction.
   CodeGenResult result = AssembleArchInstruction(instr);
   if (result != kSuccess) return result;

   if (info()->trace_turbo_json()) {
-    instr_starts_[instruction_index].condition_pc_offset = tasm()->pc_offset();
+    instr_starts_[instruction_index].condition_pc_offset = masm()->pc_offset();
   }

   FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
@@ -779,7 +781,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
       branch.false_label = exit->continue_label();
       branch.fallthru = true;
       AssembleArchDeoptBranch(instr, &branch);
-      tasm()->bind(exit->continue_label());
+      masm()->bind(exit->continue_label());
       break;
     }
     case kFlags_set: {
@@ -818,7 +820,7 @@ void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) {
   if (source_position == current_source_position_) return;
   current_source_position_ = source_position;
   if (!source_position.IsKnown()) return;
-  source_position_table_builder_.AddPosition(tasm()->pc_offset(),
+  source_position_table_builder_.AddPosition(masm()->pc_offset(),
                                              source_position, false);
   if (v8_flags.code_comments) {
     OptimizedCompilationInfo* info = this->info();
@@ -833,8 +835,8 @@ void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) {
     buffer << "-- ";
     // Turbolizer only needs the source position, as it can reconstruct
     // the inlining stack from other information.
-    if (info->trace_turbo_json() || !tasm()->isolate() ||
-        tasm()->isolate()->concurrent_recompilation_enabled()) {
+    if (info->trace_turbo_json() || !masm()->isolate() ||
+        masm()->isolate()->concurrent_recompilation_enabled()) {
       buffer << source_position;
     } else {
       AllowGarbageCollection allocation;
@@ -843,7 +845,7 @@ void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) {
       buffer << source_position.InliningStack(info);
     }
     buffer << " --";
-    tasm()->RecordComment(buffer.str().c_str());
+    masm()->RecordComment(buffer.str().c_str());
   }
 }

@@ -981,7 +983,7 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
     RpoNumber handler_rpo = i.InputRpo(instr->InputCount() - 1);
     DCHECK(instructions()->InstructionBlockAt(handler_rpo)->IsHandler());
     handlers_.push_back(
-        {GetLabel(handler_rpo), tasm()->pc_offset_for_safepoint()});
+        {GetLabel(handler_rpo), masm()->pc_offset_for_safepoint()});
   }

   if (needs_frame_state) {
@@ -991,7 +993,7 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
     size_t frame_state_offset = 1;
     FrameStateDescriptor* descriptor =
         GetDeoptimizationEntry(instr, frame_state_offset).descriptor();
-    int pc_offset = tasm()->pc_offset_for_safepoint();
+    int pc_offset = masm()->pc_offset_for_safepoint();
     BuildTranslation(instr, pc_offset, frame_state_offset, 0,
                      descriptor->state_combine());
   }
@@ -1325,7 +1327,7 @@ void CodeGenerator::AddTranslationForOperand(Instruction* instr,
 }

 void CodeGenerator::MarkLazyDeoptSite() {
-  last_lazy_deopt_pc_ = tasm()->pc_offset();
+  last_lazy_deopt_pc_ = masm()->pc_offset();
 }

 DeoptimizationExit* CodeGenerator::AddDeoptimizationExit(
@@ -1336,7 +1338,7 @@ DeoptimizationExit* CodeGenerator::AddDeoptimizationExit(
 }

 OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
-    : frame_(gen->frame()), tasm_(gen->tasm()), next_(gen->ools_) {
+    : frame_(gen->frame()), masm_(gen->masm()), next_(gen->ools_) {
   gen->ools_ = this;
 }

|
@ -188,7 +188,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
|
|||||||
void RecordSafepoint(ReferenceMap* references);
|
void RecordSafepoint(ReferenceMap* references);
|
||||||
|
|
||||||
Zone* zone() const { return zone_; }
|
Zone* zone() const { return zone_; }
|
||||||
TurboAssembler* tasm() { return &tasm_; }
|
MacroAssembler* masm() { return &masm_; }
|
||||||
SafepointTableBuilder* safepoint_table_builder() { return &safepoints_; }
|
SafepointTableBuilder* safepoint_table_builder() { return &safepoints_; }
|
||||||
size_t handler_table_offset() const { return handler_table_offset_; }
|
size_t handler_table_offset() const { return handler_table_offset_; }
|
||||||
|
|
||||||
@ -278,9 +278,15 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
|
|||||||
#if V8_ENABLE_WEBASSEMBLY
|
#if V8_ENABLE_WEBASSEMBLY
|
||||||
void AssembleArchTrap(Instruction* instr, FlagsCondition condition);
|
void AssembleArchTrap(Instruction* instr, FlagsCondition condition);
|
||||||
#endif // V8_ENABLE_WEBASSEMBLY
|
#endif // V8_ENABLE_WEBASSEMBLY
|
||||||
|
#if V8_TARGET_ARCH_X64
|
||||||
|
void AssembleArchBinarySearchSwitchRange(
|
||||||
|
Register input, RpoNumber def_block, std::pair<int32_t, Label*>* begin,
|
||||||
|
std::pair<int32_t, Label*>* end, base::Optional<int32_t>& last_cmp_value);
|
||||||
|
#else
|
||||||
void AssembleArchBinarySearchSwitchRange(Register input, RpoNumber def_block,
|
void AssembleArchBinarySearchSwitchRange(Register input, RpoNumber def_block,
|
||||||
std::pair<int32_t, Label*>* begin,
|
std::pair<int32_t, Label*>* begin,
|
||||||
std::pair<int32_t, Label*>* end);
|
std::pair<int32_t, Label*>* end);
|
||||||
|
#endif // V8_TARGET_ARCH_X64
|
||||||
void AssembleArchBinarySearchSwitch(Instruction* instr);
|
void AssembleArchBinarySearchSwitch(Instruction* instr);
|
||||||
void AssembleArchTableSwitch(Instruction* instr);
|
void AssembleArchTableSwitch(Instruction* instr);
|
||||||
|
|
||||||
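The new x64-only overload of AssembleArchBinarySearchSwitchRange threads last_cmp_value through the recursion so a compare against an already-tested case value can be elided. As a rough sketch of the underlying divide-and-conquer dispatch (plain C++ standing in for the emitted compare-and-branch sequences; CaseRange and the cutoff of 4 are illustrative assumptions, not V8's values):

#include <cstdint>
#include <cstdio>
#include <utility>

using CaseRange = std::pair<int32_t, const char*>;  // (case value, block label)

// Dispatch in the style of AssembleArchBinarySearchSwitchRange: split the
// sorted case table at its midpoint, branch once, and recurse into one half.
const char* BinarySearchSwitch(int32_t input, const CaseRange* begin,
                               const CaseRange* end, const char* def_block) {
  if (end - begin < 4) {  // small ranges become a linear compare cascade
    for (const CaseRange* p = begin; p != end; ++p) {
      if (input == p->first) return p->second;
    }
    return def_block;
  }
  const CaseRange* mid = begin + (end - begin) / 2;
  // The emitted code performs one compare-and-branch here.
  return input < mid->first ? BinarySearchSwitch(input, begin, mid, def_block)
                            : BinarySearchSwitch(input, mid, end, def_block);
}

int main() {
  const CaseRange cases[] = {{1, "B1"}, {4, "B4"}, {9, "B9"}, {16, "B16"}};
  std::printf("%s\n", BinarySearchSwitch(9, cases, cases + 4, "Bdefault"));
}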
@@ -448,7 +454,7 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
   RpoNumber current_block_;
   SourcePosition start_source_position_;
   SourcePosition current_source_position_;
-  TurboAssembler tasm_;
+  MacroAssembler masm_;
   GapResolver resolver_;
   SafepointTableBuilder safepoints_;
   ZoneVector<HandlerInfo> handlers_;

@@ -29,7 +29,7 @@ namespace v8 {
 namespace internal {
 namespace compiler {
 
-#define __ tasm()->
+#define __ masm()->
 
 #define kScratchDoubleReg xmm0
 
@@ -202,11 +202,11 @@ class IA32OperandConverter : public InstructionOperandConverter {
   void MoveInstructionOperandToRegister(Register destination,
                                         InstructionOperand* op) {
     if (op->IsImmediate() || op->IsConstant()) {
-      gen_->tasm()->mov(destination, ToImmediate(op));
+      gen_->masm()->mov(destination, ToImmediate(op));
     } else if (op->IsRegister()) {
-      gen_->tasm()->Move(destination, ToRegister(op));
+      gen_->masm()->Move(destination, ToRegister(op));
     } else {
-      gen_->tasm()->mov(destination, ToOperand(op));
+      gen_->masm()->mov(destination, ToOperand(op));
     }
   }
 };
@@ -475,7 +475,7 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
     XMMRegister src0 = i.InputSimd128Register(0);                    \
     Operand src1 = i.InputOperand(instr->InputCount() == 2 ? 1 : 0); \
     if (CpuFeatures::IsSupported(AVX)) {                             \
-      CpuFeatureScope avx_scope(tasm(), AVX);                        \
+      CpuFeatureScope avx_scope(masm(), AVX);                        \
       __ v##opcode(i.OutputSimd128Register(), src0, src1);           \
     } else {                                                         \
       DCHECK_EQ(i.OutputSimd128Register(), src0);                    \
@@ -485,11 +485,11 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
 
 #define ASSEMBLE_SIMD_IMM_SHUFFLE(opcode, SSELevel, imm)               \
   if (CpuFeatures::IsSupported(AVX)) {                                 \
-    CpuFeatureScope avx_scope(tasm(), AVX);                            \
+    CpuFeatureScope avx_scope(masm(), AVX);                            \
     __ v##opcode(i.OutputSimd128Register(), i.InputSimd128Register(0), \
                  i.InputOperand(1), imm);                              \
   } else {                                                             \
-    CpuFeatureScope sse_scope(tasm(), SSELevel);                       \
+    CpuFeatureScope sse_scope(masm(), SSELevel);                       \
     DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));   \
     __ opcode(i.OutputSimd128Register(), i.InputOperand(1), imm);      \
   }
@@ -532,26 +532,25 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
     int8_t laneidx = i.InputInt8(1);                         \
     if (HasAddressingMode(instr)) {                          \
       if (CpuFeatures::IsSupported(AVX)) {                   \
-        CpuFeatureScope avx_scope(tasm(), AVX);              \
+        CpuFeatureScope avx_scope(masm(), AVX);              \
         __ v##OPCODE(dst, src, i.MemoryOperand(2), laneidx); \
       } else {                                               \
         DCHECK_EQ(dst, src);                                 \
-        CpuFeatureScope sse_scope(tasm(), CPU_FEATURE);      \
+        CpuFeatureScope sse_scope(masm(), CPU_FEATURE);      \
         __ OPCODE(dst, i.MemoryOperand(2), laneidx);         \
       }                                                      \
     } else {                                                 \
       if (CpuFeatures::IsSupported(AVX)) {                   \
-        CpuFeatureScope avx_scope(tasm(), AVX);              \
+        CpuFeatureScope avx_scope(masm(), AVX);              \
         __ v##OPCODE(dst, src, i.InputOperand(2), laneidx);  \
       } else {                                               \
         DCHECK_EQ(dst, src);                                 \
-        CpuFeatureScope sse_scope(tasm(), CPU_FEATURE);      \
+        CpuFeatureScope sse_scope(masm(), CPU_FEATURE);      \
         __ OPCODE(dst, i.InputOperand(2), laneidx);          \
       }                                                      \
     }                                                        \
   } while (false)
 
-
 void CodeGenerator::AssembleDeconstructFrame() {
   __ mov(esp, ebp);
   __ pop(ebp);
@@ -566,7 +565,7 @@ void CodeGenerator::AssemblePrepareTailCall() {
 
 namespace {
 
-void AdjustStackPointerForTailCall(TurboAssembler* tasm,
+void AdjustStackPointerForTailCall(MacroAssembler* masm,
                                    FrameAccessState* state,
                                    int new_slot_above_sp,
                                    bool allow_shrinkage = true) {
@@ -574,10 +573,10 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
                           StandardFrameConstants::kFixedSlotCountAboveFp;
   int stack_slot_delta = new_slot_above_sp - current_sp_offset;
   if (stack_slot_delta > 0) {
-    tasm->AllocateStackSpace(stack_slot_delta * kSystemPointerSize);
+    masm->AllocateStackSpace(stack_slot_delta * kSystemPointerSize);
    state->IncreaseSPDelta(stack_slot_delta);
   } else if (allow_shrinkage && stack_slot_delta < 0) {
-    tasm->add(esp, Immediate(-stack_slot_delta * kSystemPointerSize));
+    masm->add(esp, Immediate(-stack_slot_delta * kSystemPointerSize));
     state->IncreaseSPDelta(stack_slot_delta);
   }
 }
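Both branches of AdjustStackPointerForTailCall apply the same slot delta, only in opposite directions. A minimal scalar sketch of that bookkeeping, assuming ia32's 4-byte slots (the names here are illustrative):

#include <cstdio>

constexpr int kSystemPointerSize = 4;  // ia32 slot size

// Positive result: bytes to allocate below the stack pointer; negative:
// bytes to release; zero: leave the stack pointer alone.
int StackByteDelta(int current_sp_offset, int new_slot_above_sp,
                   bool allow_shrinkage) {
  int stack_slot_delta = new_slot_above_sp - current_sp_offset;
  if (stack_slot_delta > 0) return stack_slot_delta * kSystemPointerSize;
  if (allow_shrinkage && stack_slot_delta < 0)
    return stack_slot_delta * kSystemPointerSize;
  return 0;
}

int main() {
  std::printf("%d\n", StackByteDelta(2, 5, true));   // 12: grow by three slots
  std::printf("%d\n", StackByteDelta(5, 2, false));  // 0: shrinking disallowed
}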
@@ -617,7 +616,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
         LocationOperand destination_location(
             LocationOperand::cast(move->destination()));
         InstructionOperand source(move->source());
-        AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+        AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                       destination_location.index());
         if (source.IsStackSlot()) {
           LocationOperand source_location(LocationOperand::cast(source));
@@ -635,13 +634,13 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
         move->Eliminate();
       }
     }
-    AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+    AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                   first_unused_slot_offset, false);
   }
 }
 
 void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
                                              int first_unused_slot_offset) {
-  AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                 first_unused_slot_offset);
 }
 
@@ -884,7 +883,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       {
         // We don't actually want to generate a pile of code for this, so just
        // claim there is a stack frame, without generating one.
-        FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+        FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE);
         __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
                 RelocInfo::CODE_TARGET);
       }
@@ -1262,7 +1261,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ Sqrtss(i.OutputDoubleRegister(), i.InputOperand(0));
       break;
     case kIA32Float32Round: {
-      CpuFeatureScope sse_scope(tasm(), SSE4_1);
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
       RoundingMode const mode =
           static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
       __ Roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
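MiscField::decode(instr->opcode()) unpacks the rounding mode from bits of the InstructionCode. A hedged sketch of that bit-field style; the shift and width below are placeholders for illustration, not V8's actual layout:

#include <cstdint>
#include <cstdio>

template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((1u << kSize) - 1u) << kShift;
  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static constexpr T decode(uint32_t packed) {
    return static_cast<T>((packed & kMask) >> kShift);
  }
};

enum class RoundingMode : uint32_t { kNearest, kDown, kUp, kTowardZero };
using MiscFieldSketch = BitField<RoundingMode, 22, 10>;  // assumed layout

int main() {
  uint32_t opcode = 0x1234u | MiscFieldSketch::encode(RoundingMode::kUp);
  std::printf("%u\n", static_cast<uint32_t>(MiscFieldSketch::decode(opcode)));
}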
@@ -2112,12 +2111,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kIA32Insertps: {
       if (CpuFeatures::IsSupported(AVX)) {
-        CpuFeatureScope avx_scope(tasm(), AVX);
+        CpuFeatureScope avx_scope(masm(), AVX);
         __ vinsertps(i.OutputSimd128Register(), i.InputSimd128Register(0),
                      i.InputOperand(2), i.InputInt8(1) << 4);
       } else {
         DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
-        CpuFeatureScope sse_scope(tasm(), SSE4_1);
+        CpuFeatureScope sse_scope(masm(), SSE4_1);
         __ insertps(i.OutputSimd128Register(), i.InputOperand(2),
                     i.InputInt8(1) << 4);
       }
@@ -2315,12 +2314,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       XMMRegister src1 = i.InputSimd128Register(0);
       XMMRegister src2 = i.InputSimd128Register(1);
       if (CpuFeatures::IsSupported(AVX)) {
-        CpuFeatureScope avx_scope(tasm(), AVX);
+        CpuFeatureScope avx_scope(masm(), AVX);
         __ vpminsd(kScratchDoubleReg, src1, src2);
         __ vpcmpeqd(dst, kScratchDoubleReg, src2);
       } else {
         DCHECK_EQ(dst, src1);
-        CpuFeatureScope sse_scope(tasm(), SSE4_1);
+        CpuFeatureScope sse_scope(masm(), SSE4_1);
         __ pminsd(dst, src2);
         __ pcmpeqd(dst, src2);
       }
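SSE4.1 has packed minimums but no packed greater-or-equal, so the sequences above derive it from the identity min(a, b) == b exactly when a >= b. A scalar model of one lane, the same shape for the signed pminsd/pcmpeqd and unsigned pminud/pcmpeqd pairs:

#include <algorithm>
#include <cassert>
#include <cstdint>

// Per-lane model of the pmin/pcmpeq pairs: a >= b  <=>  min(a, b) == b.
bool GeS(int32_t a, int32_t b) { return std::min(a, b) == b; }
bool GeU(uint32_t a, uint32_t b) { return std::min(a, b) == b; }

int main() {
  assert(GeS(-1, -5) && !GeS(-5, -1));
  assert(GeU(0xFFFFFFFFu, 1u) && !GeU(1u, 0xFFFFFFFFu));
  return 0;
}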
@@ -2328,7 +2327,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kSSEI32x4UConvertF32x4: {
       DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
-      CpuFeatureScope sse_scope(tasm(), SSE4_1);
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
       XMMRegister dst = i.OutputSimd128Register();
       XMMRegister tmp = i.TempSimd128Register(0);
       // NAN->0, negative->0
@@ -2356,7 +2355,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kAVXI32x4UConvertF32x4: {
       DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
-      CpuFeatureScope avx_scope(tasm(), AVX);
+      CpuFeatureScope avx_scope(masm(), AVX);
       XMMRegister dst = i.OutputSimd128Register();
       XMMRegister tmp = i.TempSimd128Register(0);
       // NAN->0, negative->0
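The recurring "// NAN->0, negative->0" comment names the saturating semantics the multi-instruction sequence must build, since SSE only offers a signed truncating convert. A scalar sketch of the intended per-lane behavior:

#include <cmath>
#include <cstdint>
#include <cstdio>

// Saturating f32 -> u32 lane conversion: NaN and negative inputs become 0,
// values at or above 2^32 clamp to UINT32_MAX, everything else truncates.
uint32_t SatConvertF32ToU32(float v) {
  if (std::isnan(v) || v < 0.0f) return 0;
  if (v >= 4294967296.0f) return 0xFFFFFFFFu;
  return static_cast<uint32_t>(v);
}

int main() {
  std::printf("%u %u\n", SatConvertF32ToU32(-3.5f), SatConvertF32ToU32(3.9f));
}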
@@ -2406,7 +2405,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kSSEI32x4GtU: {
       DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
-      CpuFeatureScope sse_scope(tasm(), SSE4_1);
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
       XMMRegister dst = i.OutputSimd128Register();
       Operand src = i.InputOperand(1);
       __ pmaxud(dst, src);
@@ -2416,7 +2415,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kAVXI32x4GtU: {
-      CpuFeatureScope avx_scope(tasm(), AVX);
+      CpuFeatureScope avx_scope(masm(), AVX);
       XMMRegister dst = i.OutputSimd128Register();
       XMMRegister src1 = i.InputSimd128Register(0);
       Operand src2 = i.InputOperand(1);
@@ -2428,7 +2427,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kSSEI32x4GeU: {
       DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
-      CpuFeatureScope sse_scope(tasm(), SSE4_1);
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
       XMMRegister dst = i.OutputSimd128Register();
       Operand src = i.InputOperand(1);
       __ pminud(dst, src);
@@ -2436,7 +2435,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kAVXI32x4GeU: {
-      CpuFeatureScope avx_scope(tasm(), AVX);
+      CpuFeatureScope avx_scope(masm(), AVX);
       XMMRegister src1 = i.InputSimd128Register(0);
       Operand src2 = i.InputOperand(1);
       __ vpminud(kScratchDoubleReg, src1, src2);
@@ -2552,7 +2551,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kAVXI16x8Ne: {
-      CpuFeatureScope avx_scope(tasm(), AVX);
+      CpuFeatureScope avx_scope(masm(), AVX);
       __ vpcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(0),
                   i.InputOperand(1));
       __ vpcmpeqw(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
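There is no packed not-equal either; it is synthesized as NOT(a == b) by comparing for equality, materializing an all-ones mask (pcmpeqw of a register with itself), and xoring. One 16-bit lane, modeled in scalar code:

#include <cstdint>
#include <cstdio>

// Lane model of the pcmpeqw + pxor sequence: Ne is the complement of Eq.
uint16_t LaneEq(uint16_t a, uint16_t b) { return a == b ? 0xFFFF : 0x0000; }

uint16_t LaneNe(uint16_t a, uint16_t b) {
  const uint16_t kAllOnes = 0xFFFF;  // pcmpeqw(x, x) yields all ones per lane
  return LaneEq(a, b) ^ kAllOnes;
}

int main() { std::printf("%04x %04x\n", LaneNe(7, 7), LaneNe(7, 8)); }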
@@ -2574,7 +2573,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kAVXI16x8GeS: {
-      CpuFeatureScope avx_scope(tasm(), AVX);
+      CpuFeatureScope avx_scope(masm(), AVX);
       XMMRegister src1 = i.InputSimd128Register(0);
       Operand src2 = i.InputOperand(1);
       __ vpminsw(kScratchDoubleReg, src1, src2);
@@ -2621,7 +2620,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kSSEI16x8GtU: {
       DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
-      CpuFeatureScope sse_scope(tasm(), SSE4_1);
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
       XMMRegister dst = i.OutputSimd128Register();
       Operand src = i.InputOperand(1);
       __ pmaxuw(dst, src);
@@ -2631,7 +2630,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kAVXI16x8GtU: {
-      CpuFeatureScope avx_scope(tasm(), AVX);
+      CpuFeatureScope avx_scope(masm(), AVX);
       XMMRegister dst = i.OutputSimd128Register();
       XMMRegister src1 = i.InputSimd128Register(0);
       Operand src2 = i.InputOperand(1);
@@ -2643,7 +2642,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kSSEI16x8GeU: {
       DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
-      CpuFeatureScope sse_scope(tasm(), SSE4_1);
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
       XMMRegister dst = i.OutputSimd128Register();
       Operand src = i.InputOperand(1);
       __ pminuw(dst, src);
@@ -2651,7 +2650,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kAVXI16x8GeU: {
-      CpuFeatureScope avx_scope(tasm(), AVX);
+      CpuFeatureScope avx_scope(masm(), AVX);
       XMMRegister src1 = i.InputSimd128Register(0);
       Operand src2 = i.InputOperand(1);
       __ vpminuw(kScratchDoubleReg, src1, src2);
@@ -2844,7 +2843,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kAVXI8x16Ne: {
-      CpuFeatureScope avx_scope(tasm(), AVX);
+      CpuFeatureScope avx_scope(masm(), AVX);
       __ vpcmpeqb(i.OutputSimd128Register(), i.InputSimd128Register(0),
                   i.InputOperand(1));
       __ vpcmpeqb(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
@@ -2859,7 +2858,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kSSEI8x16GeS: {
       DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
-      CpuFeatureScope sse_scope(tasm(), SSE4_1);
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
       XMMRegister dst = i.OutputSimd128Register();
       Operand src = i.InputOperand(1);
       __ pminsb(dst, src);
@@ -2867,7 +2866,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kAVXI8x16GeS: {
-      CpuFeatureScope avx_scope(tasm(), AVX);
+      CpuFeatureScope avx_scope(masm(), AVX);
       XMMRegister src1 = i.InputSimd128Register(0);
       Operand src2 = i.InputOperand(1);
       __ vpminsb(kScratchDoubleReg, src1, src2);
@@ -2925,7 +2924,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kAVXI8x16GtU: {
-      CpuFeatureScope avx_scope(tasm(), AVX);
+      CpuFeatureScope avx_scope(masm(), AVX);
       XMMRegister dst = i.OutputSimd128Register();
       XMMRegister src1 = i.InputSimd128Register(0);
       Operand src2 = i.InputOperand(1);
@@ -2944,7 +2943,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kAVXI8x16GeU: {
-      CpuFeatureScope avx_scope(tasm(), AVX);
+      CpuFeatureScope avx_scope(masm(), AVX);
       XMMRegister src1 = i.InputSimd128Register(0);
       Operand src2 = i.InputOperand(1);
       __ vpminub(kScratchDoubleReg, src1, src2);
@@ -3183,7 +3182,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       XMMRegister src = i.InputSimd128Register(0);
       uint8_t lane = i.InputUint8(1) & 0xf;
       if (CpuFeatures::IsSupported(AVX)) {
-        CpuFeatureScope avx_scope(tasm(), AVX);
+        CpuFeatureScope avx_scope(masm(), AVX);
         if (lane < 8) {
           __ vpunpcklbw(dst, src, src);
         } else {
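For the signed i8x16 lane extract, interleaving the source with itself widens each byte into both halves of a 16-bit lane; an arithmetic right shift by 8 then leaves the sign-extended value. A one-byte scalar model of that widen-and-shift:

#include <cstdint>
#include <cstdio>

// Model of punpcklbw(src, src) followed by psraw(8) on a single byte:
// duplicating the byte into both halves of a 16-bit lane and shifting
// arithmetically right by 8 leaves the sign-extended value.
int16_t SignExtendByte(uint8_t b) {
  uint16_t widened = static_cast<uint16_t>((b << 8) | b);  // unpack with self
  return static_cast<int16_t>(widened) >> 8;  // arithmetic shift sign-extends
}

int main() {
  std::printf("%d %d\n", SignExtendByte(0xFF), SignExtendByte(0x7F));  // -1 127
}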
@@ -3234,7 +3233,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       ASSEMBLE_SIMD_PUNPCK_SHUFFLE(punpcklbw);
       break;
     case kSSES16x8UnzipHigh: {
-      CpuFeatureScope sse_scope(tasm(), SSE4_1);
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
       XMMRegister dst = i.OutputSimd128Register();
       XMMRegister src2 = dst;
       DCHECK_EQ(dst, i.InputSimd128Register(0));
@@ -3248,7 +3247,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kAVXS16x8UnzipHigh: {
-      CpuFeatureScope avx_scope(tasm(), AVX);
+      CpuFeatureScope avx_scope(masm(), AVX);
       XMMRegister dst = i.OutputSimd128Register();
       XMMRegister src2 = dst;
       if (instr->InputCount() == 2) {
@@ -3260,7 +3259,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kSSES16x8UnzipLow: {
-      CpuFeatureScope sse_scope(tasm(), SSE4_1);
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
       XMMRegister dst = i.OutputSimd128Register();
       XMMRegister src2 = dst;
       DCHECK_EQ(dst, i.InputSimd128Register(0));
@@ -3274,7 +3273,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kAVXS16x8UnzipLow: {
-      CpuFeatureScope avx_scope(tasm(), AVX);
+      CpuFeatureScope avx_scope(masm(), AVX);
       XMMRegister dst = i.OutputSimd128Register();
       XMMRegister src2 = dst;
       __ vpxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
@@ -3301,7 +3300,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kAVXS8x16UnzipHigh: {
-      CpuFeatureScope avx_scope(tasm(), AVX);
+      CpuFeatureScope avx_scope(masm(), AVX);
       XMMRegister dst = i.OutputSimd128Register();
       XMMRegister src2 = dst;
       if (instr->InputCount() == 2) {
@@ -3328,7 +3327,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kAVXS8x16UnzipLow: {
-      CpuFeatureScope avx_scope(tasm(), AVX);
+      CpuFeatureScope avx_scope(masm(), AVX);
       XMMRegister dst = i.OutputSimd128Register();
       XMMRegister src2 = dst;
       if (instr->InputCount() == 2) {
@@ -3357,7 +3356,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kAVXS8x16TransposeLow: {
-      CpuFeatureScope avx_scope(tasm(), AVX);
+      CpuFeatureScope avx_scope(masm(), AVX);
       XMMRegister dst = i.OutputSimd128Register();
       if (instr->InputCount() == 1) {
         __ vpsllw(kScratchDoubleReg, i.InputSimd128Register(0), 8);
@@ -3387,7 +3386,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kAVXS8x16TransposeHigh: {
-      CpuFeatureScope avx_scope(tasm(), AVX);
+      CpuFeatureScope avx_scope(masm(), AVX);
       XMMRegister dst = i.OutputSimd128Register();
       if (instr->InputCount() == 1) {
         __ vpsrlw(dst, i.InputSimd128Register(0), 8);
@@ -3423,7 +3422,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kAVXS8x4Reverse:
     case kAVXS8x8Reverse: {
       DCHECK_EQ(1, instr->InputCount());
-      CpuFeatureScope avx_scope(tasm(), AVX);
+      CpuFeatureScope avx_scope(masm(), AVX);
       XMMRegister dst = i.OutputSimd128Register();
       XMMRegister src = dst;
       if (arch_opcode != kAVXS8x2Reverse) {
@@ -4205,8 +4204,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
     __ j(greater, &mismatch_return, Label::kNear);
     __ Ret(parameter_slots * kSystemPointerSize, scratch_reg);
     __ bind(&mismatch_return);
-    __ DropArguments(argc_reg, scratch_reg, TurboAssembler::kCountIsInteger,
-                     TurboAssembler::kCountIncludesReceiver);
+    __ DropArguments(argc_reg, scratch_reg, MacroAssembler::kCountIsInteger,
+                     MacroAssembler::kCountIncludesReceiver);
     // We use a return instead of a jump for better return address prediction.
     __ Ret();
   } else if (additional_pop_count->IsImmediate()) {

@@ -18,7 +18,7 @@
 #include "src/codegen/ia32/assembler-ia32.h"
 #include "src/codegen/ia32/register-ia32.h"
 #include "src/codegen/machine-type.h"
-#include "src/codegen/turbo-assembler.h"
+#include "src/codegen/macro-assembler-base.h"
 #include "src/common/globals.h"
 #include "src/compiler/backend/instruction-codes.h"
 #include "src/compiler/backend/instruction-selector-impl.h"
@@ -208,7 +208,7 @@ class IA32OperandGenerator final : public OperandGenerator {
             m.object().ResolvedValue())) {
       ptrdiff_t const delta =
           m.index().ResolvedValue() +
-          TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+          MacroAssemblerBase::RootRegisterOffsetForExternalReference(
               selector()->isolate(), m.object().ResolvedValue());
       if (is_int32(delta)) {
         inputs[(*input_count)++] = TempImmediate(static_cast<int32_t>(delta));
@@ -451,7 +451,7 @@ bool InstructionSelector::CanAddressRelativeToRootsRegister(
  // 3. IsAddressableThroughRootRegister: Is the target address guaranteed to
   //    have a fixed root-relative offset? If so, we can ignore 2.
   const bool this_root_relative_offset_is_constant =
-      TurboAssemblerBase::IsAddressableThroughRootRegister(isolate(),
+      MacroAssemblerBase::IsAddressableThroughRootRegister(isolate(),
                                                            reference);
   return this_root_relative_offset_is_constant;
 }
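Both renamed call sites serve the same optimization: when an external reference sits at a fixed offset from the isolate root, the access folds into a [kRootRegister + delta] operand. A sketch of the delta computation and its is_int32 guard (addresses below are made up for illustration):

#include <cstdint>
#include <cstdio>

// An external reference can be addressed off the root register when its
// offset from the isolate root is a constant that fits the addressing mode.
bool TryRootRelative(uintptr_t root, uintptr_t target, int32_t* out_delta) {
  intptr_t delta =
      static_cast<intptr_t>(target) - static_cast<intptr_t>(root);
  if (delta < INT32_MIN || delta > INT32_MAX) return false;  // is_int32 guard
  *out_delta = static_cast<int32_t>(delta);  // operand: [kRootRegister + delta]
  return true;
}

int main() {
  int32_t delta = 0;
  std::printf("%d\n", TryRootRelative(0x10000u, 0x10400u, &delta) ? delta : -1);
}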
@@ -23,7 +23,7 @@ namespace v8 {
 namespace internal {
 namespace compiler {
 
-#define __ tasm()->
+#define __ masm()->
 
 // TODO(LOONG_dev): consider renaming these macros.
 #define TRACE_MSG(msg) \
@@ -450,8 +450,8 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
 
 #define ASSEMBLE_IEEE754_BINOP(name)                                        \
   do {                                                                      \
-    FrameScope scope(tasm(), StackFrame::MANUAL);                           \
-    UseScratchRegisterScope temps(tasm());                                  \
+    FrameScope scope(masm(), StackFrame::MANUAL);                           \
+    UseScratchRegisterScope temps(masm());                                  \
     Register scratch = temps.Acquire();                                     \
     __ PrepareCallCFunction(0, 2, scratch);                                 \
     __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \
@@ -459,8 +459,8 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
 
 #define ASSEMBLE_IEEE754_UNOP(name)                                         \
   do {                                                                      \
-    FrameScope scope(tasm(), StackFrame::MANUAL);                           \
-    UseScratchRegisterScope temps(tasm());                                  \
+    FrameScope scope(masm(), StackFrame::MANUAL);                           \
+    UseScratchRegisterScope temps(masm());                                  \
     Register scratch = temps.Acquire();                                     \
     __ PrepareCallCFunction(0, 1, scratch);                                 \
     __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
|
|||||||
|
|
||||||
namespace {
|
namespace {
|
||||||
|
|
||||||
void AdjustStackPointerForTailCall(TurboAssembler* tasm,
|
void AdjustStackPointerForTailCall(MacroAssembler* masm,
|
||||||
FrameAccessState* state,
|
FrameAccessState* state,
|
||||||
int new_slot_above_sp,
|
int new_slot_above_sp,
|
||||||
bool allow_shrinkage = true) {
|
bool allow_shrinkage = true) {
|
||||||
@ -495,10 +495,10 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
|
|||||||
StandardFrameConstants::kFixedSlotCountAboveFp;
|
StandardFrameConstants::kFixedSlotCountAboveFp;
|
||||||
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
|
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
|
||||||
if (stack_slot_delta > 0) {
|
if (stack_slot_delta > 0) {
|
||||||
tasm->Sub_d(sp, sp, stack_slot_delta * kSystemPointerSize);
|
masm->Sub_d(sp, sp, stack_slot_delta * kSystemPointerSize);
|
||||||
state->IncreaseSPDelta(stack_slot_delta);
|
state->IncreaseSPDelta(stack_slot_delta);
|
||||||
} else if (allow_shrinkage && stack_slot_delta < 0) {
|
} else if (allow_shrinkage && stack_slot_delta < 0) {
|
||||||
tasm->Add_d(sp, sp, -stack_slot_delta * kSystemPointerSize);
|
masm->Add_d(sp, sp, -stack_slot_delta * kSystemPointerSize);
|
||||||
state->IncreaseSPDelta(stack_slot_delta);
|
state->IncreaseSPDelta(stack_slot_delta);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -507,19 +507,19 @@ void AdjustStackPointerForTailCall(TurboAssembler* tasm,
|
|||||||
|
|
||||||
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
|
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
|
||||||
int first_unused_slot_offset) {
|
int first_unused_slot_offset) {
|
||||||
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
|
AdjustStackPointerForTailCall(masm(), frame_access_state(),
|
||||||
first_unused_slot_offset, false);
|
first_unused_slot_offset, false);
|
||||||
}
|
}
|
||||||
|
|
||||||
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
|
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
|
||||||
int first_unused_slot_offset) {
|
int first_unused_slot_offset) {
|
||||||
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
|
AdjustStackPointerForTailCall(masm(), frame_access_state(),
|
||||||
first_unused_slot_offset);
|
first_unused_slot_offset);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check that {kJavaScriptCallCodeStartRegister} is correct.
|
// Check that {kJavaScriptCallCodeStartRegister} is correct.
|
||||||
void CodeGenerator::AssembleCodeStartRegisterCheck() {
|
void CodeGenerator::AssembleCodeStartRegisterCheck() {
|
||||||
UseScratchRegisterScope temps(tasm());
|
UseScratchRegisterScope temps(masm());
|
||||||
Register scratch = temps.Acquire();
|
Register scratch = temps.Acquire();
|
||||||
__ ComputeCodeStartAddress(scratch);
|
__ ComputeCodeStartAddress(scratch);
|
||||||
__ Assert(eq, AbortReason::kWrongFunctionCodeStart,
|
__ Assert(eq, AbortReason::kWrongFunctionCodeStart,
|
||||||
@ -534,7 +534,7 @@ void CodeGenerator::AssembleCodeStartRegisterCheck() {
|
|||||||
// 2. test kMarkedForDeoptimizationBit in those flags; and
|
// 2. test kMarkedForDeoptimizationBit in those flags; and
|
||||||
// 3. if it is not zero then it jumps to the builtin.
|
// 3. if it is not zero then it jumps to the builtin.
|
||||||
void CodeGenerator::BailoutIfDeoptimized() {
|
void CodeGenerator::BailoutIfDeoptimized() {
|
||||||
UseScratchRegisterScope temps(tasm());
|
UseScratchRegisterScope temps(masm());
|
||||||
Register scratch = temps.Acquire();
|
Register scratch = temps.Acquire();
|
||||||
int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
|
int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
|
||||||
__ Ld_d(scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset));
|
__ Ld_d(scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset));
|
||||||
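The numbered comment compiles down to a load plus a single-bit test. A scalar sketch of step 2; the bit position is an assumption for illustration only:

#include <cstdint>

// Assumed bit position, for illustration only.
constexpr uint32_t kMarkedForDeoptimizationBit = 1u << 0;

// Step 2 of the comment above: a nonzero result means the code object was
// marked while this frame was on the stack, and step 3 must tail-call the
// lazy-deoptimization builtin.
bool MarkedForDeoptimization(uint32_t code_kind_specific_flags) {
  return (code_kind_specific_flags & kMarkedForDeoptimizationBit) != 0;
}

int main() { return MarkedForDeoptimization(0x1) ? 0 : 1; }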
@@ -628,7 +628,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     case kArchCallJSFunction: {
       Register func = i.InputRegister(0);
       if (v8_flags.debug_code) {
-        UseScratchRegisterScope temps(tasm());
+        UseScratchRegisterScope temps(masm());
         Register scratch = temps.Acquire();
         // Check the function's context matches the context argument.
         __ Ld_d(scratch, FieldMemOperand(func, JSFunction::kContextOffset));
@@ -642,7 +642,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArchPrepareCallCFunction: {
-      UseScratchRegisterScope temps(tasm());
+      UseScratchRegisterScope temps(masm());
       Register scratch = temps.Acquire();
       int const num_gp_parameters = ParamField::decode(instr->opcode());
       int const num_fp_parameters = FPParamField::decode(instr->opcode());
@@ -749,7 +749,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       {
         // We don't actually want to generate a pile of code for this, so just
         // claim there is a stack frame, without generating one.
-        FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+        FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE);
         __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
                 RelocInfo::CODE_TARGET);
       }
@@ -829,7 +829,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       } else {
         DCHECK_EQ(kArchAtomicStoreWithWriteBarrier, arch_opcode);
         DCHECK_EQ(addressing_mode, kMode_MRI);
-        UseScratchRegisterScope temps(tasm());
+        UseScratchRegisterScope temps(masm());
         Register scratch = temps.Acquire();
         __ Add_d(scratch, object, Operand(i.InputInt64(1)));
         __ amswap_db_d(zero_reg, value, scratch);
@@ -843,7 +843,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArchStackSlot: {
-      UseScratchRegisterScope temps(tasm());
+      UseScratchRegisterScope temps(masm());
       Register scratch = temps.Acquire();
       FrameOffset offset =
           frame_access_state()->GetFrameOffset(i.InputInt32(0));
@@ -1225,8 +1225,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     case kLoong64Float64Mod: {
       // TODO(turbofan): implement directly.
-      FrameScope scope(tasm(), StackFrame::MANUAL);
-      UseScratchRegisterScope temps(tasm());
+      FrameScope scope(masm(), StackFrame::MANUAL);
+      UseScratchRegisterScope temps(masm());
       Register scratch = temps.Acquire();
       __ PrepareCallCFunction(0, 2, scratch);
       __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
@@ -1363,7 +1363,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ ftintrz_w_s(scratch_d, i.InputDoubleRegister(0));
       __ movfr2gr_s(i.OutputRegister(), scratch_d);
       if (set_overflow_to_min_i32) {
-        UseScratchRegisterScope temps(tasm());
+        UseScratchRegisterScope temps(masm());
         Register scratch = temps.Acquire();
         // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
         // because INT32_MIN allows easier out-of-bounds detection.
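The comment explains the indicator rewrite: the hardware truncation reports overflow as INT32_MAX, but INT32_MIN lets later bounds checks use a single signed comparison. A scalar model of the normalization:

#include <cstdint>
#include <cstdio>
#include <limits>

// After the hardware truncation, the output holds INT32_MAX whenever the
// input overflowed; rewrite that indicator to INT32_MIN so a later bounds
// check needs only one signed comparison.
int32_t NormalizeOverflowIndicator(int32_t truncated, bool overflowed) {
  if (overflowed && truncated == std::numeric_limits<int32_t>::max()) {
    return std::numeric_limits<int32_t>::min();
  }
  return truncated;
}

int main() { std::printf("%d\n", NormalizeOverflowIndicator(INT32_MAX, true)); }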
@@ -1392,7 +1392,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kLoong64Float64ToInt64: {
-      UseScratchRegisterScope temps(tasm());
+      UseScratchRegisterScope temps(masm());
       Register scratch = temps.Acquire();
       FPURegister scratch_d = kScratchDoubleReg;
 
@@ -1438,7 +1438,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       bool set_overflow_to_min_i32 = MiscField::decode(instr->opcode());
       __ Ftintrz_uw_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch);
       if (set_overflow_to_min_i32) {
-        UseScratchRegisterScope temps(tasm());
+        UseScratchRegisterScope temps(masm());
         Register scratch = temps.Acquire();
         // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
         // because 0 allows easier out-of-bounds detection.
@@ -1863,11 +1863,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
      << "\"";    \
   UNIMPLEMENTED();
 
-void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
+void AssembleBranchToLabels(CodeGenerator* gen, MacroAssembler* masm,
                             Instruction* instr, FlagsCondition condition,
                             Label* tlabel, Label* flabel, bool fallthru) {
 #undef __
-#define __ tasm->
+#define __ masm->
   Loong64OperandConverter i(gen, instr);
 
   // LOONG64 does not have condition code flags, so compare and branch are
@@ -1882,7 +1882,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
     __ Branch(tlabel, cc, t8, Operand(zero_reg));
   } else if (instr->arch_opcode() == kLoong64Add_d ||
              instr->arch_opcode() == kLoong64Sub_d) {
-    UseScratchRegisterScope temps(tasm);
+    UseScratchRegisterScope temps(masm);
     Register scratch = temps.Acquire();
     Register scratch2 = temps.Acquire();
     Condition cc = FlagsConditionToConditionOvf(condition);
@@ -1941,7 +1941,7 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
   }
   if (!fallthru) __ Branch(flabel);  // no fallthru to flabel.
 #undef __
-#define __ tasm()->
+#define __ masm()->
 }
 
 // Assembles branches after an instruction.
@@ -1949,7 +1949,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
   Label* tlabel = branch->true_label;
   Label* flabel = branch->false_label;
 
-  AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel,
+  AssembleBranchToLabels(this, masm(), instr, branch->condition, tlabel, flabel,
                          branch->fallthru);
 }
 
@@ -2014,7 +2014,7 @@ void CodeGenerator::AssembleArchTrap(Instruction* instr,
   };
   auto ool = zone()->New<OutOfLineTrap>(this, instr);
   Label* tlabel = ool->entry();
-  AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true);
+  AssembleBranchToLabels(this, masm(), instr, condition, tlabel, nullptr, true);
 }
 #endif  // V8_ENABLE_WEBASSEMBLY
 
@@ -2041,7 +2041,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
     return;
   } else if (instr->arch_opcode() == kLoong64Add_d ||
              instr->arch_opcode() == kLoong64Sub_d) {
-    UseScratchRegisterScope temps(tasm());
+    UseScratchRegisterScope temps(masm());
     Register scratch = temps.Acquire();
     Condition cc = FlagsConditionToConditionOvf(condition);
     // Check for overflow creates 1 or 0 for result.
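With no flags register on LOONG64, "check for overflow creates 1 or 0" means computing the overflow predicate into a general register. The classic bit identity for signed addition, which the scratch-register sequence evaluates:

#include <cstdint>
#include <cstdio>

// Signed-add overflow as a 0/1 value: overflow occurred iff the operands
// share a sign and the (wrapped) sum's sign differs from it.
int64_t AddOverflowsToBit(int64_t a, int64_t b) {
  uint64_t ua = static_cast<uint64_t>(a);
  uint64_t ub = static_cast<uint64_t>(b);
  uint64_t sum = ua + ub;  // unsigned arithmetic wraps without UB
  return static_cast<int64_t>((~(ua ^ ub) & (ua ^ sum)) >> 63);
}

int main() {
  std::printf("%lld %lld\n",
              static_cast<long long>(AddOverflowsToBit(INT64_MAX, 1)),  // 1
              static_cast<long long>(AddOverflowsToBit(1, 2)));         // 0
}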
@ -2289,7 +2289,7 @@ void CodeGenerator::AssembleConstructFrame() {
|
|||||||
// exception unconditionally. Thereby we can avoid the integer overflow
|
// exception unconditionally. Thereby we can avoid the integer overflow
|
||||||
// check in the condition code.
|
// check in the condition code.
|
||||||
if (required_slots * kSystemPointerSize < v8_flags.stack_size * KB) {
|
if (required_slots * kSystemPointerSize < v8_flags.stack_size * KB) {
|
||||||
UseScratchRegisterScope temps(tasm());
|
UseScratchRegisterScope temps(masm());
|
||||||
Register scratch = temps.Acquire();
|
Register scratch = temps.Acquire();
|
||||||
__ Ld_d(scratch, FieldMemOperand(
|
__ Ld_d(scratch, FieldMemOperand(
|
||||||
kWasmInstanceRegister,
|
kWasmInstanceRegister,
|
||||||
@ -2444,7 +2444,7 @@ AllocatedOperand CodeGenerator::Push(InstructionOperand* source) {
|
|||||||
__ Push(g.ToRegister(source));
|
__ Push(g.ToRegister(source));
|
||||||
frame_access_state()->IncreaseSPDelta(new_slots);
|
frame_access_state()->IncreaseSPDelta(new_slots);
|
||||||
} else if (source->IsStackSlot()) {
|
} else if (source->IsStackSlot()) {
|
||||||
UseScratchRegisterScope temps(tasm());
|
UseScratchRegisterScope temps(masm());
|
||||||
Register scratch = temps.Acquire();
|
Register scratch = temps.Acquire();
|
||||||
__ Ld_d(scratch, g.ToMemOperand(source));
|
__ Ld_d(scratch, g.ToMemOperand(source));
|
||||||
__ Push(scratch);
|
__ Push(scratch);
|
||||||
@ -2467,7 +2467,7 @@ void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) {
|
|||||||
if (dest->IsRegister()) {
|
if (dest->IsRegister()) {
|
||||||
__ Pop(g.ToRegister(dest));
|
__ Pop(g.ToRegister(dest));
|
||||||
} else if (dest->IsStackSlot()) {
|
} else if (dest->IsStackSlot()) {
|
||||||
UseScratchRegisterScope temps(tasm());
|
UseScratchRegisterScope temps(masm());
|
||||||
Register scratch = temps.Acquire();
|
Register scratch = temps.Acquire();
|
||||||
__ Pop(scratch);
|
__ Pop(scratch);
|
||||||
__ St_d(scratch, g.ToMemOperand(dest));
|
__ St_d(scratch, g.ToMemOperand(dest));
|
||||||
@ -2495,7 +2495,7 @@ void CodeGenerator::MoveToTempLocation(InstructionOperand* source,
|
|||||||
MachineRepresentation rep) {
|
MachineRepresentation rep) {
|
||||||
// Must be kept in sync with {MoveTempLocationTo}.
|
// Must be kept in sync with {MoveTempLocationTo}.
|
||||||
DCHECK(!source->IsImmediate());
|
DCHECK(!source->IsImmediate());
|
||||||
move_cycle_.temps.emplace(tasm());
|
move_cycle_.temps.emplace(masm());
|
||||||
auto& temps = *move_cycle_.temps;
|
auto& temps = *move_cycle_.temps;
|
||||||
// Temporarily exclude the reserved scratch registers while we pick one to
|
// Temporarily exclude the reserved scratch registers while we pick one to
|
||||||
// resolve the move cycle. Re-include them immediately afterwards as they
|
// resolve the move cycle. Re-include them immediately afterwards as they
|
||||||
@ -2585,7 +2585,7 @@ void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest,
|
|||||||
void CodeGenerator::SetPendingMove(MoveOperands* move) {
|
void CodeGenerator::SetPendingMove(MoveOperands* move) {
|
||||||
InstructionOperand* src = &move->source();
|
InstructionOperand* src = &move->source();
|
||||||
InstructionOperand* dst = &move->destination();
|
InstructionOperand* dst = &move->destination();
|
||||||
UseScratchRegisterScope temps(tasm());
|
UseScratchRegisterScope temps(masm());
|
||||||
if (src->IsConstant() || (src->IsStackSlot() && dst->IsStackSlot())) {
|
if (src->IsConstant() || (src->IsStackSlot() && dst->IsStackSlot())) {
|
||||||
Register temp = temps.Acquire();
|
Register temp = temps.Acquire();
|
||||||
move_cycle_.scratch_regs.set(temp);
|
move_cycle_.scratch_regs.set(temp);
|
||||||
@ -2642,7 +2642,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
|
|||||||
if (destination->IsRegister()) {
|
if (destination->IsRegister()) {
|
||||||
__ Ld_d(g.ToRegister(destination), src);
|
__ Ld_d(g.ToRegister(destination), src);
|
||||||
} else {
|
} else {
|
||||||
UseScratchRegisterScope temps(tasm());
|
UseScratchRegisterScope temps(masm());
|
||||||
Register scratch = temps.Acquire();
|
Register scratch = temps.Acquire();
|
||||||
__ Ld_d(scratch, src);
|
__ Ld_d(scratch, src);
|
||||||
__ St_d(scratch, g.ToMemOperand(destination));
|
__ St_d(scratch, g.ToMemOperand(destination));
|
||||||
@ -2650,7 +2650,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
|
|||||||
} else if (source->IsConstant()) {
|
} else if (source->IsConstant()) {
|
||||||
Constant src = g.ToConstant(source);
|
Constant src = g.ToConstant(source);
|
||||||
if (destination->IsRegister() || destination->IsStackSlot()) {
|
if (destination->IsRegister() || destination->IsStackSlot()) {
|
||||||
UseScratchRegisterScope temps(tasm());
|
UseScratchRegisterScope temps(masm());
|
||||||
Register scratch = temps.Acquire();
|
Register scratch = temps.Acquire();
|
||||||
Register dst =
|
Register dst =
|
||||||
destination->IsRegister() ? g.ToRegister(destination) : scratch;
|
destination->IsRegister() ? g.ToRegister(destination) : scratch;
|
||||||
@ -2697,7 +2697,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
|
|||||||
if (base::bit_cast<int32_t>(src.ToFloat32()) == 0) {
|
if (base::bit_cast<int32_t>(src.ToFloat32()) == 0) {
|
||||||
__ St_d(zero_reg, dst);
|
__ St_d(zero_reg, dst);
|
||||||
} else {
|
} else {
|
||||||
UseScratchRegisterScope temps(tasm());
|
UseScratchRegisterScope temps(masm());
|
||||||
Register scratch = temps.Acquire();
|
Register scratch = temps.Acquire();
|
||||||
__ li(scratch, Operand(base::bit_cast<int32_t>(src.ToFloat32())));
|
__ li(scratch, Operand(base::bit_cast<int32_t>(src.ToFloat32())));
|
||||||
__ St_d(scratch, dst);
|
__ St_d(scratch, dst);
|
||||||
@ -2748,7 +2748,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
|
|||||||
// Dispatch on the source and destination operand kinds. Not all
|
// Dispatch on the source and destination operand kinds. Not all
|
||||||
// combinations are possible.
|
// combinations are possible.
|
||||||
if (source->IsRegister()) {
|
if (source->IsRegister()) {
|
||||||
UseScratchRegisterScope temps(tasm());
|
UseScratchRegisterScope temps(masm());
|
||||||
Register scratch = temps.Acquire();
|
Register scratch = temps.Acquire();
|
||||||
// Register-register.
|
// Register-register.
|
||||||
Register src = g.ToRegister(source);
|
Register src = g.ToRegister(source);
|
||||||
@ -2770,7 +2770,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
     // Since the Ld instruction may need a scratch reg,
     // we should not use both of the two scratch registers in
     // UseScratchRegisterScope here.
-    UseScratchRegisterScope temps(tasm());
+    UseScratchRegisterScope temps(masm());
     Register scratch = temps.Acquire();
     FPURegister scratch_d = kScratchDoubleReg;
     MemOperand src = g.ToMemOperand(source);
@ -2796,7 +2796,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
     }
   } else if (source->IsFPStackSlot()) {
     DCHECK(destination->IsFPStackSlot());
-    UseScratchRegisterScope temps(tasm());
+    UseScratchRegisterScope temps(masm());
     Register scratch = temps.Acquire();
     FPURegister scratch_d = kScratchDoubleReg;
     MemOperand src = g.ToMemOperand(source);
@ -360,7 +360,7 @@ void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
       selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
     ptrdiff_t const delta =
         g.GetIntegerConstantValue(index) +
-        TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+        MacroAssemblerBase::RootRegisterOffsetForExternalReference(
             selector->isolate(), m.ResolvedValue());
     // Check that the delta is a 32-bit integer due to the limitations of
     // immediate operands.
@ -560,7 +560,7 @@ void InstructionSelector::VisitStore(Node* node) {
       CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
     ptrdiff_t const delta =
         g.GetIntegerConstantValue(index) +
-        TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+        MacroAssemblerBase::RootRegisterOffsetForExternalReference(
            isolate(), m.ResolvedValue());
     // Check that the delta is a 32-bit integer due to the limitations of
     // immediate operands.
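Both hunks fold a constant external-reference address into an access relative to the roots register; the 32-bit check the comment mentions matters because the load/store immediate field cannot encode a wider displacement. A minimal model of that computation (hypothetical names, not the V8 API):

#include <cstdint>
#include <limits>

// Can `target` be reached as [root_register + offset] with a 32-bit offset?
bool TryRootRelative(uint64_t root_register_value, uint64_t target,
                     int32_t* offset_out) {
  int64_t delta = static_cast<int64_t>(target - root_register_value);
  if (delta < std::numeric_limits<int32_t>::min() ||
      delta > std::numeric_limits<int32_t>::max()) {
    return false;  // does not fit the instruction's immediate field
  }
  *offset_out = static_cast<int32_t>(delta);
  return true;
}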
@ -1398,21 +1398,33 @@ void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
 }
 
 void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
-  // On LoongArch64, int32 values should all be sign-extended to 64-bit, so
-  // no need to sign-extend them here.
-  // But when call to a host function in simulator, if the function return an
-  // int32 value, the simulator do not sign-extend to int64, because in
-  // simulator we do not know the function whether return an int32 or int64.
-#ifdef USE_SIMULATOR
   Node* value = node->InputAt(0);
-  if (value->opcode() == IrOpcode::kCall) {
+  if ((value->opcode() == IrOpcode::kLoad ||
+       value->opcode() == IrOpcode::kLoadImmutable) &&
+      CanCover(node, value)) {
+    // Generate sign-extending load.
+    LoadRepresentation load_rep = LoadRepresentationOf(value->op());
+    InstructionCode opcode = kArchNop;
+    switch (load_rep.representation()) {
+      case MachineRepresentation::kBit:  // Fall through.
+      case MachineRepresentation::kWord8:
+        opcode = load_rep.IsUnsigned() ? kLoong64Ld_bu : kLoong64Ld_b;
+        break;
+      case MachineRepresentation::kWord16:
+        opcode = load_rep.IsUnsigned() ? kLoong64Ld_hu : kLoong64Ld_h;
+        break;
+      case MachineRepresentation::kWord32:
+        opcode = kLoong64Ld_w;
+        break;
+      default:
+        UNREACHABLE();
+    }
+    EmitLoad(this, value, opcode, node);
+  } else {
     Loong64OperandGenerator g(this);
     Emit(kLoong64Sll_w, g.DefineAsRegister(node), g.UseRegister(value),
          g.TempImmediate(0));
-    return;
   }
-#endif
-  EmitIdentity(node);
 }
 
 bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
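The rewritten selector no longer special-cases calls under the simulator; instead, when the int32 input is a (possibly immutable) load that no other node observes, it covers the load and emits a sign-extending load directly, since Ld_w/Ld_h/Ld_b on LoongArch64 already write a sign-extended 64-bit result. A plain-C++ sketch of the equivalence being exploited (illustrative, not V8 code):

#include <cassert>
#include <cstdint>
#include <cstring>

// ChangeInt32ToInt64(Load32(p)) is exactly a sign-extending 32-bit load,
// so the two operations can be fused into one instruction.
int64_t LoadWordSignExtended(const void* p) {
  int32_t word;
  std::memcpy(&word, p, sizeof(word));  // the 32-bit load (Ld_w)
  return static_cast<int64_t>(word);    // sign extension comes for free
}

int main() {
  int32_t negative = -5;
  assert(LoadWordSignExtended(&negative) == INT64_C(-5));
  return 0;
}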
File diff suppressed because it is too large
@ -775,7 +775,7 @@ int PrepareForTailCallLatency() {
 int AssertLatency() { return 1; }
 
 int PrepareCallCFunctionLatency() {
-  int frame_alignment = TurboAssembler::ActivationFrameAlignment();
+  int frame_alignment = MacroAssembler::ActivationFrameAlignment();
   if (frame_alignment > kSystemPointerSize) {
     return 1 + DsubuLatency(false) + AndLatency(false) + 1;
   } else {
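The latency model charges extra cycles when the activation frame alignment exceeds one pointer slot, because the call prologue must then round the stack pointer down with a subtract-and-mask sequence. Roughly the arithmetic being priced (a sketch with assumed values, not V8 code):

#include <cassert>
#include <cstdint>

// Round sp down to a power-of-two frame alignment, as a call prologue would.
uint64_t AlignStackPointer(uint64_t sp, uint64_t frame_alignment) {
  return sp & ~(frame_alignment - 1);  // the And the latency model counts
}

int main() {
  assert(AlignStackPointer(0x7ffc, 16) == 0x7ff0);
  assert(AlignStackPointer(0x8000, 16) == 0x8000);  // already aligned
  return 0;
}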
@ -1481,21 +1481,33 @@ void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
 }
 
 void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
-  // On MIPS64, int32 values should all be sign-extended to 64-bit, so
-  // no need to sign-extend them here.
-  // But when call to a host function in simulator, if the function return an
-  // int32 value, the simulator do not sign-extend to int64, because in
-  // simulator we do not know the function whether return an int32 or int64.
-#ifdef USE_SIMULATOR
   Node* value = node->InputAt(0);
-  if (value->opcode() == IrOpcode::kCall) {
+  if ((value->opcode() == IrOpcode::kLoad ||
+       value->opcode() == IrOpcode::kLoadImmutable) &&
+      CanCover(node, value)) {
+    // Generate sign-extending load.
+    LoadRepresentation load_rep = LoadRepresentationOf(value->op());
+    InstructionCode opcode = kArchNop;
+    switch (load_rep.representation()) {
+      case MachineRepresentation::kBit:  // Fall through.
+      case MachineRepresentation::kWord8:
+        opcode = load_rep.IsUnsigned() ? kMips64Lbu : kMips64Lb;
+        break;
+      case MachineRepresentation::kWord16:
+        opcode = load_rep.IsUnsigned() ? kMips64Lhu : kMips64Lh;
+        break;
+      case MachineRepresentation::kWord32:
+        opcode = kMips64Lw;
+        break;
+      default:
+        UNREACHABLE();
+    }
+    EmitLoad(this, value, opcode, node);
+  } else {
     Mips64OperandGenerator g(this);
     Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(value),
          g.TempImmediate(0));
-    return;
   }
-#endif
-  EmitIdentity(node);
 }
 
 bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
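The MIPS64 selector gets the same rewrite as the LoongArch64 one above; its fallback path still emits a 32-bit shift by zero (kMips64Shl with immediate 0), which on a 64-bit MIPS core writes the low 32 bits back sign-extended into the full destination register. A C++ model of that fallback (illustrative only):

#include <cassert>
#include <cstdint>

// Model of `sll rd, rs, 0` on MIPS64: the 32-bit result is written back
// sign-extended, which implements ChangeInt32ToInt64 in one instruction.
int64_t ShiftLeft32ByZero(uint64_t reg) {
  int32_t low = static_cast<int32_t>(reg);  // operate on the low 32 bits
  return static_cast<int64_t>(low);         // sign-extend on writeback
}

int main() {
  assert(ShiftLeft32ByZero(UINT64_C(0x00000000FFFFFFFF)) == INT64_C(-1));
  assert(ShiftLeft32ByZero(UINT64_C(0x1234567812345678)) ==
         INT64_C(0x12345678));
  return 0;
}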
@ -23,7 +23,7 @@ namespace v8 {
 namespace internal {
 namespace compiler {
 
-#define __ tasm()->
+#define __ masm()->
 
 #define kScratchReg r11
 
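The renamed macro keeps V8's long-standing emission idiom: inside the code generator, `__` abbreviates a call through the assembler accessor, so emission code reads like an assembly listing. A stripped-down sketch of the pattern (simplified stand-ins, not the real classes):

// Simplified stand-ins for illustration only.
struct MacroAssembler {
  void Push(int reg) { /* would emit a push of `reg` */ }
};

class CodeGenerator {
 public:
  MacroAssembler* masm() { return &masm_; }

  void EmitSomething() {
#define __ masm()->
    __ Push(3);  // expands to masm()->Push(3)
#undef __
  }

 private:
  MacroAssembler masm_;
};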
@ -170,9 +170,9 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
   }
 
   void Generate() final {
-    ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
+    ConstantPoolUnavailableScope constant_pool_unavailable(masm());
     if (COMPRESS_POINTERS_BOOL) {
-      __ DecompressTaggedPointer(value_, value_);
+      __ DecompressTagged(value_, value_);
     }
     __ CheckPageFlag(
         value_, scratch0_,
@ -409,7 +409,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
 
 #define ASSEMBLE_FLOAT_MODULO()                                     \
   do {                                                              \
-    FrameScope scope(tasm(), StackFrame::MANUAL);                   \
+    FrameScope scope(masm(), StackFrame::MANUAL);                   \
     __ PrepareCallCFunction(0, 2, kScratchReg);                     \
     __ MovToFloatParameters(i.InputDoubleRegister(0),               \
                             i.InputDoubleRegister(1));              \
@ -422,7 +422,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
   do {                                                                        \
     /* TODO(bmeurer): We should really get rid of this special instruction, */ \
     /* and generate a CallAddress instruction instead. */                     \
-    FrameScope scope(tasm(), StackFrame::MANUAL);                             \
+    FrameScope scope(masm(), StackFrame::MANUAL);                             \
     __ PrepareCallCFunction(0, 1, kScratchReg);                               \
     __ MovToFloatParameter(i.InputDoubleRegister(0));                         \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1);    \
@ -435,7 +435,7 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
   do {                                                                        \
     /* TODO(bmeurer): We should really get rid of this special instruction, */ \
     /* and generate a CallAddress instruction instead. */                     \
-    FrameScope scope(tasm(), StackFrame::MANUAL);                             \
+    FrameScope scope(masm(), StackFrame::MANUAL);                             \
     __ PrepareCallCFunction(0, 2, kScratchReg);                               \
     __ MovToFloatParameters(i.InputDoubleRegister(0),                         \
                             i.InputDoubleRegister(1));                        \
@ -680,20 +680,20 @@ void CodeGenerator::AssemblePrepareTailCall() {
 
 namespace {
 
-void FlushPendingPushRegisters(TurboAssembler* tasm,
+void FlushPendingPushRegisters(MacroAssembler* masm,
                                FrameAccessState* frame_access_state,
                                ZoneVector<Register>* pending_pushes) {
   switch (pending_pushes->size()) {
     case 0:
       break;
     case 1:
-      tasm->Push((*pending_pushes)[0]);
+      masm->Push((*pending_pushes)[0]);
       break;
     case 2:
-      tasm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
+      masm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
       break;
     case 3:
-      tasm->Push((*pending_pushes)[0], (*pending_pushes)[1],
+      masm->Push((*pending_pushes)[0], (*pending_pushes)[1],
                  (*pending_pushes)[2]);
       break;
     default:
@ -704,7 +704,7 @@ void FlushPendingPushRegisters(TurboAssembler* tasm,
 }
 
 void AdjustStackPointerForTailCall(
-    TurboAssembler* tasm, FrameAccessState* state, int new_slot_above_sp,
+    MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp,
     ZoneVector<Register>* pending_pushes = nullptr,
     bool allow_shrinkage = true) {
   int current_sp_offset = state->GetSPToFPSlotCount() +
@ -712,15 +712,15 @@ void AdjustStackPointerForTailCall(
   int stack_slot_delta = new_slot_above_sp - current_sp_offset;
   if (stack_slot_delta > 0) {
     if (pending_pushes != nullptr) {
-      FlushPendingPushRegisters(tasm, state, pending_pushes);
+      FlushPendingPushRegisters(masm, state, pending_pushes);
     }
-    tasm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize), r0);
+    masm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize), r0);
     state->IncreaseSPDelta(stack_slot_delta);
   } else if (allow_shrinkage && stack_slot_delta < 0) {
     if (pending_pushes != nullptr) {
-      FlushPendingPushRegisters(tasm, state, pending_pushes);
+      FlushPendingPushRegisters(masm, state, pending_pushes);
     }
-    tasm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize), r0);
+    masm->AddS64(sp, sp, Operand(-stack_slot_delta * kSystemPointerSize), r0);
     state->IncreaseSPDelta(stack_slot_delta);
   }
 }
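AdjustStackPointerForTailCall compares the frame's current SP-to-FP distance with the slot offset the tail call needs, then moves sp by the difference (flushing any batched pushes first). The core arithmetic, extracted into a standalone sketch (hypothetical types, not V8 code):

#include <cassert>
#include <cstdint>

constexpr int kSystemPointerSize = 8;

// Byte delta to add to sp so that `new_slot_above_sp` slots separate sp
// from the frame; negative values grow the stack.
int64_t SpAdjustmentBytes(int current_sp_offset, int new_slot_above_sp,
                          bool allow_shrinkage) {
  int stack_slot_delta = new_slot_above_sp - current_sp_offset;
  if (stack_slot_delta > 0 || (allow_shrinkage && stack_slot_delta < 0)) {
    return -static_cast<int64_t>(stack_slot_delta) * kSystemPointerSize;
  }
  return 0;  // nothing to do
}

int main() {
  assert(SpAdjustmentBytes(2, 5, true) == -24);  // grow by three slots
  assert(SpAdjustmentBytes(5, 2, false) == 0);   // shrinking disallowed
  return 0;
}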
@ -742,7 +742,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
           LocationOperand::cast(move->destination()));
       InstructionOperand source(move->source());
       AdjustStackPointerForTailCall(
-          tasm(), frame_access_state(),
+          masm(), frame_access_state(),
           destination_location.index() - pending_pushes.size(),
           &pending_pushes);
       // Pushes of non-register data types are not supported.
@ -752,20 +752,20 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
       // TODO(arm): We can push more than 3 registers at once. Add support in
       // the macro-assembler for pushing a list of registers.
       if (pending_pushes.size() == 3) {
-        FlushPendingPushRegisters(tasm(), frame_access_state(),
+        FlushPendingPushRegisters(masm(), frame_access_state(),
                                   &pending_pushes);
       }
       move->Eliminate();
     }
-    FlushPendingPushRegisters(tasm(), frame_access_state(), &pending_pushes);
+    FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes);
   }
-  AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                 first_unused_slot_offset, nullptr, false);
 }
 
 void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
                                              int first_unused_slot_offset) {
-  AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                 first_unused_slot_offset);
 }
 
@ -793,8 +793,8 @@ void CodeGenerator::BailoutIfDeoptimized() {
   }
 
   int offset = InstructionStream::kCodeOffset - InstructionStream::kHeaderSize;
-  __ LoadTaggedPointerField(
-      r11, MemOperand(kJavaScriptCallCodeStartRegister, offset), r0);
+  __ LoadTaggedField(r11, MemOperand(kJavaScriptCallCodeStartRegister, offset),
+                     r0);
   __ LoadS32(r11, FieldMemOperand(r11, Code::kKindSpecificFlagsOffset), r0);
   __ TestBit(r11, InstructionStream::kMarkedForDeoptimizationBit);
   __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
@ -810,7 +810,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
   switch (opcode) {
     case kArchCallCodeObject: {
       v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
-          tasm());
+          masm());
       if (HasRegisterInput(instr, 0)) {
         Register reg = i.InputRegister(0);
         DCHECK_IMPLIES(
@ -883,7 +883,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       } else {
         // We cannot use the constant pool to load the target since
         // we've already restored the caller's frame.
-        ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
+        ConstantPoolUnavailableScope constant_pool_unavailable(masm());
         __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
       }
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
@ -904,18 +904,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kArchCallJSFunction: {
       v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
-          tasm());
+          masm());
       Register func = i.InputRegister(0);
       if (v8_flags.debug_code) {
         // Check the function's context matches the context argument.
-        __ LoadTaggedPointerField(
+        __ LoadTaggedField(
             kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset), r0);
         __ CmpS64(cp, kScratchReg);
         __ Assert(eq, AbortReason::kWrongFunctionContext);
       }
       static_assert(kJavaScriptCallCodeStartRegister == r5, "ABI mismatch");
-      __ LoadTaggedPointerField(
-          r5, FieldMemOperand(func, JSFunction::kCodeOffset), r0);
+      __ LoadTaggedField(r5, FieldMemOperand(func, JSFunction::kCodeOffset),
+                         r0);
       __ CallCodeObject(r5);
       RecordCallPosition(instr);
       DCHECK_EQ(LeaveRC, i.OutputRCBit());
@ -1058,7 +1058,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       {
        // We don't actually want to generate a pile of code for this, so just
        // claim there is a stack frame, without generating one.
-        FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
+        FrameScope scope(masm(), StackFrame::NO_FRAME_TYPE);
        __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
                RelocInfo::CODE_TARGET);
       }
@ -2880,13 +2880,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       ASSEMBLE_LOAD_INTEGER(lwz, plwz, lwzx, false);
       break;
     }
-    case kPPC_LoadDecompressTaggedPointer: {
-      CHECK(instr->HasOutput());
-      ASSEMBLE_LOAD_INTEGER(lwz, plwz, lwzx, false);
-      __ add(i.OutputRegister(), i.OutputRegister(), kPtrComprCageBaseRegister);
-      break;
-    }
-    case kPPC_LoadDecompressAnyTagged: {
+    case kPPC_LoadDecompressTagged: {
       CHECK(instr->HasOutput());
       ASSEMBLE_LOAD_INTEGER(lwz, plwz, lwzx, false);
       __ add(i.OutputRegister(), i.OutputRegister(), kPtrComprCageBaseRegister);
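The two decompression opcodes collapse into one because, with pointer compression, decompressing a "pointer" and an "any" tagged value had become the same operation: a 32-bit load followed by adding the pointer-compression cage base. A standalone sketch of that arithmetic (simplified; the real scheme also distinguishes Smis):

#include <cassert>
#include <cstdint>

// Decompress a 32-bit on-heap tagged value to a full pointer by adding
// the cage base, mirroring the `add ... kPtrComprCageBaseRegister` above.
uint64_t DecompressTagged(uint64_t cage_base, uint32_t compressed) {
  return cage_base + static_cast<uint64_t>(compressed);
}

int main() {
  const uint64_t kCageBase = UINT64_C(0x0000100000000000);  // example base
  assert(DecompressTagged(kCageBase, 0x00DEAD00u) ==
         UINT64_C(0x000010000000AD00) + UINT64_C(0x00DE0000));
  return 0;
}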
@ -3320,7 +3314,7 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
     AssembleDeconstructFrame();
   }
   // Constant pool is unavailable since the frame has been destructed
-  ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
+  ConstantPoolUnavailableScope constant_pool_unavailable(masm());
   if (drop_jsargs) {
     // We must pop all arguments from the stack (including the receiver).
     // The number of arguments without the receiver is
@ -3334,8 +3328,8 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
       __ mov(argc_reg, Operand(parameter_slots));
       __ bind(&skip);
     }
-    __ DropArguments(argc_reg, TurboAssembler::kCountIsInteger,
-                     TurboAssembler::kCountIncludesReceiver);
+    __ DropArguments(argc_reg, MacroAssembler::kCountIsInteger,
+                     MacroAssembler::kCountIncludesReceiver);
   } else if (additional_pop_count->IsImmediate()) {
     int additional_count = g.ToConstant(additional_pop_count).ToInt32();
     __ Drop(parameter_slots + additional_count);
@ -3391,7 +3385,7 @@ void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) {
   frame_access_state()->IncreaseSPDelta(-new_slots);
   PPCOperandConverter g(this, nullptr);
   if (dest->IsFloatStackSlot() || dest->IsDoubleStackSlot()) {
-    UseScratchRegisterScope temps(tasm());
+    UseScratchRegisterScope temps(masm());
     Register scratch = temps.Acquire();
     __ Pop(scratch);
     __ StoreU64(scratch, g.ToMemOperand(dest), r0);
@ -411,8 +411,7 @@ namespace compiler {
   V(PPC_S128Store64Lane)              \
   V(PPC_StoreCompressTagged)          \
   V(PPC_LoadDecompressTaggedSigned)   \
-  V(PPC_LoadDecompressTaggedPointer)  \
-  V(PPC_LoadDecompressAnyTagged)
+  V(PPC_LoadDecompressTagged)
 
 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes
@ -331,8 +331,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
     case kPPC_LoadSimd128:
     case kPPC_Peek:
     case kPPC_LoadDecompressTaggedSigned:
-    case kPPC_LoadDecompressTaggedPointer:
-    case kPPC_LoadDecompressAnyTagged:
+    case kPPC_LoadDecompressTagged:
     case kPPC_S128Load8Splat:
     case kPPC_S128Load16Splat:
     case kPPC_S128Load32Splat:
@ -214,10 +214,10 @@ static void VisitLoadCommon(InstructionSelector* selector, Node* node,
       opcode = kPPC_LoadDecompressTaggedSigned;
       break;
     case MachineRepresentation::kTaggedPointer:
-      opcode = kPPC_LoadDecompressTaggedPointer;
+      opcode = kPPC_LoadDecompressTagged;
       break;
     case MachineRepresentation::kTagged:
-      opcode = kPPC_LoadDecompressAnyTagged;
+      opcode = kPPC_LoadDecompressTagged;
       break;
 #else
     case MachineRepresentation::kTaggedSigned:  // Fall through.
Some files were not shown because too many files have changed in this diff