From 5767c9560414a5826a8186aba8ed6cfd5906904e Mon Sep 17 00:00:00 2001 From: Omer Katz Date: Thu, 8 Sep 2022 11:43:15 +0200 Subject: [PATCH 0001/1772] [heap] Update page promotion heuristics This CL includes the following changes: 1) Ignore ShouldReduceMemory for MinorMC (since it can't move objects) 2) Make FLAG_page_promotion more explicit in the condition 3) Take wasted bytes into account for MinorMC (full GC can compact and "reset" wasted bytes) Bug: v8:12612 Change-Id: I64d214e692b8ecd20189c59e2a77807f05e43817 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3879606 Reviewed-by: Michael Lippautz Commit-Queue: Omer Katz Cr-Commit-Position: refs/heads/main@{#83086} --- src/heap/mark-compact.cc | 26 ++++++++++++++++---------- src/heap/mark-compact.h | 1 + 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc index fb55e462fd..38630ed87f 100644 --- a/src/heap/mark-compact.cc +++ b/src/heap/mark-compact.cc @@ -4150,10 +4150,8 @@ class Evacuator : public Malloced { // NewSpacePages with more live bytes than this threshold qualify for fast // evacuation. static intptr_t NewSpacePageEvacuationThreshold() { - if (v8_flags.page_promotion) - return v8_flags.page_promotion_threshold * - MemoryChunkLayout::AllocatableMemoryInDataPage() / 100; - return MemoryChunkLayout::AllocatableMemoryInDataPage() + kTaggedSize; + return v8_flags.page_promotion_threshold * + MemoryChunkLayout::AllocatableMemoryInDataPage() / 100; } Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor, @@ -4469,12 +4467,15 @@ size_t CreateAndExecuteEvacuationTasks( return wanted_num_tasks; } -bool ShouldMovePage(Page* p, intptr_t live_bytes, +bool ShouldMovePage(Page* p, intptr_t live_bytes, intptr_t wasted_bytes, + MemoryReductionMode memory_reduction_mode, AlwaysPromoteYoung always_promote_young) { Heap* heap = p->heap(); - const bool reduce_memory = heap->ShouldReduceMemory(); - return !reduce_memory && !p->NeverEvacuate() && - (live_bytes > Evacuator::NewSpacePageEvacuationThreshold()) && + return v8_flags.page_promotion && + (memory_reduction_mode == MemoryReductionMode::kNone) && + !p->NeverEvacuate() && + (live_bytes + wasted_bytes > + Evacuator::NewSpacePageEvacuationThreshold()) && (always_promote_young == AlwaysPromoteYoung::kYes || heap->new_space()->IsPromotionCandidate(p)) && heap->CanExpandOldGeneration(live_bytes); @@ -4511,7 +4512,11 @@ void MarkCompactCollector::EvacuatePagesInParallel() { intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page); DCHECK_LT(0, live_bytes_on_page); live_bytes += live_bytes_on_page; - if (ShouldMovePage(page, live_bytes_on_page, AlwaysPromoteYoung::kYes) || + MemoryReductionMode memory_reduction_mode = + heap()->ShouldReduceMemory() ? 
MemoryReductionMode::kShouldReduceMemory + : MemoryReductionMode::kNone; + if (ShouldMovePage(page, live_bytes_on_page, 0, memory_reduction_mode, + AlwaysPromoteYoung::kYes) || force_page_promotion) { EvacuateNewSpacePageVisitor::Move(page); DCHECK_EQ(heap()->old_space(), page->owner()); @@ -6669,7 +6674,8 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() { intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page); DCHECK_LT(0, live_bytes_on_page); live_bytes += live_bytes_on_page; - if (ShouldMovePage(page, live_bytes_on_page, AlwaysPromoteYoung::kNo)) { + if (ShouldMovePage(page, live_bytes_on_page, page->wasted_memory(), + MemoryReductionMode::kNone, AlwaysPromoteYoung::kNo)) { EvacuateNewSpacePageVisitor::Move(page); evacuation_items.emplace_back(ParallelWorkItem{}, page); } diff --git a/src/heap/mark-compact.h b/src/heap/mark-compact.h index 40550d4441..9d960cd360 100644 --- a/src/heap/mark-compact.h +++ b/src/heap/mark-compact.h @@ -173,6 +173,7 @@ class LiveObjectVisitor : AllStatic { }; enum class AlwaysPromoteYoung { kYes, kNo }; +enum class MemoryReductionMode { kNone, kShouldReduceMemory }; enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD }; enum class RememberedSetUpdatingMode { ALL, OLD_TO_NEW_ONLY }; From 33806ecad7bead12d493481cdf90269718031da4 Mon Sep 17 00:00:00 2001 From: Adam Klein Date: Thu, 8 Sep 2022 20:43:22 +0000 Subject: [PATCH 0002/1772] Revert "[fuchsia] Migrate d8 to a component framework v2 Fuchsia component" This reverts commit 50802793f74c0592b2406b5c1298b3d4153092f9. Reason for revert: blocking v8 roll: https://ci.chromium.org/ui/p/chromium/builders/try/fuchsia_x64/1301026/overview Original change's description: > [fuchsia] Migrate d8 to a component framework v2 Fuchsia component > > In the process, switch to using the Fuchsia GN SDK templates for > building the component and package. > > Bug: v8:12589 > Change-Id: I9b5a82accb0da2067e83bc80d691133550ce82cd > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3879501 > Auto-Submit: Greg Thompson > Reviewed-by: Michael Achenbach > Reviewed-by: Alexander Schulze > Reviewed-by: Victor Gomes > Commit-Queue: Greg Thompson > Cr-Commit-Position: refs/heads/main@{#83084} Bug: v8:12589 Change-Id: I94ce2ef0e7cba5d39c8d18ca7dc7264289325e99 No-Presubmit: true No-Tree-Checks: true No-Try: true Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3885079 Bot-Commit: Rubber Stamper Reviewed-by: Michael Achenbach Commit-Queue: Rubber Stamper Auto-Submit: Adam Klein Cr-Commit-Position: refs/heads/main@{#83087} --- BUILD.gn | 19 ++++++------------- gni/OWNERS | 4 ++-- gni/v8.cml | 21 --------------------- gni/v8.cmx | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 60 insertions(+), 36 deletions(-) delete mode 100644 gni/v8.cml create mode 100644 gni/v8.cmx diff --git a/BUILD.gn b/BUILD.gn index 43667839c5..78bddc354e 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -6385,25 +6385,18 @@ group("v8_archive") { # TODO(dglazkov): Remove the "!build_with_chromium" condition once this clause # is removed from Chromium. 
if (is_fuchsia && !build_with_chromium) { - import("//build/config/fuchsia/generate_runner_scripts.gni") - import("//third_party/fuchsia-sdk/sdk/build/component.gni") - import("//third_party/fuchsia-sdk/sdk/build/package.gni") + import("//build/config/fuchsia/rules.gni") - fuchsia_component("d8_component") { + cr_fuchsia_package("d8_fuchsia_pkg") { testonly = true - manifest = "gni/v8.cml" - data_deps = [ ":d8" ] - } - - fuchsia_package("d8_pkg") { - testonly = true - package_name = "d8" - deps = [ ":d8_component" ] + binary = ":d8" + manifest = "gni/v8.cmx" + package_name_override = "d8" } fuchsia_package_installer("d8_fuchsia") { testonly = true - package = ":d8_pkg" + package = ":d8_fuchsia_pkg" package_name = "d8" } } diff --git a/gni/OWNERS b/gni/OWNERS index c20b8de5a2..fa1262b503 100644 --- a/gni/OWNERS +++ b/gni/OWNERS @@ -1,5 +1,5 @@ file:../INFRA_OWNERS -per-file v8.cml=victorgomes@chromium.org +per-file v8.cmx=victorgomes@chromium.org per-file release_branch_toggle.gni=v8-ci-autoroll-builder@chops-service-accounts.iam.gserviceaccount.com -per-file release_branch_toggle.gni=vahl@chromium.org +per-file release_branch_toggle.gni=vahl@chromium.org \ No newline at end of file diff --git a/gni/v8.cml b/gni/v8.cml deleted file mode 100644 index 4d74c7626c..0000000000 --- a/gni/v8.cml +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2022 The V8 project authors -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. -{ - include: [ "syslog/client.shard.cml" ], - program: { - runner: "elf", - binary: "d8", - }, - use: [ - { - protocol: [ - "fuchsia.kernel.VmexResource", - ], - }, - { - storage: "tmp", - path: "/tmp", - }, - ], -} diff --git a/gni/v8.cmx b/gni/v8.cmx new file mode 100644 index 0000000000..45fd74a09f --- /dev/null +++ b/gni/v8.cmx @@ -0,0 +1,52 @@ +{ + "facets": { + "fuchsia.test": { + "system-services": [ + "fuchsia.kernel.VmexResource" + ] + } + }, + "sandbox": { + "dev": [ + "null", + "zero" + ], + "features": [ + "deprecated-ambient-replace-as-executable", + "isolated-cache-storage", + "isolated-persistent-storage", + "isolated-temp", + "root-ssl-certificates", + "vulkan" + ], + "services": [ + "fuchsia.accessibility.semantics.SemanticsManager", + "fuchsia.camera3.DeviceWatcher", + "fuchsia.device.NameProvider", + "fuchsia.fonts.Provider", + "fuchsia.intl.PropertyProvider", + "fuchsia.kernel.VmexResource", + "fuchsia.logger.Log", + "fuchsia.logger.LogSink", + "fuchsia.media.Audio", + "fuchsia.media.SessionAudioConsumerFactory", + "fuchsia.media.drm.Widevine", + "fuchsia.mediacodec.CodecFactory", + "fuchsia.memorypressure.Provider", + "fuchsia.net.NameLookup", + "fuchsia.net.interfaces.State", + "fuchsia.posix.socket.Provider", + "fuchsia.process.Launcher", + "fuchsia.sys.Environment", + "fuchsia.sys.Launcher", + "fuchsia.sys.Loader", + "fuchsia.sysmem.Allocator", + "fuchsia.ui.input.ImeService", + "fuchsia.ui.input.ImeVisibilityService", + "fuchsia.ui.scenic.Scenic", + "fuchsia.ui.policy.Presenter", + "fuchsia.vulkan.loader.Loader", + "fuchsia.web.ContextProvider" + ] + } +} From 95b02431bdef1a34bec401bb1ab67132bae58a41 Mon Sep 17 00:00:00 2001 From: Frank Tang Date: Wed, 7 Sep 2022 21:34:11 -0700 Subject: [PATCH 0003/1772] [Temporal] Fix AddInstant and AddZonedDateTime Change AddInstant to use BigInt::FromNumber(isolate, factory->NewNumber instead of BigInt::FromInt64 to convert from double to BigInt. Sync AddZonedDateTime with https://github.com/tc39/proposal-temporal/pull/2303 which call AddInstant as ? instead of ! 
marking. Spec Text: https://tc39.es/proposal-temporal/#sec-temporal-addinstant https://tc39.es/proposal-temporal/#sec-temporal-addzoneddatetime PR: https://github.com/tc39/proposal-temporal/pull/2303 Bug: v8:11544 Change-Id: I4bd176294780f761341c25a5f71643b437f99c82 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3859165 Commit-Queue: Frank Tang Reviewed-by: Adam Klein Cr-Commit-Position: refs/heads/main@{#83088} --- src/objects/js-temporal-objects.cc | 110 ++++++++++++++--------------- test/test262/test262.status | 8 --- 2 files changed, 55 insertions(+), 63 deletions(-) diff --git a/src/objects/js-temporal-objects.cc b/src/objects/js-temporal-objects.cc index f7d4e6fab8..0ce1f96fa2 100644 --- a/src/objects/js-temporal-objects.cc +++ b/src/objects/js-temporal-objects.cc @@ -5448,10 +5448,9 @@ MaybeHandle AddZonedDateTime(Isolate* isolate, // 2. If all of years, months, weeks, and days are 0, then if (duration.years == 0 && duration.months == 0 && duration.weeks == 0 && time_duration.days == 0) { - // a. Return ! AddInstant(epochNanoseconds, hours, minutes, seconds, + // a. Return ? AddInstant(epochNanoseconds, hours, minutes, seconds, // milliseconds, microseconds, nanoseconds). - return AddInstant(isolate, epoch_nanoseconds, time_duration) - .ToHandleChecked(); + return AddInstant(isolate, epoch_nanoseconds, time_duration); } // 3. Let instant be ! CreateTemporalInstant(epochNanoseconds). Handle instant = @@ -5522,13 +5521,12 @@ MaybeHandle AddZonedDateTime(Isolate* isolate, BuiltinTimeZoneGetInstantFor(isolate, time_zone, intermediate_date_time, Disambiguation::kCompatible, method_name), BigInt); - // 10. Return ! AddInstant(intermediateInstant.[[Nanoseconds]], hours, + // 10. Return ? AddInstant(intermediateInstant.[[Nanoseconds]], hours, // minutes, seconds, milliseconds, microseconds, nanoseconds). time_duration.days = 0; return AddInstant(isolate, handle(intermediate_instant->nanoseconds(), isolate), - time_duration) - .ToHandleChecked(); + time_duration); } Maybe NanosecondsToDays(Isolate* isolate, @@ -5854,65 +5852,67 @@ MaybeHandle AddInstant(Isolate* isolate, Handle epoch_nanoseconds, const TimeDurationRecord& addend) { TEMPORAL_ENTER_FUNC(); + Factory* factory = isolate->factory(); // 1. Assert: hours, minutes, seconds, milliseconds, microseconds, and // nanoseconds are integer Number values. // 2. Let result be epochNanoseconds + ℤ(nanoseconds) + // ℤ(microseconds) × 1000ℤ + ℤ(milliseconds) × 10^6ℤ + ℤ(seconds) × 10^9ℤ + // ℤ(minutes) × 60ℤ × 10^9ℤ + ℤ(hours) × 3600ℤ × 10^9ℤ. 
- Handle result; - ASSIGN_RETURN_ON_EXCEPTION( - isolate, result, - BigInt::Add(isolate, epoch_nanoseconds, - BigInt::FromInt64(isolate, addend.nanoseconds)), - BigInt); - Handle temp; - ASSIGN_RETURN_ON_EXCEPTION( - isolate, temp, - BigInt::Multiply(isolate, BigInt::FromInt64(isolate, addend.microseconds), - BigInt::FromInt64(isolate, 1000)), - BigInt); - ASSIGN_RETURN_ON_EXCEPTION(isolate, result, - BigInt::Add(isolate, result, temp), BigInt); - ASSIGN_RETURN_ON_EXCEPTION( - isolate, temp, - BigInt::Multiply(isolate, BigInt::FromInt64(isolate, addend.milliseconds), - BigInt::FromInt64(isolate, 1000000)), - BigInt); - ASSIGN_RETURN_ON_EXCEPTION(isolate, result, - BigInt::Add(isolate, result, temp), BigInt); + // epochNanoseconds + ℤ(nanoseconds) + Handle result = + BigInt::Add( + isolate, epoch_nanoseconds, + BigInt::FromNumber(isolate, factory->NewNumber(addend.nanoseconds)) + .ToHandleChecked()) + .ToHandleChecked(); - ASSIGN_RETURN_ON_EXCEPTION( - isolate, temp, - BigInt::Multiply(isolate, BigInt::FromInt64(isolate, addend.seconds), - BigInt::FromInt64(isolate, 1000000000)), - BigInt); - ASSIGN_RETURN_ON_EXCEPTION(isolate, result, - BigInt::Add(isolate, result, temp), BigInt); + // + ℤ(microseconds) × 1000ℤ + Handle temp = + BigInt::Multiply( + isolate, + BigInt::FromNumber(isolate, factory->NewNumber(addend.microseconds)) + .ToHandleChecked(), + BigInt::FromInt64(isolate, 1000)) + .ToHandleChecked(); + result = BigInt::Add(isolate, result, temp).ToHandleChecked(); - ASSIGN_RETURN_ON_EXCEPTION( - isolate, temp, - BigInt::Multiply(isolate, BigInt::FromInt64(isolate, addend.minutes), - BigInt::FromInt64(isolate, 1000000000)), - BigInt); - ASSIGN_RETURN_ON_EXCEPTION( - isolate, temp, - BigInt::Multiply(isolate, temp, BigInt::FromInt64(isolate, 60)), BigInt); - ASSIGN_RETURN_ON_EXCEPTION(isolate, result, - BigInt::Add(isolate, result, temp), BigInt); + // + ℤ(milliseconds) × 10^6ℤ + temp = BigInt::Multiply(isolate, + BigInt::FromNumber( + isolate, factory->NewNumber(addend.milliseconds)) + .ToHandleChecked(), + BigInt::FromInt64(isolate, 1000000)) + .ToHandleChecked(); + result = BigInt::Add(isolate, result, temp).ToHandleChecked(); - ASSIGN_RETURN_ON_EXCEPTION( - isolate, temp, - BigInt::Multiply(isolate, BigInt::FromInt64(isolate, addend.hours), - BigInt::FromInt64(isolate, 1000000000)), - BigInt); - ASSIGN_RETURN_ON_EXCEPTION( - isolate, temp, - BigInt::Multiply(isolate, temp, BigInt::FromInt64(isolate, 3600)), - BigInt); - ASSIGN_RETURN_ON_EXCEPTION(isolate, result, - BigInt::Add(isolate, result, temp), BigInt); + // + ℤ(seconds) × 10^9ℤ + temp = BigInt::Multiply( + isolate, + BigInt::FromNumber(isolate, factory->NewNumber(addend.seconds)) + .ToHandleChecked(), + BigInt::FromInt64(isolate, 1000000000)) + .ToHandleChecked(); + result = BigInt::Add(isolate, result, temp).ToHandleChecked(); + + // + ℤ(minutes) × 60ℤ × 10^9ℤ. + temp = BigInt::Multiply( + isolate, + BigInt::FromNumber(isolate, factory->NewNumber(addend.minutes)) + .ToHandleChecked(), + BigInt::FromInt64(isolate, 60000000000)) + .ToHandleChecked(); + result = BigInt::Add(isolate, result, temp).ToHandleChecked(); + + // + ℤ(hours) × 3600ℤ × 10^9ℤ. + temp = BigInt::Multiply( + isolate, + BigInt::FromNumber(isolate, factory->NewNumber(addend.hours)) + .ToHandleChecked(), + BigInt::FromInt64(isolate, 3600000000000)) + .ToHandleChecked(); + result = BigInt::Add(isolate, result, temp).ToHandleChecked(); // 3. If ! IsValidEpochNanoseconds(result) is false, throw a RangeError // exception. 
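To make the step-2 scaling above concrete, here is a standalone sketch (hypothetical values, plain int64_t arithmetic rather than V8's BigInt objects; the real code must route each component through BigInt::FromNumber because the addend fields are arbitrary float64 integers, such as Number.MAX_VALUE in the test expectations removed below, which do not fit in int64_t):

  #include <cstdint>
  #include <cstdio>

  int main() {
    // Hypothetical addend: 1 h, 2 min, 3 s, 4 ms, 5 us, 6 ns on epoch 0.
    // Small enough for int64_t, unlike the general case handled above.
    int64_t epoch_ns = 0;
    int64_t result = epoch_ns + 6                  // nanoseconds
                     + 5 * 1000LL                  // microseconds * 10^3
                     + 4 * 1000000LL               // milliseconds * 10^6
                     + 3 * 1000000000LL            // seconds * 10^9
                     + 2 * 60LL * 1000000000LL     // minutes * 60 * 10^9
                     + 1 * 3600LL * 1000000000LL;  // hours * 3600 * 10^9
    std::printf("%lld\n", static_cast<long long>(result));  // 3723004005006
    return 0;
  }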
diff --git a/test/test262/test262.status b/test/test262/test262.status index c5817263b9..2d7e0ef89b 100644 --- a/test/test262/test262.status +++ b/test/test262/test262.status @@ -583,9 +583,7 @@ 'intl402/Temporal/Calendar/prototype/yearMonthFromFields/order-of-operations': [FAIL], 'intl402/Temporal/Duration/compare/relativeto-hour': [FAIL], 'built-ins/Temporal/PlainTime/prototype/equals/argument-string-no-implicit-midnight': [FAIL], - 'built-ins/Temporal/Duration/prototype/add/nanoseconds-is-number-max-value-1': [FAIL], 'built-ins/Temporal/Duration/prototype/round/total-duration-nanoseconds-too-large-with-zoned-datetime': [PASS, FAIL], - 'built-ins/Temporal/Duration/prototype/subtract/nanoseconds-is-number-max-value-1': [FAIL], 'built-ins/Temporal/Duration/prototype/total/relativeto-zoneddatetime-with-fractional-days-different-sign': [FAIL], 'built-ins/Temporal/Duration/prototype/total/relativeto-zoneddatetime-with-fractional-days': [FAIL], 'built-ins/Temporal/PlainTime/prototype/add/argument-string-duration-too-large': [FAIL], @@ -593,8 +591,6 @@ 'intl402/Temporal/TimeZone/prototype/getNextTransition/subtract-second-and-nanosecond-from-last-transition': [FAIL], 'intl402/Temporal/TimeZone/prototype/getPreviousTransition/nanoseconds-subtracted-or-added-at-dst-transition': [FAIL], - 'built-ins/Temporal/Instant/prototype/add/minimum-maximum-instant': [FAIL], - 'built-ins/Temporal/Instant/prototype/subtract/minimum-maximum-instant': [FAIL], 'intl402/Temporal/TimeZone/from/etc-timezone': [FAIL], 'intl402/Temporal/TimeZone/from/iana-legacy-names': [FAIL], 'intl402/Temporal/TimeZone/prototype/getNextTransition/transition-at-instant-boundaries': [FAIL], @@ -629,10 +625,6 @@ # UBSan complain about static_cast from double in AddISODate() 'built-ins/Temporal/Calendar/prototype/dateAdd/argument-duration-years-and-months-number-max-value': [SKIP], - # Flaky and cause timeout sometimes - 'built-ins/Temporal/Duration/prototype/add/days-is-number-max-value-with-zoneddatetime': [SKIP], - 'built-ins/Temporal/Duration/prototype/subtract/days-is-number-max-value-with-zoneddatetime': [SKIP], - 'harness/temporalHelpers-one-shift-time-zone': [SKIP], 'built-ins/Temporal/Instant/compare/instant-string': [FAIL], From 68de20179cfec137d7197bb633786f238293f0cf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Samuel=20Gro=C3=9F?= Date: Thu, 8 Sep 2022 16:48:03 +0000 Subject: [PATCH 0004/1772] [sandbox] Skip mkgrokdump test in non-sandbox mode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When the sandbox is disabled, object layouts are now different as ExternalPointerSlots are then 64-bit (raw pointers) instead of 32-bit (ExternalPointerHandles). Bug: v8:10391 Change-Id: Ia03d1ae9300fad96e40b77f0ed9544a1a118b74a Cq-Include-Trybots: luci.v8.try.triggered:v8_linux64_no_sandbox_dbg_ng_triggered Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3884075 Reviewed-by: Igor Sheludko Auto-Submit: Samuel Groß Reviewed-by: Michael Achenbach Commit-Queue: Samuel Groß Cr-Commit-Position: refs/heads/main@{#83089} --- test/mkgrokdump/mkgrokdump.status | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/mkgrokdump/mkgrokdump.status b/test/mkgrokdump/mkgrokdump.status index f6a9bf559e..9ed78919fb 100644 --- a/test/mkgrokdump/mkgrokdump.status +++ b/test/mkgrokdump/mkgrokdump.status @@ -5,8 +5,8 @@ [ # Only test for default mode x64. 
-['variant != default or arch != x64 or lite_mode or not pointer_compression', { +['variant != default or arch != x64 or lite_mode or not pointer_compression or not sandbox', { '*': [SKIP], -}], # variant != default or arch != x64 or lite_mode +}], # variant != default or arch != x64 or lite_mode or not pointer_compression or not sandbox ] From 14d9b9a246a47948efad6d723a644404c264b623 Mon Sep 17 00:00:00 2001 From: Frank Tang Date: Thu, 8 Sep 2022 14:18:54 -0700 Subject: [PATCH 0005/1772] Reland "[Temporal] Use double/int32_t instead of int64_t for duration parsing" This is a reland of commit a165e82ea79130243e9934b8f6c2bb09021ba9ae The reason of revert is SUMMARY: UndefinedBehaviorSanitizer: undefined-behavior ../../src/objects/js-temporal-objects.cc:3837:22 which is the line "nanoseconds_mv = std::round((seconds_mv - std::floor(seconds_mv)) * 1e9);" where seconds_mv is a double and nanoseconds_mv is a int32_t In this reland, we change the type of nanoseconds_mv to double to avoid the ubsan error. Original change's description: > [Temporal] Use double/int32_t instead of int64_t for duration parsing > > Use double and int32_t instead of int64_t in duration parsing result > so we can parse very large duration fields as infinity and throw RangeError in later stages. The three fractional parts can hold up value from 0 to 999,999,999 so we use int32_t to hold it. Other part could be infinity so we use double to hold it. Also rearrange the order of the three int32_t in the struct ParsedISO8601Duration after all the double > > Bug: v8:11544 > Change-Id: I7e5b02f7c7bbb60997f1419f016aed61dd3e0d6c > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3840761 > Reviewed-by: Shu-yu Guo > Commit-Queue: Frank Tang > Cr-Commit-Position: refs/heads/main@{#82754} Bug: v8:11544 Change-Id: If8b72cb4912d8b4fc4c286fc856ea59df5cf0bb7 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3858576 Reviewed-by: Adam Klein Commit-Queue: Frank Tang Cr-Commit-Position: refs/heads/main@{#83090} --- src/objects/js-temporal-objects.cc | 51 ++++++++++++++++-------------- src/temporal/temporal-parser.cc | 47 ++++++++------------------- src/temporal/temporal-parser.h | 28 ++++++++-------- test/test262/test262.status | 2 -- 4 files changed, 56 insertions(+), 72 deletions(-) diff --git a/src/objects/js-temporal-objects.cc b/src/objects/js-temporal-objects.cc index 0ce1f96fa2..6b10a0bb37 100644 --- a/src/objects/js-temporal-objects.cc +++ b/src/objects/js-temporal-objects.cc @@ -3765,7 +3765,7 @@ Maybe CreateDurationRecord(Isolate* isolate, return Just(duration); } -inline int64_t IfEmptyReturnZero(int64_t value) { +inline double IfEmptyReturnZero(double value) { return value == ParsedISO8601Duration::kEmpty ? 0 : value; } @@ -3774,18 +3774,10 @@ Maybe ParseTemporalDurationString(Isolate* isolate, Handle iso_string) { TEMPORAL_ENTER_FUNC(); // In this funciton, we use 'double' as type for all mathematical values - // except the three three units < seconds. For all others, we use 'double' // because in // https://tc39.es/proposal-temporal/#sec-properties-of-temporal-duration-instances // they are "A float64-representable integer representing the number" in the // internal slots. - // For milliseconds_mv, microseconds_mv, and nanoseconds_mv, we use int32_t - // instead because their maximum number during calculation is 999999999, - // which can be encoded in 30 bits and the parsed.seconds_fraction return from - // the ISO8601 parser are stored in an integer, in the unit of nanoseconds. 
-  // Therefore, use "int32_t" will avoid rounding error for the final
-  // calculating of nanoseconds_mv.
-  //
-  //
   // 1. Let duration be ParseText(StringToCodePoints(isoString),
   // TemporalDurationString).
   // 2. If duration is a List of errors, throw a RangeError exception.
@@ -3827,7 +3819,11 @@ Maybe<DurationRecord> ParseTemporalDurationString(Isolate* isolate,
                      Nothing<DurationRecord>());
     }
     // b. Let fHoursDigits be the substring of CodePointsToString(fHours)
-    // from 1. c. Let fHoursScale be the length of fHoursDigits. d. Let
+    // from 1.
+    //
+    // c. Let fHoursScale be the length of fHoursDigits.
+    //
+    // d. Let
     // minutesMV be ! ToIntegerOrInfinity(fHoursDigits) / 10^fHoursScale × 60.
     minutes_mv = IfEmptyReturnZero(parsed->hours_fraction) * 60.0 / 1e9;
   // 10. Else,
@@ -3847,9 +3843,12 @@ Maybe<DurationRecord> ParseTemporalDurationString(Isolate* isolate,
                      Nothing<DurationRecord>());
     }
     // b. Let fMinutesDigits be the substring of CodePointsToString(fMinutes)
-    // from 1. c. Let fMinutesScale be the length of fMinutesDigits. d. Let
-    // secondsMV be ! ToIntegerOrInfinity(fMinutesDigits) / 10^fMinutesScale
-    // × 60.
+    // from 1.
+    //
+    // c. Let fMinutesScale be the length of fMinutesDigits.
+    //
+    // d. Let secondsMV be ! ToIntegerOrInfinity(fMinutesDigits) /
+    // 10^fMinutesScale × 60.
     seconds_mv = IfEmptyReturnZero(parsed->minutes_fraction) * 60.0 / 1e9;
     // 12. Else if seconds is not empty, then
   } else if (parsed->whole_seconds != ParsedISO8601Duration::kEmpty) {
@@ -3860,13 +3859,21 @@ Maybe<DurationRecord> ParseTemporalDurationString(Isolate* isolate,
     // a. Let secondsMV be remainder(minutesMV, 1) × 60.
     seconds_mv = (minutes_mv - std::floor(minutes_mv)) * 60.0;
   }
-  int32_t milliseconds_mv;
-  int32_t nanoseconds_mv;
+  double milliseconds_mv, microseconds_mv, nanoseconds_mv;
+  // Note: In steps 14-17, we calculate from nanoseconds_mv to milliseconds_mv
+  // in the reverse order of the spec text to avoid numerical errors that
+  // would be introduced by multiple divisions inside the remainder
+  // operations. If we strictly followed the spec order using double, the end
+  // result of nanoseconds_mv would be wrong due to numerical errors.
+  //
   // 14. If fSeconds is not empty, then
   if (parsed->seconds_fraction != ParsedISO8601Duration::kEmpty) {
     // a. Let fSecondsDigits be the substring of CodePointsToString(fSeconds)
-    // from 1. b. Let fSecondsScale be the length of fSecondsDigits. c. Let
-    // millisecondsMV be ! ToIntegerOrInfinity(fSecondsDigits) /
+    // from 1.
+    //
+    // b. Let fSecondsScale be the length of fSecondsDigits.
+    //
+    // c. Let millisecondsMV be ! ToIntegerOrInfinity(fSecondsDigits) /
     // 10^fSecondsScale × 1000.
     DCHECK_LE(IfEmptyReturnZero(parsed->seconds_fraction), 1e9);
     nanoseconds_mv = std::round(IfEmptyReturnZero(parsed->seconds_fraction));
@@ -3875,15 +3882,13 @@ Maybe<DurationRecord> ParseTemporalDurationString(Isolate* isolate,
     // a. Let millisecondsMV be remainder(secondsMV, 1) × 1000.
     nanoseconds_mv = std::round((seconds_mv - std::floor(seconds_mv)) * 1e9);
   }
-  milliseconds_mv = nanoseconds_mv / 1000000;
+  milliseconds_mv = std::floor(nanoseconds_mv / 1000000);
   // 16. Let microsecondsMV be remainder(millisecondsMV, 1) × 1000.
-  int32_t microseconds_mv = (nanoseconds_mv / 1000) % 1000;
+  microseconds_mv = std::floor(nanoseconds_mv / 1000) -
+                    std::floor(nanoseconds_mv / 1000000) * 1000;
   // 17. Let nanosecondsMV be remainder(microsecondsMV, 1) × 1000.
-  nanoseconds_mv = nanoseconds_mv % 1000;
+  nanoseconds_mv -= std::floor(nanoseconds_mv / 1000) * 1000;
 
-  DCHECK_LE(milliseconds_mv, 1000);
-  DCHECK_LE(microseconds_mv, 1000);
-  DCHECK_LE(nanoseconds_mv, 1000);
   // 18.
If sign contains the code point 0x002D (HYPHEN-MINUS) or 0x2212 (MINUS // SIGN), then a. Let factor be −1. // 19. Else, diff --git a/src/temporal/temporal-parser.cc b/src/temporal/temporal-parser.cc index 93ebde4591..5e07ac8d6f 100644 --- a/src/temporal/temporal-parser.cc +++ b/src/temporal/temporal-parser.cc @@ -186,14 +186,6 @@ int32_t ScanFractionalPart(base::Vector str, int32_t s, int32_t* out) { return cur - s; } -template -int32_t ScanFractionalPart(base::Vector str, int32_t s, int64_t* out) { - int32_t out32; - int32_t len = ScanFractionalPart(str, s, &out32); - *out = out32; - return len; -} - // TimeFraction: FractionalPart SCAN_FORWARD(TimeFractionalPart, FractionalPart, int32_t) @@ -1438,21 +1430,10 @@ bool SatisfyTemporalCalendarString(base::Vector str, // Duration -SCAN_FORWARD(TimeFractionalPart, FractionalPart, int64_t) - -template -int32_t ScanFraction(base::Vector str, int32_t s, int64_t* out) { - if (str.length() < (s + 2) || !IsDecimalSeparator(str[s])) return 0; - int32_t len = ScanTimeFractionalPart(str, s + 1, out); - return (len == 0) ? 0 : len + 1; -} - -SCAN_FORWARD(TimeFraction, Fraction, int64_t) - // Digits : Digit [Digits] template -int32_t ScanDigits(base::Vector str, int32_t s, int64_t* out) { +int32_t ScanDigits(base::Vector str, int32_t s, double* out) { if (str.length() < (s + 1) || !IsDecimalDigit(str[s])) return 0; *out = ToInt(str[s]); int32_t len = 1; @@ -1463,38 +1444,38 @@ int32_t ScanDigits(base::Vector str, int32_t s, int64_t* out) { return len; } -SCAN_FORWARD(DurationYears, Digits, int64_t) -SCAN_FORWARD(DurationMonths, Digits, int64_t) -SCAN_FORWARD(DurationWeeks, Digits, int64_t) -SCAN_FORWARD(DurationDays, Digits, int64_t) +SCAN_FORWARD(DurationYears, Digits, double) +SCAN_FORWARD(DurationMonths, Digits, double) +SCAN_FORWARD(DurationWeeks, Digits, double) +SCAN_FORWARD(DurationDays, Digits, double) // DurationWholeHours : Digits -SCAN_FORWARD(DurationWholeHours, Digits, int64_t) +SCAN_FORWARD(DurationWholeHours, Digits, double) // DurationWholeMinutes : Digits -SCAN_FORWARD(DurationWholeMinutes, Digits, int64_t) +SCAN_FORWARD(DurationWholeMinutes, Digits, double) // DurationWholeSeconds : Digits -SCAN_FORWARD(DurationWholeSeconds, Digits, int64_t) +SCAN_FORWARD(DurationWholeSeconds, Digits, double) // DurationHoursFraction : TimeFraction -SCAN_FORWARD(DurationHoursFraction, TimeFraction, int64_t) +SCAN_FORWARD(DurationHoursFraction, TimeFraction, int32_t) // DurationMinutesFraction : TimeFraction -SCAN_FORWARD(DurationMinutesFraction, TimeFraction, int64_t) +SCAN_FORWARD(DurationMinutesFraction, TimeFraction, int32_t) // DurationSecondsFraction : TimeFraction -SCAN_FORWARD(DurationSecondsFraction, TimeFraction, int64_t) +SCAN_FORWARD(DurationSecondsFraction, TimeFraction, int32_t) #define DURATION_WHOLE_FRACTION_DESIGNATOR(Name, name, d) \ template \ int32_t ScanDurationWhole##Name##FractionDesignator( \ base::Vector str, int32_t s, ParsedISO8601Duration* r) { \ int32_t cur = s; \ - int64_t whole = ParsedISO8601Duration::kEmpty; \ + double whole = ParsedISO8601Duration::kEmpty; \ cur += ScanDurationWhole##Name(str, cur, &whole); \ if (cur == s) return 0; \ - int64_t fraction = ParsedISO8601Duration::kEmpty; \ + int32_t fraction = ParsedISO8601Duration::kEmpty; \ int32_t len = ScanDuration##Name##Fraction(str, cur, &fraction); \ cur += len; \ if (str.length() < (cur + 1) || AsciiAlphaToLower(str[cur++]) != (d)) \ @@ -1570,7 +1551,7 @@ int32_t ScanDurationTime(base::Vector str, int32_t s, int32_t ScanDuration##Name##Designator(base::Vector str, 
int32_t s, \ ParsedISO8601Duration* r) { \ int32_t cur = s; \ - int64_t name; \ + double name; \ if ((cur += ScanDuration##Name(str, cur, &name)) == s) return 0; \ if (str.length() < (cur + 1) || AsciiAlphaToLower(str[cur++]) != (d)) { \ return 0; \ diff --git a/src/temporal/temporal-parser.h b/src/temporal/temporal-parser.h index 785ae8de11..593741bfcf 100644 --- a/src/temporal/temporal-parser.h +++ b/src/temporal/temporal-parser.h @@ -95,20 +95,20 @@ struct ParsedISO8601Result { * field is "undefined" after parsing for all fields except sign. */ struct ParsedISO8601Duration { - int64_t sign; // Sign production - int64_t years; // DurationYears production - int64_t months; // DurationMonths production - int64_t weeks; // DurationWeeks production - int64_t days; // DurationDays production - int64_t whole_hours; // DurationWholeHours production - int64_t hours_fraction; // DurationHoursFraction, in unit of 1e-9 hours - int64_t whole_minutes; // DurationWholeMinutes production - int64_t minutes_fraction; // DurationMinuteFraction, in unit of 1e-9 minutes - int64_t whole_seconds; // DurationWholeSeconds production - int64_t seconds_fraction; // DurationSecondFraction, in unit of nanosecond ( + double sign; // Sign production + double years; // DurationYears production + double months; // DurationMonths production + double weeks; // DurationWeeks production + double days; // DurationDays production + double whole_hours; // DurationWholeHours production + double whole_minutes; // DurationWholeMinutes production + double whole_seconds; // DurationWholeSeconds production + int32_t hours_fraction; // DurationHoursFraction, in unit of 1e-9 hours + int32_t minutes_fraction; // DurationMinuteFraction, in unit of 1e-9 minutes + int32_t seconds_fraction; // DurationSecondFraction, in unit of nanosecond ( // 1e-9 seconds). 
- static constexpr int64_t kEmpty = -1; + static constexpr int32_t kEmpty = -1; ParsedISO8601Duration() : sign(1), years(kEmpty), @@ -116,10 +116,10 @@ struct ParsedISO8601Duration { weeks(kEmpty), days(kEmpty), whole_hours(kEmpty), - hours_fraction(kEmpty), whole_minutes(kEmpty), - minutes_fraction(kEmpty), whole_seconds(kEmpty), + hours_fraction(kEmpty), + minutes_fraction(kEmpty), seconds_fraction(kEmpty) {} }; diff --git a/test/test262/test262.status b/test/test262/test262.status index 2d7e0ef89b..b44c1493d0 100644 --- a/test/test262/test262.status +++ b/test/test262/test262.status @@ -586,8 +586,6 @@ 'built-ins/Temporal/Duration/prototype/round/total-duration-nanoseconds-too-large-with-zoned-datetime': [PASS, FAIL], 'built-ins/Temporal/Duration/prototype/total/relativeto-zoneddatetime-with-fractional-days-different-sign': [FAIL], 'built-ins/Temporal/Duration/prototype/total/relativeto-zoneddatetime-with-fractional-days': [FAIL], - 'built-ins/Temporal/PlainTime/prototype/add/argument-string-duration-too-large': [FAIL], - 'built-ins/Temporal/PlainTime/prototype/subtract/argument-string-duration-too-large': [FAIL], 'intl402/Temporal/TimeZone/prototype/getNextTransition/subtract-second-and-nanosecond-from-last-transition': [FAIL], 'intl402/Temporal/TimeZone/prototype/getPreviousTransition/nanoseconds-subtracted-or-added-at-dst-transition': [FAIL], From 79da7bbb93d5c74df45bd571d4a59e331313e777 Mon Sep 17 00:00:00 2001 From: jiepan Date: Mon, 5 Sep 2022 13:47:11 +0800 Subject: [PATCH 0006/1772] [wasm][revec] Add YMM register in register allocation Bug: v8:12716 Change-Id: I0a1e807f7b0c64afa7d259361c47314e9c9e30db Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3867140 Reviewed-by: Thibaud Michaud Commit-Queue: Jie Pan Reviewed-by: Deepti Gandluri Cr-Commit-Position: refs/heads/main@{#83091} --- src/codegen/register-configuration.cc | 38 ++++++++++++++++--- src/codegen/register-configuration.h | 22 ++++++++++- src/codegen/x64/register-x64.h | 2 + src/compiler/backend/register-allocator.cc | 17 +++++++-- .../register-configuration-unittest.cc | 12 ++++-- .../backend/instruction-sequence-unittest.cc | 19 ++++++++-- .../backend/instruction-sequence-unittest.h | 1 + 7 files changed, 93 insertions(+), 18 deletions(-) diff --git a/src/codegen/register-configuration.cc b/src/codegen/register-configuration.cc index 0c589fd157..8b7b9f0010 100644 --- a/src/codegen/register-configuration.cc +++ b/src/codegen/register-configuration.cc @@ -58,6 +58,10 @@ static_assert(RegisterConfiguration::kMaxFPRegisters >= DoubleRegister::kNumRegisters); static_assert(RegisterConfiguration::kMaxFPRegisters >= Simd128Register::kNumRegisters); +#if V8_TARGET_ARCH_X64 +static_assert(RegisterConfiguration::kMaxFPRegisters >= + Simd256Register::kNumRegisters); +#endif static int get_num_simd128_registers() { return @@ -68,6 +72,8 @@ static int get_num_simd128_registers() { #endif // V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_PPC64 } +static int get_num_simd256_registers() { return 0; } + // Callers on architectures other than Arm expect this to be be constant // between build and runtime. Avoid adding variability on other platforms. static int get_num_allocatable_double_registers() { @@ -114,6 +120,8 @@ static int get_num_allocatable_simd128_registers() { #endif } +static int get_num_allocatable_simd256_registers() { return 0; } + // Callers on architectures other than Arm expect this to be be constant // between build and runtime. Avoid adding variability on other platforms. 
static const int* get_allocatable_double_codes() { @@ -140,9 +148,11 @@ class ArchDefaultRegisterConfiguration : public RegisterConfiguration { ArchDefaultRegisterConfiguration() : RegisterConfiguration( kFPAliasing, Register::kNumRegisters, DoubleRegister::kNumRegisters, - get_num_simd128_registers(), kMaxAllocatableGeneralRegisterCount, + get_num_simd128_registers(), get_num_simd256_registers(), + kMaxAllocatableGeneralRegisterCount, get_num_allocatable_double_registers(), - get_num_allocatable_simd128_registers(), kAllocatableGeneralCodes, + get_num_allocatable_simd128_registers(), + get_num_allocatable_simd256_registers(), kAllocatableGeneralCodes, get_allocatable_double_codes(), get_allocatable_simd128_codes()) {} }; @@ -160,9 +170,11 @@ class RestrictedRegisterConfiguration : public RegisterConfiguration { std::unique_ptr allocatable_general_register_names) : RegisterConfiguration( kFPAliasing, Register::kNumRegisters, DoubleRegister::kNumRegisters, - get_num_simd128_registers(), num_allocatable_general_registers, + get_num_simd128_registers(), get_num_simd256_registers(), + num_allocatable_general_registers, get_num_allocatable_double_registers(), get_num_allocatable_simd128_registers(), + get_num_allocatable_simd256_registers(), allocatable_general_register_codes.get(), get_allocatable_double_codes(), get_allocatable_simd128_codes()), allocatable_general_register_codes_( @@ -218,22 +230,26 @@ const RegisterConfiguration* RegisterConfiguration::RestrictGeneralRegisters( RegisterConfiguration::RegisterConfiguration( AliasingKind fp_aliasing_kind, int num_general_registers, int num_double_registers, int num_simd128_registers, - int num_allocatable_general_registers, int num_allocatable_double_registers, - int num_allocatable_simd128_registers, const int* allocatable_general_codes, + int num_simd256_registers, int num_allocatable_general_registers, + int num_allocatable_double_registers, int num_allocatable_simd128_registers, + int num_allocatable_simd256_registers, const int* allocatable_general_codes, const int* allocatable_double_codes, const int* independent_allocatable_simd128_codes) : num_general_registers_(num_general_registers), num_float_registers_(0), num_double_registers_(num_double_registers), num_simd128_registers_(num_simd128_registers), + num_simd256_registers_(num_simd256_registers), num_allocatable_general_registers_(num_allocatable_general_registers), num_allocatable_float_registers_(0), num_allocatable_double_registers_(num_allocatable_double_registers), num_allocatable_simd128_registers_(num_allocatable_simd128_registers), + num_allocatable_simd256_registers_(num_allocatable_simd256_registers), allocatable_general_codes_mask_(0), allocatable_float_codes_mask_(0), allocatable_double_codes_mask_(0), allocatable_simd128_codes_mask_(0), + allocatable_simd256_codes_mask_(0), allocatable_general_codes_(allocatable_general_codes), allocatable_double_codes_(allocatable_double_codes), fp_aliasing_kind_(fp_aliasing_kind) { @@ -281,9 +297,17 @@ RegisterConfiguration::RegisterConfiguration( for (int i = 0; i < num_allocatable_float_registers_; ++i) { allocatable_float_codes_[i] = allocatable_simd128_codes_[i] = allocatable_double_codes_[i]; +#if V8_TARGET_ARCH_X64 + allocatable_simd256_codes_[i] = allocatable_double_codes_[i]; +#endif } allocatable_float_codes_mask_ = allocatable_simd128_codes_mask_ = allocatable_double_codes_mask_; +#if V8_TARGET_ARCH_X64 + num_simd256_registers_ = num_double_registers_; + num_allocatable_simd256_registers_ = num_allocatable_double_registers_; 
+ allocatable_simd256_codes_mask_ = allocatable_double_codes_mask_; +#endif } else { DCHECK_EQ(fp_aliasing_kind_, AliasingKind::kIndependent); DCHECK_NE(independent_allocatable_simd128_codes, nullptr); @@ -302,7 +326,9 @@ RegisterConfiguration::RegisterConfiguration( } } -// Assert that kFloat32, kFloat64, and kSimd128 are consecutive values. +// Assert that kFloat32, kFloat64, kSimd128 and kSimd256 are consecutive values. +static_assert(static_cast(MachineRepresentation::kSimd256) == + static_cast(MachineRepresentation::kSimd128) + 1); static_assert(static_cast(MachineRepresentation::kSimd128) == static_cast(MachineRepresentation::kFloat64) + 1); static_assert(static_cast(MachineRepresentation::kFloat64) == diff --git a/src/codegen/register-configuration.h b/src/codegen/register-configuration.h index 537079cdd6..e91a9bae40 100644 --- a/src/codegen/register-configuration.h +++ b/src/codegen/register-configuration.h @@ -34,9 +34,10 @@ class V8_EXPORT_PRIVATE RegisterConfiguration { RegisterConfiguration( AliasingKind fp_aliasing_kind, int num_general_registers, int num_double_registers, int num_simd128_registers, - int num_allocatable_general_registers, + int num_simd256_registers, int num_allocatable_general_registers, int num_allocatable_double_registers, int num_allocatable_simd128_registers, + int num_allocatable_simd256_registers, const int* allocatable_general_codes, const int* allocatable_double_codes, const int* independent_allocatable_simd128_codes = nullptr); @@ -44,6 +45,7 @@ class V8_EXPORT_PRIVATE RegisterConfiguration { int num_float_registers() const { return num_float_registers_; } int num_double_registers() const { return num_double_registers_; } int num_simd128_registers() const { return num_simd128_registers_; } + int num_simd256_registers() const { return num_simd256_registers_; } int num_allocatable_general_registers() const { return num_allocatable_general_registers_; } @@ -59,6 +61,10 @@ class V8_EXPORT_PRIVATE RegisterConfiguration { int num_allocatable_simd128_registers() const { return num_allocatable_simd128_registers_; } + int num_allocatable_simd256_registers() const { + return num_allocatable_simd256_registers_; + } + AliasingKind fp_aliasing_kind() const { return fp_aliasing_kind_; } int32_t allocatable_general_codes_mask() const { return allocatable_general_codes_mask_; @@ -97,6 +103,13 @@ class V8_EXPORT_PRIVATE RegisterConfiguration { bool IsAllocatableSimd128Code(int index) const { return ((1 << index) & allocatable_simd128_codes_mask_) != 0; } + int GetAllocatableSimd256Code(int index) const { + DCHECK(index >= 0 && index < num_allocatable_simd256_registers()); + return allocatable_simd256_codes_[index]; + } + bool IsAllocatableSimd256Code(int index) const { + return ((1 << index) & allocatable_simd256_codes_mask_) != 0; + } const int* allocatable_general_codes() const { return allocatable_general_codes_; @@ -110,6 +123,9 @@ class V8_EXPORT_PRIVATE RegisterConfiguration { const int* allocatable_simd128_codes() const { return allocatable_simd128_codes_; } + const int* allocatable_simd256_codes() const { + return allocatable_simd256_codes_; + } // Aliasing calculations for floating point registers, when fp_aliasing_kind() // is COMBINE. 
Currently only implemented for kFloat32, kFloat64, or kSimd128 @@ -130,18 +146,22 @@ class V8_EXPORT_PRIVATE RegisterConfiguration { int num_float_registers_; const int num_double_registers_; int num_simd128_registers_; + int num_simd256_registers_; int num_allocatable_general_registers_; int num_allocatable_float_registers_; int num_allocatable_double_registers_; int num_allocatable_simd128_registers_; + int num_allocatable_simd256_registers_; int32_t allocatable_general_codes_mask_; int32_t allocatable_float_codes_mask_; int32_t allocatable_double_codes_mask_; int32_t allocatable_simd128_codes_mask_; + int32_t allocatable_simd256_codes_mask_; const int* allocatable_general_codes_; int allocatable_float_codes_[kMaxFPRegisters]; const int* allocatable_double_codes_; int allocatable_simd128_codes_[kMaxFPRegisters]; + int allocatable_simd256_codes_[kMaxFPRegisters]; AliasingKind fp_aliasing_kind_; }; diff --git a/src/codegen/x64/register-x64.h b/src/codegen/x64/register-x64.h index 8715a14012..4ab85275cc 100644 --- a/src/codegen/x64/register-x64.h +++ b/src/codegen/x64/register-x64.h @@ -233,6 +233,8 @@ using DoubleRegister = XMMRegister; using Simd128Register = XMMRegister; +using Simd256Register = YMMRegister; + #define DECLARE_REGISTER(R) \ constexpr DoubleRegister R = DoubleRegister::from_code(kDoubleCode_##R); DOUBLE_REGISTERS(DECLARE_REGISTER) diff --git a/src/compiler/backend/register-allocator.cc b/src/compiler/backend/register-allocator.cc index e705a4d89e..c0c9db2bb1 100644 --- a/src/compiler/backend/register-allocator.cc +++ b/src/compiler/backend/register-allocator.cc @@ -33,7 +33,6 @@ static constexpr int kFloat32Bit = static constexpr int kSimd128Bit = RepresentationBit(MachineRepresentation::kSimd128); - const InstructionBlock* GetContainingLoop(const InstructionSequence* sequence, const InstructionBlock* block) { RpoNumber index = block->loop_header(); @@ -1493,6 +1492,7 @@ void TopTierRegisterAllocationData::MarkFixedUse(MachineRepresentation rep, switch (rep) { case MachineRepresentation::kFloat32: case MachineRepresentation::kSimd128: + case MachineRepresentation::kSimd256: if (kFPAliasing == AliasingKind::kOverlap) { fixed_fp_register_use_->Add(index); } else if (kFPAliasing == AliasingKind::kIndependent) { @@ -1526,7 +1526,8 @@ bool TopTierRegisterAllocationData::HasFixedUse(MachineRepresentation rep, int index) { switch (rep) { case MachineRepresentation::kFloat32: - case MachineRepresentation::kSimd128: { + case MachineRepresentation::kSimd128: + case MachineRepresentation::kSimd256: { if (kFPAliasing == AliasingKind::kOverlap) { return fixed_fp_register_use_->Contains(index); } else if (kFPAliasing == AliasingKind::kIndependent) { @@ -1561,6 +1562,7 @@ void TopTierRegisterAllocationData::MarkAllocated(MachineRepresentation rep, switch (rep) { case MachineRepresentation::kFloat32: case MachineRepresentation::kSimd128: + case MachineRepresentation::kSimd256: if (kFPAliasing == AliasingKind::kOverlap) { assigned_double_registers_->Add(index); } else if (kFPAliasing == AliasingKind::kIndependent) { @@ -1937,6 +1939,10 @@ void LiveRangeBuilder::AddInitialIntervals(const InstructionBlock* block, int LiveRangeBuilder::FixedFPLiveRangeID(int index, MachineRepresentation rep) { int result = -index - 1; switch (rep) { + case MachineRepresentation::kSimd256: + result -= + kNumberOfFixedRangesPerRegister * config()->num_simd128_registers(); + V8_FALLTHROUGH; case MachineRepresentation::kSimd128: result -= kNumberOfFixedRangesPerRegister * config()->num_float_registers(); @@ 
-3391,7 +3397,8 @@ void LinearScanAllocator::ComputeStateFromManyPredecessors( const int* codes = allocatable_register_codes(); MachineRepresentation rep = val.first->representation(); if (check_aliasing && (rep == MachineRepresentation::kFloat32 || - rep == MachineRepresentation::kSimd128)) + rep == MachineRepresentation::kSimd128 || + rep == MachineRepresentation::kSimd256)) GetFPRegisterSet(rep, &num_regs, &num_codes, &codes); for (int idx = 0; idx < num_regs; idx++) { int uses = val.second.used_registers[idx]; @@ -4005,6 +4012,10 @@ void LinearScanAllocator::GetFPRegisterSet(MachineRepresentation rep, *num_regs = data()->config()->num_simd128_registers(); *num_codes = data()->config()->num_allocatable_simd128_registers(); *codes = data()->config()->allocatable_simd128_codes(); + } else if (rep == MachineRepresentation::kSimd256) { + *num_regs = data()->config()->num_simd256_registers(); + *num_codes = data()->config()->num_allocatable_simd256_registers(); + *codes = data()->config()->allocatable_simd256_codes(); } else { UNREACHABLE(); } diff --git a/test/unittests/codegen/register-configuration-unittest.cc b/test/unittests/codegen/register-configuration-unittest.cc index cd96cfaa02..82189f7fef 100644 --- a/test/unittests/codegen/register-configuration-unittest.cc +++ b/test/unittests/codegen/register-configuration-unittest.cc @@ -27,8 +27,8 @@ TEST_F(RegisterConfigurationUnitTest, BasicProperties) { int double_codes[kNumAllocatableDoubleRegs] = {2, 3}; RegisterConfiguration test(AliasingKind::kOverlap, kNumGeneralRegs, - kNumDoubleRegs, 0, kNumAllocatableGeneralRegs, - kNumAllocatableDoubleRegs, 0, general_codes, + kNumDoubleRegs, 0, 0, kNumAllocatableGeneralRegs, + kNumAllocatableDoubleRegs, 0, 0, general_codes, double_codes); EXPECT_EQ(test.num_general_registers(), kNumGeneralRegs); @@ -39,6 +39,10 @@ TEST_F(RegisterConfigurationUnitTest, BasicProperties) { EXPECT_EQ(test.num_allocatable_float_registers(), kNumAllocatableDoubleRegs); EXPECT_EQ(test.num_allocatable_simd128_registers(), kNumAllocatableDoubleRegs); +#if V8_TARGET_ARCH_X64 + EXPECT_EQ(test.num_allocatable_simd256_registers(), + kNumAllocatableDoubleRegs); +#endif EXPECT_EQ(test.allocatable_general_codes_mask(), (1 << general_codes[0]) | (1 << general_codes[1])); @@ -63,8 +67,8 @@ TEST_F(RegisterConfigurationUnitTest, CombineAliasing) { int double_codes[] = {2, 3, 16}; // reg 16 should not alias registers 32, 33. RegisterConfiguration test(AliasingKind::kCombine, kNumGeneralRegs, - kNumDoubleRegs, 0, kNumAllocatableGeneralRegs, - kNumAllocatableDoubleRegs, 0, general_codes, + kNumDoubleRegs, 0, 0, kNumAllocatableGeneralRegs, + kNumAllocatableDoubleRegs, 0, 0, general_codes, double_codes); // There are 3 allocatable double regs, but only 2 can alias float regs. 
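Background on why the x64-only branches above can simply mirror the double-register codes for simd256: on x64, xmmN is architecturally the low 128 bits of ymmN, so the two register sets overlap and must share allocation codes. A standalone AVX snippet (not V8 code; compile with -mavx) showing the overlap:

  #include <immintrin.h>
  #include <cstdio>

  int main() {
    __m256d wide = _mm256_set1_pd(2.0);          // writes a full YMM register
    __m128d low = _mm256_castpd256_pd128(wide);  // no-op cast: its XMM half
    double out[2];
    _mm_storeu_pd(out, low);
    std::printf("%f %f\n", out[0], out[1]);      // 2.000000 2.000000
    return 0;
  }

Because of this overlap, an allocator that treated ymm3 and xmm3 as independent registers could hand both out at once and silently clobber one of the values.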
diff --git a/test/unittests/compiler/backend/instruction-sequence-unittest.cc b/test/unittests/compiler/backend/instruction-sequence-unittest.cc index 5d049e04af..038bcb5e2e 100644 --- a/test/unittests/compiler/backend/instruction-sequence-unittest.cc +++ b/test/unittests/compiler/backend/instruction-sequence-unittest.cc @@ -25,9 +25,15 @@ InstructionSequenceTest::InstructionSequenceTest() num_general_registers_(Register::kNumRegisters), num_double_registers_(DoubleRegister::kNumRegisters), num_simd128_registers_(Simd128Register::kNumRegisters), +#if V8_TARGET_ARCH_X64 + num_simd256_registers_(Simd256Register::kNumRegisters), +#else + num_simd256_registers_(0), +#endif // V8_TARGET_ARCH_X64 instruction_blocks_(zone()), current_block_(nullptr), - block_returns_(false) {} + block_returns_(false) { +} void InstructionSequenceTest::SetNumRegs(int num_general_registers, int num_double_registers) { @@ -48,6 +54,8 @@ int InstructionSequenceTest::GetNumRegs(MachineRepresentation rep) { return config()->num_double_registers(); case MachineRepresentation::kSimd128: return config()->num_simd128_registers(); + case MachineRepresentation::kSimd256: + return config()->num_simd256_registers(); default: return config()->num_general_registers(); } @@ -62,6 +70,8 @@ int InstructionSequenceTest::GetAllocatableCode(int index, return config()->GetAllocatableDoubleCode(index); case MachineRepresentation::kSimd128: return config()->GetAllocatableSimd128Code(index); + case MachineRepresentation::kSimd256: + return config()->GetAllocatableSimd256Code(index); default: return config()->GetAllocatableGeneralCode(index); } @@ -71,9 +81,10 @@ const RegisterConfiguration* InstructionSequenceTest::config() { if (!config_) { config_.reset(new RegisterConfiguration( kFPAliasing, num_general_registers_, num_double_registers_, - num_simd128_registers_, num_general_registers_, num_double_registers_, - num_simd128_registers_, kAllocatableCodes.data(), - kAllocatableCodes.data(), kAllocatableCodes.data())); + num_simd128_registers_, num_simd256_registers_, num_general_registers_, + num_double_registers_, num_simd128_registers_, num_simd256_registers_, + kAllocatableCodes.data(), kAllocatableCodes.data(), + kAllocatableCodes.data())); } return config_.get(); } diff --git a/test/unittests/compiler/backend/instruction-sequence-unittest.h b/test/unittests/compiler/backend/instruction-sequence-unittest.h index f624b91ac4..e431078591 100644 --- a/test/unittests/compiler/backend/instruction-sequence-unittest.h +++ b/test/unittests/compiler/backend/instruction-sequence-unittest.h @@ -280,6 +280,7 @@ class InstructionSequenceTest : public TestWithIsolateAndZone { int num_general_registers_; int num_double_registers_; int num_simd128_registers_; + int num_simd256_registers_; // Block building state. 
InstructionBlocks instruction_blocks_; From a4afe1a09fc7a7bf986abbec4b17a3725d01cad3 Mon Sep 17 00:00:00 2001 From: Frank Tang Date: Thu, 8 Sep 2022 20:16:08 -0700 Subject: [PATCH 0007/1772] [Temporal] Remove passing tests Bug: v8:11544 Change-Id: I23435db7f625ee35f560fd84ee98d481081fb5ff Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3868513 Commit-Queue: Frank Tang Reviewed-by: Shu-yu Guo Cr-Commit-Position: refs/heads/main@{#83092} --- test/test262/test262.status | 1 - 1 file changed, 1 deletion(-) diff --git a/test/test262/test262.status b/test/test262/test262.status index b44c1493d0..a9c2852b63 100644 --- a/test/test262/test262.status +++ b/test/test262/test262.status @@ -583,7 +583,6 @@ 'intl402/Temporal/Calendar/prototype/yearMonthFromFields/order-of-operations': [FAIL], 'intl402/Temporal/Duration/compare/relativeto-hour': [FAIL], 'built-ins/Temporal/PlainTime/prototype/equals/argument-string-no-implicit-midnight': [FAIL], - 'built-ins/Temporal/Duration/prototype/round/total-duration-nanoseconds-too-large-with-zoned-datetime': [PASS, FAIL], 'built-ins/Temporal/Duration/prototype/total/relativeto-zoneddatetime-with-fractional-days-different-sign': [FAIL], 'built-ins/Temporal/Duration/prototype/total/relativeto-zoneddatetime-with-fractional-days': [FAIL], 'intl402/Temporal/TimeZone/prototype/getNextTransition/subtract-second-and-nanosecond-from-last-transition': [FAIL], From e24cb41f08930ee696201cba1cff8ad87489b7a3 Mon Sep 17 00:00:00 2001 From: v8-ci-autoroll-builder Date: Thu, 8 Sep 2022 20:44:31 -0700 Subject: [PATCH 0008/1772] Update ICU (trusted) Rolling v8/third_party/icu: https://chromium.googlesource.com/chromium/deps/icu/+log/bbdc7d8..20f8ac6 Make references to //third_party/icu relative (Filip Filmar) https://chromium.googlesource.com/chromium/deps/icu/+/20f8ac6 R=v8-waterfall-sheriff@grotations.appspotmail.com,mtv-sf-v8-sheriff@grotations.appspotmail.com,ftang@chromium.org Change-Id: I87063f9ec7b4ef8491c43ad8e1902e2741dd0e49 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3886397 Bot-Commit: v8-ci-autoroll-builder Commit-Queue: v8-ci-autoroll-builder Cr-Commit-Position: refs/heads/main@{#83093} --- DEPS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DEPS b/DEPS index 70a1f94b82..efd9e57ad1 100644 --- a/DEPS +++ b/DEPS @@ -223,7 +223,7 @@ deps = { 'third_party/googletest/src': Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'af29db7ec28d6df1c7f0f745186884091e602e07', 'third_party/icu': - Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'bbdc7d8936bd9b896ff9c9822b697554b73c1c9d', + Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '20f8ac695af59b6c830def7d4e95bfeb13dd7be5', 'third_party/instrumented_libraries': Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'e09c4b66b6e87116eb190651421f1a6e2f3b9c52', 'third_party/ittapi': { From 3eb65be78c702e8c9fb046fa91f48be881d93408 Mon Sep 17 00:00:00 2001 From: Danil Somsikov Date: Thu, 8 Sep 2022 15:46:37 +0200 Subject: [PATCH 0009/1772] Enable Profiler domain for the untrusted clients. 
Bug: chromium:1350125 Change-Id: Ia89d01420e93e110a5da22f104f5b8afbdd2f558 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3882973 Commit-Queue: Benedikt Meurer Auto-Submit: Danil Somsikov Reviewed-by: Benedikt Meurer Cr-Commit-Position: refs/heads/main@{#83094} --- src/inspector/v8-inspector-session-impl.cc | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/src/inspector/v8-inspector-session-impl.cc b/src/inspector/v8-inspector-session-impl.cc index 78d8c15553..f7f3e10938 100644 --- a/src/inspector/v8-inspector-session-impl.cc +++ b/src/inspector/v8-inspector-session-impl.cc @@ -127,11 +127,11 @@ V8InspectorSessionImpl::V8InspectorSessionImpl( this, this, agentState(protocol::Console::Metainfo::domainName))); protocol::Console::Dispatcher::wire(&m_dispatcher, m_consoleAgent.get()); - if (m_clientTrustLevel == V8Inspector::kFullyTrusted) { - m_profilerAgent.reset(new V8ProfilerAgentImpl( - this, this, agentState(protocol::Profiler::Metainfo::domainName))); - protocol::Profiler::Dispatcher::wire(&m_dispatcher, m_profilerAgent.get()); + m_profilerAgent.reset(new V8ProfilerAgentImpl( + this, this, agentState(protocol::Profiler::Metainfo::domainName))); + protocol::Profiler::Dispatcher::wire(&m_dispatcher, m_profilerAgent.get()); + if (m_clientTrustLevel == V8Inspector::kFullyTrusted) { m_heapProfilerAgent.reset(new V8HeapProfilerAgentImpl( this, this, agentState(protocol::HeapProfiler::Metainfo::domainName))); protocol::HeapProfiler::Dispatcher::wire(&m_dispatcher, @@ -145,7 +145,7 @@ V8InspectorSessionImpl::V8InspectorSessionImpl( m_runtimeAgent->restore(); m_debuggerAgent->restore(); if (m_heapProfilerAgent) m_heapProfilerAgent->restore(); - if (m_profilerAgent) m_profilerAgent->restore(); + m_profilerAgent->restore(); m_consoleAgent->restore(); } } @@ -154,7 +154,7 @@ V8InspectorSessionImpl::~V8InspectorSessionImpl() { v8::Isolate::Scope scope(m_inspector->isolate()); discardInjectedScripts(); m_consoleAgent->disable(); - if (m_profilerAgent) m_profilerAgent->disable(); + m_profilerAgent->disable(); if (m_heapProfilerAgent) m_heapProfilerAgent->disable(); m_debuggerAgent->disable(); m_runtimeAgent->disable(); @@ -503,8 +503,7 @@ V8InspectorSessionImpl::searchInTextByLines(StringView text, StringView query, void V8InspectorSessionImpl::triggerPreciseCoverageDeltaUpdate( StringView occasion) { - if (m_profilerAgent) - m_profilerAgent->triggerPreciseCoverageDeltaUpdate(toString16(occasion)); + m_profilerAgent->triggerPreciseCoverageDeltaUpdate(toString16(occasion)); } } // namespace v8_inspector From 85b5fbc77ab86ce2ff6f6f65cedb8524d7e2ae2e Mon Sep 17 00:00:00 2001 From: Liu Yu Date: Fri, 9 Sep 2022 11:41:57 +0800 Subject: [PATCH 0010/1772] [loong64][mips64][log][compiler] Enable first-execution logging Port commit c0f420ef5cdd9723d79d4fec7dea31d2e81edc41 Change-Id: I061da980f39888f0f43e2746c8c37d683b200a95 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3885381 Auto-Submit: Liu Yu Reviewed-by: Zhao Jiazhong Commit-Queue: Zhao Jiazhong Cr-Commit-Position: refs/heads/main@{#83095} --- src/codegen/loong64/macro-assembler-loong64.cc | 14 ++++++++++++-- src/codegen/mips64/macro-assembler-mips64.cc | 14 ++++++++++++-- 2 files changed, 24 insertions(+), 4 deletions(-) diff --git a/src/codegen/loong64/macro-assembler-loong64.cc b/src/codegen/loong64/macro-assembler-loong64.cc index b0d7a39646..2822a0e205 100644 --- a/src/codegen/loong64/macro-assembler-loong64.cc +++ b/src/codegen/loong64/macro-assembler-loong64.cc @@ -4274,18 +4274,28 
@@ void MacroAssembler::MaybeOptimizeCodeOrTailCallOptimizedCodeSlot( Register flags, Register feedback_vector) { ASM_CODE_COMMENT(this); DCHECK(!AreAliased(flags, feedback_vector)); - Label maybe_has_optimized_code; + Label maybe_has_optimized_code, maybe_needs_logging; // Check if optimized code marker is available. { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); And(scratch, flags, Operand(FeedbackVector::kFlagsTieringStateIsAnyRequested)); - Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg)); + Branch(&maybe_needs_logging, eq, scratch, Operand(zero_reg)); } GenerateTailCallToReturnedCode(Runtime::kCompileOptimized); + bind(&maybe_needs_logging); + { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + And(scratch, flags, Operand(FeedbackVector::LogNextExecutionBit::kMask)); + Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg)); + } + + GenerateTailCallToReturnedCode(Runtime::kFunctionLogNextExecution); + bind(&maybe_has_optimized_code); Register optimized_code_entry = flags; Ld_d(optimized_code_entry, diff --git a/src/codegen/mips64/macro-assembler-mips64.cc b/src/codegen/mips64/macro-assembler-mips64.cc index 5d162fbed9..24e6fee51a 100644 --- a/src/codegen/mips64/macro-assembler-mips64.cc +++ b/src/codegen/mips64/macro-assembler-mips64.cc @@ -6318,18 +6318,28 @@ void MacroAssembler::LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing( void MacroAssembler::MaybeOptimizeCodeOrTailCallOptimizedCodeSlot( Register flags, Register feedback_vector) { ASM_CODE_COMMENT(this); - Label maybe_has_optimized_code; + Label maybe_has_optimized_code, maybe_needs_logging; // Check if optimized code marker is available. { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); And(scratch, flags, Operand(FeedbackVector::kFlagsTieringStateIsAnyRequested)); - Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg)); + Branch(&maybe_needs_logging, eq, scratch, Operand(zero_reg)); } GenerateTailCallToReturnedCode(Runtime::kCompileOptimized); + bind(&maybe_needs_logging); + { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + And(scratch, flags, Operand(FeedbackVector::LogNextExecutionBit::kMask)); + Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg)); + } + + GenerateTailCallToReturnedCode(Runtime::kFunctionLogNextExecution); + bind(&maybe_has_optimized_code); Register optimized_code_entry = flags; Ld(optimized_code_entry, From 72d6dc6d5e5b43f9b09cb5cc125241fcb62a674c Mon Sep 17 00:00:00 2001 From: Omer Katz Date: Fri, 9 Sep 2022 09:26:09 +0200 Subject: [PATCH 0011/1772] [heap] Do precise search in free list for new space In case the free list fast path fails, do a precise search through the precise category for the current allocation. 
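In outline, the allocation strategy this CL adds for new space is a two-step fallback. The following condensed C++ sketch restates the diff below (cache updates and allocated-bytes bookkeeping omitted; the names are the ones used in the patch):

  FreeSpace FreeListManyCachedFastPathForNewSpace::Allocate(
      size_t size_in_bytes, size_t* node_size, AllocationOrigin origin) {
    // Step 1: the usual cached fast path, which only searches categories
    // guaranteed to fit the request.
    FreeSpace node =
        FreeListManyCachedFastPath::Allocate(size_in_bytes, node_size, origin);
    if (!node.is_null()) return node;
    // Step 2: precise search of the exactly-matching category. New space is
    // small, so larger categories are often empty; scanning the precise one
    // can still find a fit and thereby avoid an allocation failure and a GC.
    FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
    return SearchForNodeInList(type, size_in_bytes, node_size);
  }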
Bug: v8:12612 Change-Id: I120e64b0d09b9cf5a776188180d6e6c53c44886b Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3879494 Reviewed-by: Michael Lippautz Commit-Queue: Omer Katz Cr-Commit-Position: refs/heads/main@{#83096} --- src/heap/free-list.cc | 47 +++++++++++++++++++++++++++++++++++++++--- src/heap/free-list.h | 29 +++++++++++++++++++++++++- src/heap/new-spaces.cc | 2 +- 3 files changed, 73 insertions(+), 5 deletions(-) diff --git a/src/heap/free-list.cc b/src/heap/free-list.cc index 512f6759d7..65104330a0 100644 --- a/src/heap/free-list.cc +++ b/src/heap/free-list.cc @@ -111,6 +111,10 @@ void FreeListCategory::Relink(FreeList* owner) { FreeList* FreeList::CreateFreeList() { return new FreeListManyCachedOrigin(); } +FreeList* FreeList::CreateFreeListForNewSpace() { + return new FreeListManyCachedOriginForNewSpace(); +} + FreeSpace FreeList::TryFindNodeIn(FreeListCategoryType type, size_t minimum_size, size_t* node_size) { FreeListCategory* category = categories_[type]; @@ -361,12 +365,14 @@ FreeSpace FreeListManyCachedFastPath::Allocate(size_t size_in_bytes, // Fast path part 2: searching the medium categories for tiny objects if (node.is_null()) { if (size_in_bytes <= kTinyObjectMaxSize) { + DCHECK_EQ(kFastPathFirstCategory, first_category); for (type = next_nonempty_category[kFastPathFallBackTiny]; type < kFastPathFirstCategory; type = next_nonempty_category[type + 1]) { node = TryFindNodeIn(type, size_in_bytes, node_size); if (!node.is_null()) break; } + first_category = kFastPathFallBackTiny; } } @@ -387,19 +393,41 @@ FreeSpace FreeListManyCachedFastPath::Allocate(size_t size_in_bytes, } } - // Updating cache - if (!node.is_null() && categories_[type] == nullptr) { - UpdateCacheAfterRemoval(type); + if (!node.is_null()) { + if (categories_[type] == nullptr) UpdateCacheAfterRemoval(type); + Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size); } #ifdef DEBUG CheckCacheIntegrity(); #endif + DCHECK(IsVeryLong() || Available() == SumFreeLists()); + return node; +} + +// ------------------------------------------------ +// FreeListManyCachedFastPath implementation + +FreeSpace FreeListManyCachedFastPathForNewSpace::Allocate( + size_t size_in_bytes, size_t* node_size, AllocationOrigin origin) { + FreeSpace node = + FreeListManyCachedFastPath::Allocate(size_in_bytes, node_size, origin); + if (!node.is_null()) return node; + + // Search through the precise category for a fit + FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes); + node = SearchForNodeInList(type, size_in_bytes, node_size); + if (!node.is_null()) { + if (categories_[type] == nullptr) UpdateCacheAfterRemoval(type); Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size); } +#ifdef DEBUG + CheckCacheIntegrity(); +#endif + DCHECK(IsVeryLong() || Available() == SumFreeLists()); return node; } @@ -418,6 +446,19 @@ FreeSpace FreeListManyCachedOrigin::Allocate(size_t size_in_bytes, } } +// ------------------------------------------------ +// FreeListManyCachedOrigin implementation + +FreeSpace FreeListManyCachedOriginForNewSpace::Allocate( + size_t size_in_bytes, size_t* node_size, AllocationOrigin origin) { + if (origin == AllocationOrigin::kGC) { + return FreeListManyCached::Allocate(size_in_bytes, node_size, origin); + } else { + return FreeListManyCachedOriginForNewSpace::Allocate(size_in_bytes, + node_size, origin); + } +} + // ------------------------------------------------ // Generic FreeList methods (non alloc/free related) diff --git a/src/heap/free-list.h 
b/src/heap/free-list.h index 896eed570b..c8fb58a36a 100644 --- a/src/heap/free-list.h +++ b/src/heap/free-list.h @@ -122,8 +122,10 @@ class FreeListCategory { // categories would scatter allocation more. class FreeList { public: - // Creates a Freelist of the default class (FreeListLegacy for now). + // Creates a Freelist of the default class. V8_EXPORT_PRIVATE static FreeList* CreateFreeList(); + // Creates a Freelist for new space. + V8_EXPORT_PRIVATE static FreeList* CreateFreeListForNewSpace(); virtual ~FreeList() = default; @@ -473,6 +475,21 @@ class V8_EXPORT_PRIVATE FreeListManyCachedFastPath : public FreeListManyCached { FreeListManyCachedFastPathSelectFastAllocationFreeListCategoryType); }; +// Same as FreeListManyCachedFastPath but falls back to a precise search of the +// precise category in case allocation fails. Because new space is relatively +// small, the free list is also relatively small and larger categories are more +// likely to be empty. The precise search is meant to avoid an allocation +// failure and thus avoid GCs. +class V8_EXPORT_PRIVATE FreeListManyCachedFastPathForNewSpace + : public FreeListManyCachedFastPath { + public: + V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes, + size_t* node_size, + AllocationOrigin origin) override; + + protected: +}; + // Uses FreeListManyCached if in the GC; FreeListManyCachedFastPath otherwise. // The reasoning behind this FreeList is the following: the GC runs in // parallel, and therefore, more expensive allocations there are less @@ -489,6 +506,16 @@ class V8_EXPORT_PRIVATE FreeListManyCachedOrigin AllocationOrigin origin) override; }; +// Similar to FreeListManyCachedOrigin but uses +// FreeListManyCachedFastPathForNewSpace for allocations outside the GC. +class V8_EXPORT_PRIVATE FreeListManyCachedOriginForNewSpace + : public FreeListManyCachedFastPathForNewSpace { + public: + V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes, + size_t* node_size, + AllocationOrigin origin) override; +}; + } // namespace internal } // namespace v8 diff --git a/src/heap/new-spaces.cc b/src/heap/new-spaces.cc index 89f7501655..719d05e26b 100644 --- a/src/heap/new-spaces.cc +++ b/src/heap/new-spaces.cc @@ -902,7 +902,7 @@ PagedSpaceForNewSpace::PagedSpaceForNewSpace( LinearAllocationArea& allocation_info, LinearAreaOriginalData& linear_area_original_data) : PagedSpaceBase(heap, NEW_SPACE, NOT_EXECUTABLE, - FreeList::CreateFreeList(), allocation_counter, + FreeList::CreateFreeListForNewSpace(), allocation_counter, allocation_info, linear_area_original_data, CompactionSpaceKind::kNone), initial_capacity_(RoundDown(initial_capacity, Page::kPageSize)), From b120f3e60afaa8da2a90f80a67b90351f3c888a9 Mon Sep 17 00:00:00 2001 From: Leszek Swirski Date: Fri, 9 Sep 2022 09:17:51 +0200 Subject: [PATCH 0012/1772] [ic] Fix getter-in-IC for LoadAccessMode::kHas case When testing for "has" rather than loading, we can immediately return true when there is an accessor (this is already what the LoadHandler path does but was missed in the inlined case). 
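The control flow of the fix is easiest to read in isolation. This annotated sketch condenses the hunk below (CSA plumbing elided; the TNode element types are reconstructions, not verbatim source). The key point: for a has-property query, the accessor's mere existence already answers the question, so the getter must not run.

  if (access_mode == LoadAccessMode::kHas) {
    // 'name' in obj: an accessor exists, so the answer is true. Calling the
    // getter here would be both unnecessary and observable.
    exit_point->Return(TrueConstant());
  } else {
    // Regular load: fetch the getter from the weakly-held AccessorPair and
    // invoke it on the receiver.
    TNode<HeapObject> strong_handler = GetHeapObjectAssumeWeak(handler, miss);
    TNode<Object> getter = LoadAccessorPairGetter(CAST(strong_handler));
    exit_point->Return(Call(p->context(), getter, p->receiver()));
  }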
Fixed: chromium:1361434 Fixed: chromium:1361560 Fixed: chromium:1361566 Fixed: chromium:1361654 Fixed: chromium:1361830 Change-Id: I69073eccbb39b08da597297fa597f53f260b242e Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3885879 Auto-Submit: Leszek Swirski Reviewed-by: Igor Sheludko Commit-Queue: Igor Sheludko Cr-Commit-Position: refs/heads/main@{#83097} --- src/ic/accessor-assembler.cc | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/ic/accessor-assembler.cc b/src/ic/accessor-assembler.cc index f3d6a9d47d..c723f449c2 100644 --- a/src/ic/accessor-assembler.cc +++ b/src/ic/accessor-assembler.cc @@ -227,9 +227,13 @@ void AccessorAssembler::HandleLoadICHandlerCase( BIND(&call_getter); { - TNode<HeapObject> strong_handler = GetHeapObjectAssumeWeak(handler, miss); - TNode<Object> getter = LoadAccessorPairGetter(CAST(strong_handler)); - exit_point->Return(Call(p->context(), getter, p->receiver())); + if (access_mode == LoadAccessMode::kHas) { + exit_point->Return(TrueConstant()); + } else { + TNode<HeapObject> strong_handler = GetHeapObjectAssumeWeak(handler, miss); + TNode<Object> getter = LoadAccessorPairGetter(CAST(strong_handler)); + exit_point->Return(Call(p->context(), getter, p->receiver())); + } } BIND(&call_code_handler); From de391acf3462716b82b9c5d7fec58a191555cad2 Mon Sep 17 00:00:00 2001 From: Al Muthanna Athamina Date: Thu, 8 Sep 2022 16:09:49 +0200 Subject: [PATCH 0013/1772] Allow interrupt budget fuzzer to run tests Bug: v8:13269 Change-Id: I0f35101bd4b8a91ed5aa596cb5d27a5dbb5f764e Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3882976 Reviewed-by: Michael Achenbach Commit-Queue: Almothana Athamneh Cr-Commit-Position: refs/heads/main@{#83098} --- tools/testrunner/testproc/fuzzer.py | 20 ++++++++++---------- tools/testrunner/utils/augmented_options.py | 1 + 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/tools/testrunner/testproc/fuzzer.py b/tools/testrunner/testproc/fuzzer.py index 475b332d83..6c552d83ff 100644 --- a/tools/testrunner/testproc/fuzzer.py +++ b/tools/testrunner/testproc/fuzzer.py @@ -324,7 +324,7 @@ class InterruptBudgetFuzzer(Fuzzer): # Half with, half without lazy feedback allocation. The first flag # overwrites potential flag negations from the extra flags list.
flag1 = rng.choice( - '--lazy-feedback-allocation', '--no-lazy-feedback-allocation') + ['--lazy-feedback-allocation', '--no-lazy-feedback-allocation']) flag2 = '--interrupt-budget=%d' % rng.randint(0, 135168) flag3 = '--interrupt-budget-for-maglev=%d' % rng.randint(0, 40960) flag4 = '--interrupt-budget-for-feedback-allocation=%d' % rng.randint( @@ -391,15 +391,15 @@ class DeoptFuzzer(Fuzzer): FUZZERS = { - 'compaction': (None, CompactionFuzzer), - 'delay': (None, TaskDelayFuzzer), - 'deopt': (DeoptAnalyzer, DeoptFuzzer), - 'gc_interval': (GcIntervalAnalyzer, GcIntervalFuzzer), - 'interrupt': InterruptBudgetFuzzer, - 'marking': (MarkingAnalyzer, MarkingFuzzer), - 'scavenge': (ScavengeAnalyzer, ScavengeFuzzer), - 'stack': (None, StackSizeFuzzer), - 'threads': (None, ThreadPoolSizeFuzzer), + 'compaction': (None, CompactionFuzzer), + 'delay': (None, TaskDelayFuzzer), + 'deopt': (DeoptAnalyzer, DeoptFuzzer), + 'gc_interval': (GcIntervalAnalyzer, GcIntervalFuzzer), + 'interrupt': (None, InterruptBudgetFuzzer), + 'marking': (MarkingAnalyzer, MarkingFuzzer), + 'scavenge': (ScavengeAnalyzer, ScavengeFuzzer), + 'stack': (None, StackSizeFuzzer), + 'threads': (None, ThreadPoolSizeFuzzer), } diff --git a/tools/testrunner/utils/augmented_options.py b/tools/testrunner/utils/augmented_options.py index 52f5f46ae8..db6aef4b5e 100644 --- a/tools/testrunner/utils/augmented_options.py +++ b/tools/testrunner/utils/augmented_options.py @@ -62,6 +62,7 @@ class AugmentedOptions(optparse.Values): fuzzers.append(fuzzer.create_fuzzer_config(name, prob, *args)) add('compaction', self.stress_compaction) + add('interrupt', self.stress_interrupt_budget) add('marking', self.stress_marking) add('scavenge', self.stress_scavenge) add('gc_interval', self.stress_gc) From 0e456ec6918ea79a17f79a9aad2a14535c1b2b65 Mon Sep 17 00:00:00 2001 From: Victor Gomes Date: Fri, 9 Sep 2022 09:03:26 +0200 Subject: [PATCH 0014/1772] Re-enable octane/typescript for deopt_fuzzer Bug: v8:12445 Change-Id: Iec07b49986a6ceff3842b55af24d375149930a91 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3885877 Auto-Submit: Victor Gomes Reviewed-by: Almothana Athamneh Commit-Queue: Almothana Athamneh Cr-Commit-Position: refs/heads/main@{#83099} --- test/benchmarks/benchmarks.status | 3 --- 1 file changed, 3 deletions(-) diff --git a/test/benchmarks/benchmarks.status b/test/benchmarks/benchmarks.status index a4e77c0de5..022a47a0b5 100644 --- a/test/benchmarks/benchmarks.status +++ b/test/benchmarks/benchmarks.status @@ -109,9 +109,6 @@ ['gc_fuzzer or deopt_fuzzer', { # BUG(v8:12901) Skipped until bug is fixed. 'octane/gbemu-part1': [SKIP], - - # BUG(v8:12445) Flaky - 'octane/typescript': [SKIP], }], # gc_fuzzer or deopt_fuzzer ['predictable', { From 4420804037b362f7600a90d1220e0491df28d0af Mon Sep 17 00:00:00 2001 From: Matthias Liedtke Date: Fri, 9 Sep 2022 09:10:58 +0000 Subject: [PATCH 0015/1772] Revert "[heap] Do precise search in free list for new space" This reverts commit 72d6dc6d5e5b43f9b09cb5cc125241fcb62a674c. Reason for revert: UBSan failure in PagedNewSpace: https://ci.chromium.org/ui/p/v8/builders/ci/V8%20Linux64%20UBSan/23130/overview Original change's description: > [heap] Do precise search in free list for new space > > In case the free list fast path fails, do a precise search through the > precise category for the current allocation. 
> > Bug: v8:12612 > Change-Id: I120e64b0d09b9cf5a776188180d6e6c53c44886b > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3879494 > Reviewed-by: Michael Lippautz > Commit-Queue: Omer Katz > Cr-Commit-Position: refs/heads/main@{#83096} Bug: v8:12612 Change-Id: Ife4a41fa835e61a6d9f0f1c254900288b805f41f No-Presubmit: true No-Tree-Checks: true No-Try: true Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3885884 Commit-Queue: Matthias Liedtke Owners-Override: Matthias Liedtke Bot-Commit: Rubber Stamper Cr-Commit-Position: refs/heads/main@{#83100} --- src/heap/free-list.cc | 47 +++--------------------------------------- src/heap/free-list.h | 29 +------------------------- src/heap/new-spaces.cc | 2 +- 3 files changed, 5 insertions(+), 73 deletions(-) diff --git a/src/heap/free-list.cc b/src/heap/free-list.cc index 65104330a0..512f6759d7 100644 --- a/src/heap/free-list.cc +++ b/src/heap/free-list.cc @@ -111,10 +111,6 @@ void FreeListCategory::Relink(FreeList* owner) { FreeList* FreeList::CreateFreeList() { return new FreeListManyCachedOrigin(); } -FreeList* FreeList::CreateFreeListForNewSpace() { - return new FreeListManyCachedOriginForNewSpace(); -} - FreeSpace FreeList::TryFindNodeIn(FreeListCategoryType type, size_t minimum_size, size_t* node_size) { FreeListCategory* category = categories_[type]; @@ -365,14 +361,12 @@ FreeSpace FreeListManyCachedFastPath::Allocate(size_t size_in_bytes, // Fast path part 2: searching the medium categories for tiny objects if (node.is_null()) { if (size_in_bytes <= kTinyObjectMaxSize) { - DCHECK_EQ(kFastPathFirstCategory, first_category); for (type = next_nonempty_category[kFastPathFallBackTiny]; type < kFastPathFirstCategory; type = next_nonempty_category[type + 1]) { node = TryFindNodeIn(type, size_in_bytes, node_size); if (!node.is_null()) break; } - first_category = kFastPathFallBackTiny; } } @@ -393,41 +387,19 @@ FreeSpace FreeListManyCachedFastPath::Allocate(size_t size_in_bytes, } } - if (!node.is_null()) { - if (categories_[type] == nullptr) UpdateCacheAfterRemoval(type); - Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size); + // Updating cache + if (!node.is_null() && categories_[type] == nullptr) { + UpdateCacheAfterRemoval(type); } #ifdef DEBUG CheckCacheIntegrity(); #endif - DCHECK(IsVeryLong() || Available() == SumFreeLists()); - return node; -} - -// ------------------------------------------------ -// FreeListManyCachedFastPath implementation - -FreeSpace FreeListManyCachedFastPathForNewSpace::Allocate( - size_t size_in_bytes, size_t* node_size, AllocationOrigin origin) { - FreeSpace node = - FreeListManyCachedFastPath::Allocate(size_in_bytes, node_size, origin); - if (!node.is_null()) return node; - - // Search through the precise category for a fit - FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes); - node = SearchForNodeInList(type, size_in_bytes, node_size); - if (!node.is_null()) { - if (categories_[type] == nullptr) UpdateCacheAfterRemoval(type); Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size); } -#ifdef DEBUG - CheckCacheIntegrity(); -#endif - DCHECK(IsVeryLong() || Available() == SumFreeLists()); return node; } @@ -446,19 +418,6 @@ FreeSpace FreeListManyCachedOrigin::Allocate(size_t size_in_bytes, } } -// ------------------------------------------------ -// FreeListManyCachedOrigin implementation - -FreeSpace FreeListManyCachedOriginForNewSpace::Allocate( - size_t size_in_bytes, size_t* node_size, AllocationOrigin origin) { - if (origin == 
AllocationOrigin::kGC) { - return FreeListManyCached::Allocate(size_in_bytes, node_size, origin); - } else { - return FreeListManyCachedOriginForNewSpace::Allocate(size_in_bytes, - node_size, origin); - } -} - // ------------------------------------------------ // Generic FreeList methods (non alloc/free related) diff --git a/src/heap/free-list.h b/src/heap/free-list.h index c8fb58a36a..896eed570b 100644 --- a/src/heap/free-list.h +++ b/src/heap/free-list.h @@ -122,10 +122,8 @@ class FreeListCategory { // categories would scatter allocation more. class FreeList { public: - // Creates a Freelist of the default class. + // Creates a Freelist of the default class (FreeListLegacy for now). V8_EXPORT_PRIVATE static FreeList* CreateFreeList(); - // Creates a Freelist for new space. - V8_EXPORT_PRIVATE static FreeList* CreateFreeListForNewSpace(); virtual ~FreeList() = default; @@ -475,21 +473,6 @@ class V8_EXPORT_PRIVATE FreeListManyCachedFastPath : public FreeListManyCached { FreeListManyCachedFastPathSelectFastAllocationFreeListCategoryType); }; -// Same as FreeListManyCachedFastPath but falls back to a precise search of the -// precise category in case allocation fails. Because new space is relatively -// small, the free list is also relatively small and larger categories are more -// likely to be empty. The precise search is meant to avoid an allocation -// failure and thus avoid GCs. -class V8_EXPORT_PRIVATE FreeListManyCachedFastPathForNewSpace - : public FreeListManyCachedFastPath { - public: - V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes, - size_t* node_size, - AllocationOrigin origin) override; - - protected: -}; - // Uses FreeListManyCached if in the GC; FreeListManyCachedFastPath otherwise. // The reasoning behind this FreeList is the following: the GC runs in // parallel, and therefore, more expensive allocations there are less @@ -506,16 +489,6 @@ class V8_EXPORT_PRIVATE FreeListManyCachedOrigin AllocationOrigin origin) override; }; -// Similar to FreeListManyCachedOrigin but uses -// FreeListManyCachedFastPathForNewSpace for allocations outside the GC. -class V8_EXPORT_PRIVATE FreeListManyCachedOriginForNewSpace - : public FreeListManyCachedFastPathForNewSpace { - public: - V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes, - size_t* node_size, - AllocationOrigin origin) override; -}; - } // namespace internal } // namespace v8 diff --git a/src/heap/new-spaces.cc b/src/heap/new-spaces.cc index 719d05e26b..89f7501655 100644 --- a/src/heap/new-spaces.cc +++ b/src/heap/new-spaces.cc @@ -902,7 +902,7 @@ PagedSpaceForNewSpace::PagedSpaceForNewSpace( LinearAllocationArea& allocation_info, LinearAreaOriginalData& linear_area_original_data) : PagedSpaceBase(heap, NEW_SPACE, NOT_EXECUTABLE, - FreeList::CreateFreeListForNewSpace(), allocation_counter, + FreeList::CreateFreeList(), allocation_counter, allocation_info, linear_area_original_data, CompactionSpaceKind::kNone), initial_capacity_(RoundDown(initial_capacity, Page::kPageSize)), From cd1ee28be835788a7c1d22e1164349f8ad9b5a87 Mon Sep 17 00:00:00 2001 From: Michael Achenbach Date: Thu, 8 Sep 2022 15:43:40 +0200 Subject: [PATCH 0016/1772] [test] Fix occasional hangs on pool termination On termination of the worker pool in the main process, a SIGTERM is sent from pool to worker. It was meant to terminate long-running tests in the worker process. The signal handler on the worker side, however, was only registered during test execution. 
During the remaining logic (probably less than 1% of the time), the default system behavior for SIGTERM would be used, which will likely just kill the process. An ungracefully killed worker might die while writing to the results queue, leaving corrupted data behind. Later, when the main process cleans up the queue, it hangs. We now register a default handler in the worker process that catches the SIGTERM and gracefully stops the processing loop. This way, SIGTERM is always handled in workers and never falls through to the default (kill) disposition. However, a small window remains in which SIGTERM can arrive right after a test process has been started but before the test-abort handler is registered. Fixing this remains a TODO. In the worst case, the main process blocks until the last test run is done. Bug: v8:13113 Change-Id: Ib60f82c6a1569da042c9f44f7b516e2f40a46f93 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3882972 Reviewed-by: Alexander Schulze Commit-Queue: Michael Achenbach Cr-Commit-Position: refs/heads/main@{#83101} --- tools/testrunner/local/command.py | 17 ++++++++++++++--- tools/testrunner/local/pool.py | 19 ++++++++++++++++++- 2 files changed, 32 insertions(+), 4 deletions(-) diff --git a/tools/testrunner/local/command.py b/tools/testrunner/local/command.py index 912815cc39..e0ef281b4c 100644 --- a/tools/testrunner/local/command.py +++ b/tools/testrunner/local/command.py @@ -43,11 +43,22 @@ def handle_sigterm(process, abort_fun, enabled): """ # Variable to communicate with the signal handler. abort_occured = [False] - def handler(signum, frame): - abort_fun(process, abort_occured) if enabled: - previous = signal.signal(signal.SIGTERM, handler) + # TODO(https://crbug.com/v8/13113): There is a race condition on + # signal handler registration. In rare cases, the SIGTERM for stopping + # a worker might be caught right after a long running process has been + # started (or logic that starts it isn't interrupted), but before the + # registration of the abort_fun. In this case, process.communicate will + # block until the process is done. + previous = signal.getsignal(signal.SIGTERM) + def handler(signum, frame): + abort_fun(process, abort_occured) + if previous and callable(previous): + # Call default signal handler. If this command is called from a worker + # process, its signal handler will gracefully stop processing. + previous(signum, frame) + signal.signal(signal.SIGTERM, handler) try: yield finally: diff --git a/tools/testrunner/local/pool.py b/tools/testrunner/local/pool.py index 766a6b3526..5761c73669 100644 --- a/tools/testrunner/local/pool.py +++ b/tools/testrunner/local/pool.py @@ -66,15 +66,29 @@ def Worker(fn, work_queue, done_queue, """Worker to be run in a child process. The worker stops when the poison pill "STOP" is reached. """ + # Install a default signal handler for SIGTERM that stops the processing + # loop below on the next occasion. The job function "fn" is supposed to + # register their own handler to avoid blocking, but still chain to this + # handler on SIGTERM to terminate the loop quickly. + stop = [False] + def handler(signum, frame): + stop[0] = True + signal.signal(signal.SIGTERM, handler) + try: kwargs = {} if process_context_fn and process_context_args is not None: kwargs.update(process_context=process_context_fn(*process_context_args)) for args in iter(work_queue.get, "STOP"): + if stop[0]: + # SIGINT, SIGTERM or internal hard timeout caught outside the execution + # of "fn".
+ break try: done_queue.put(NormalResult(fn(*args, **kwargs))) except AbortException: - # SIGINT, SIGTERM or internal hard timeout. + # SIGINT, SIGTERM or internal hard timeout caught during execution of + # "fn". break except Exception as e: logging.exception('Unhandled error during worker execution.') @@ -309,6 +323,9 @@ class DefaultExecutionPool(ContextPool): # per worker to make them stop. self.work_queue.put("STOP") + # Send a SIGTERM to all workers. They will gracefully terminate their + # processing loop and if the signal is caught during job execution they + # will try to terminate the ongoing test processes quickly. if self.abort_now: self._terminate_processes() From 6f9e71fa74eb589a48c0f5065ac961a64cb515a3 Mon Sep 17 00:00:00 2001 From: Thibaud Michaud Date: Thu, 8 Sep 2022 15:59:09 +0200 Subject: [PATCH 0017/1772] [wasm][liftoff] Fix and cleanup tracing of return value - Fix tracing of reference return values. StoreTaggedPointer should not use the write barrier since we are writing to the stack. - Avoid re-allocating a slot for the return value when it is already spilled. R=manoskouk@chromium.org Change-Id: I6418c48332964a1c3d407abafaf466b0e789be69 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3882971 Commit-Queue: Thibaud Michaud Reviewed-by: Manos Koukoutos Cr-Commit-Position: refs/heads/main@{#83102} --- src/wasm/baseline/arm/liftoff-assembler-arm.h | 4 +++ .../baseline/arm64/liftoff-assembler-arm64.h | 4 +++ .../baseline/ia32/liftoff-assembler-ia32.h | 4 +++ src/wasm/baseline/liftoff-assembler.h | 1 + src/wasm/baseline/liftoff-compiler.cc | 26 +++++-------------- src/wasm/baseline/x64/liftoff-assembler-x64.h | 4 +++ 6 files changed, 24 insertions(+), 19 deletions(-) diff --git a/src/wasm/baseline/arm/liftoff-assembler-arm.h b/src/wasm/baseline/arm/liftoff-assembler-arm.h index 9474c65c4f..74438b7a35 100644 --- a/src/wasm/baseline/arm/liftoff-assembler-arm.h +++ b/src/wasm/baseline/arm/liftoff-assembler-arm.h @@ -1550,6 +1550,10 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) { pop(r0); } +void LiftoffAssembler::LoadSpillAddress(Register dst, int offset) { + sub(dst, fp, Operand(offset)); +} + #define I32_BINOP(name, instruction) \ void LiftoffAssembler::emit_##name(Register dst, Register lhs, \ Register rhs) { \ diff --git a/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/src/wasm/baseline/arm64/liftoff-assembler-arm64.h index e0fef9b36d..403dd61687 100644 --- a/src/wasm/baseline/arm64/liftoff-assembler-arm64.h +++ b/src/wasm/baseline/arm64/liftoff-assembler-arm64.h @@ -1033,6 +1033,10 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) { } } +void LiftoffAssembler::LoadSpillAddress(Register dst, int offset) { + Sub(dst, fp, offset); +} + #define I32_BINOP(name, instruction) \ void LiftoffAssembler::emit_##name(Register dst, Register lhs, \ Register rhs) { \ diff --git a/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/src/wasm/baseline/ia32/liftoff-assembler-ia32.h index f82fdda07a..6c5e3a0788 100644 --- a/src/wasm/baseline/ia32/liftoff-assembler-ia32.h +++ b/src/wasm/baseline/ia32/liftoff-assembler-ia32.h @@ -1286,6 +1286,10 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) { } } +void LiftoffAssembler::LoadSpillAddress(Register dst, int offset) { + lea(dst, liftoff::GetStackSlot(offset)); +} + void LiftoffAssembler::emit_i32_add(Register dst, Register lhs, Register rhs) { if (lhs != dst) { lea(dst, Operand(lhs, rhs, times_1, 0)); diff --git a/src/wasm/baseline/liftoff-assembler.h 
b/src/wasm/baseline/liftoff-assembler.h index 1f6340b61e..562cadd612 100644 --- a/src/wasm/baseline/liftoff-assembler.h +++ b/src/wasm/baseline/liftoff-assembler.h @@ -658,6 +658,7 @@ class LiftoffAssembler : public TurboAssembler { void Spill(VarState* slot); void SpillLocals(); void SpillAllRegisters(); + inline void LoadSpillAddress(Register dst, int offset); // Clear any uses of {reg} in both the cache and in {possible_uses}. // Any use in the stack is spilled. If any register in {possible_uses} matches diff --git a/src/wasm/baseline/liftoff-compiler.cc b/src/wasm/baseline/liftoff-compiler.cc index 24ed282420..1652f1678a 100644 --- a/src/wasm/baseline/liftoff-compiler.cc +++ b/src/wasm/baseline/liftoff-compiler.cc @@ -2345,40 +2345,28 @@ class LiftoffCompiler { CODE_COMMENT("trace function exit"); // Before making the runtime call, spill all cache registers. __ SpillAllRegisters(); - LiftoffRegList pinned; - // Get a register to hold the stack slot for the return value. - LiftoffRegister info = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); - __ AllocateStackSlot(info.gp(), sizeof(int64_t)); // Store the return value if there is exactly one. Multiple return values // are not handled yet. size_t num_returns = decoder->sig_->return_count(); - if (num_returns == 1) { - ValueKind return_kind = decoder->sig_->GetReturn(0).kind(); - LiftoffRegister return_reg = - __ LoadToRegister(__ cache_state()->stack_state.back(), pinned); - if (is_reference(return_kind)) { - __ StoreTaggedPointer(info.gp(), no_reg, 0, return_reg, pinned); - } else { - __ Store(info.gp(), no_reg, 0, return_reg, - StoreType::ForValueKind(return_kind), pinned); - } - } // Put the parameter in its place. WasmTraceExitDescriptor descriptor; DCHECK_EQ(0, descriptor.GetStackParameterCount()); DCHECK_EQ(1, descriptor.GetRegisterParameterCount()); Register param_reg = descriptor.GetRegisterParameter(0); - if (info.gp() != param_reg) { - __ Move(param_reg, info.gp(), kPointerKind); + if (num_returns == 1) { + auto& return_slot = __ cache_state()->stack_state.back(); + if (return_slot.is_const()) { + __ Spill(&return_slot); + } + DCHECK(return_slot.is_stack()); + __ LoadSpillAddress(param_reg, return_slot.offset()); } source_position_table_builder_.AddPosition( __ pc_offset(), SourcePosition(decoder->position()), false); __ CallRuntimeStub(WasmCode::kWasmTraceExit); DefineSafepoint(); - - __ DeallocateStackSlot(sizeof(int64_t)); } void TierupCheckOnTailCall(FullDecoder* decoder) { diff --git a/src/wasm/baseline/x64/liftoff-assembler-x64.h b/src/wasm/baseline/x64/liftoff-assembler-x64.h index 0af5753584..fe3e897c6b 100644 --- a/src/wasm/baseline/x64/liftoff-assembler-x64.h +++ b/src/wasm/baseline/x64/liftoff-assembler-x64.h @@ -1018,6 +1018,10 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) { } } +void LiftoffAssembler::LoadSpillAddress(Register dst, int offset) { + leaq(dst, liftoff::GetStackSlot(offset)); +} + void LiftoffAssembler::emit_trace_instruction(uint32_t markid) { Assembler::emit_trace_instruction(Immediate(markid)); } From ee95a9064a3f2c3c1f59b71a570487e6d97c682a Mon Sep 17 00:00:00 2001 From: Manos Koukoutos Date: Fri, 9 Sep 2022 12:12:13 +0200 Subject: [PATCH 0018/1772] Fix comparison between different signs Currently, compilation may fail in some configurations. 
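The underlying issue is a plain signed/unsigned comparison: kMaxItemCount is a uint32_t while std::numeric_limits<int>::max() yields an int, and builds that treat -Wsign-compare as an error reject the bare comparison. A standalone reproduction, with an illustrative constant standing in for the real kMaxItemCount:

  #include <cstdint>
  #include <limits>

  constexpr uint32_t kMaxItemCount = 0x3ffffffe;  // illustrative value

  // May fail to compile under -Werror with -Wsign-compare enabled:
  // uint32_t compared against int.
  // static_assert(kMaxItemCount < std::numeric_limits<int>::max());

  // Casting the signed bound makes the comparison homogeneous; this is safe
  // because int's maximum always fits in uint32_t.
  static_assert(kMaxItemCount <
                static_cast<uint32_t>(std::numeric_limits<int>::max()));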
Change-Id: I2fd6a71c4f43c66416429a9d3dbbf9970c68aeaf Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3885886 Reviewed-by: Leszek Swirski Commit-Queue: Manos Koukoutos Cr-Commit-Position: refs/heads/main@{#83103} --- src/web-snapshot/web-snapshot.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/web-snapshot/web-snapshot.h b/src/web-snapshot/web-snapshot.h index 48249f6ec2..b96efeb0f0 100644 --- a/src/web-snapshot/web-snapshot.h +++ b/src/web-snapshot/web-snapshot.h @@ -120,7 +120,8 @@ class WebSnapshotSerializerDeserializer { static_cast<uint32_t>(FixedArray::kMaxLength - 1); // This ensures indices and lengths can be converted between uint32_t and int // without problems: - static_assert(kMaxItemCount < std::numeric_limits<int>::max()); + static_assert(kMaxItemCount < + static_cast<uint32_t>(std::numeric_limits<int>::max())); protected: explicit WebSnapshotSerializerDeserializer(Isolate* isolate) From 725e5bcf1e27929aa898997853b6a49da10b8c3d Mon Sep 17 00:00:00 2001 From: Leon Bettscheider Date: Fri, 9 Sep 2022 10:10:57 +0000 Subject: [PATCH 0019/1772] [heap] Reschedule minor concurrent marking in allocation observer This CL reschedules minor concurrent marking in MinorMCTaskObserver. This allows concurrent marking to make continuous progress. Bug: v8:13012 Change-Id: I5cc4e02a60993dd5ce970244274d4d5f99b4a550 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3885885 Reviewed-by: Omer Katz Commit-Queue: Leon Bettscheider Cr-Commit-Position: refs/heads/main@{#83104} --- src/heap/heap.cc | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/heap/heap.cc b/src/heap/heap.cc index 0fcd578430..5a307ff9e1 100644 --- a/src/heap/heap.cc +++ b/src/heap/heap.cc @@ -199,6 +199,13 @@ class MinorMCTaskObserver final : public AllocationObserver { : AllocationObserver(step_size), heap_(heap) {} void Step(int bytes_allocated, Address, size_t) override { + if (v8_flags.concurrent_minor_mc) { + if (heap_->incremental_marking()->IsMinorMarking()) { + heap_->concurrent_marking()->RescheduleJobIfNeeded( + GarbageCollector::MINOR_MARK_COMPACTOR); + } + } + heap_->StartMinorMCIncrementalMarkingIfNeeded(); } From cfca972e10788329ee87fb910240da8684f8edd2 Mon Sep 17 00:00:00 2001 From: Leon Bettscheider Date: Thu, 8 Sep 2022 22:40:03 +0000 Subject: [PATCH 0020/1772] [heap] Cancel concurrent workers in minor final pause This CL cancels concurrent workers instead of joining them in MarkLiveObjects. Joining could trigger another costly run.
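At the final-pause call site, the difference is between waiting and preempting. A sketch of the intended usage, based on the methods touched in the diff below:

  // Before: Join() waits for the marking job to complete, so a job that was
  // just rescheduled by the allocation observer can run another full,
  // costly pass inside the pause.
  heap()->concurrent_marking()->Join();

  // After: Cancel() preempts the workers ASAP and clears the scheduled
  // collector; per-chunk data the workers already produced is still flushed.
  heap()->concurrent_marking()->Cancel();
  heap()->concurrent_marking()->FlushMemoryChunkData(
      non_atomic_marking_state());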
Bug: v8:13012 Change-Id: I873db6e9d612e219060de0fa2447f6c7c0e9de3b Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3885876 Reviewed-by: Omer Katz Commit-Queue: Leon Bettscheider Cr-Commit-Position: refs/heads/main@{#83105} --- src/heap/concurrent-marking.cc | 5 +++++ src/heap/concurrent-marking.h | 1 + src/heap/mark-compact.cc | 2 +- 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/src/heap/concurrent-marking.cc b/src/heap/concurrent-marking.cc index 1852355810..f9ee0dd8f7 100644 --- a/src/heap/concurrent-marking.cc +++ b/src/heap/concurrent-marking.cc @@ -954,6 +954,11 @@ bool ConcurrentMarking::Pause() { return true; } +void ConcurrentMarking::Cancel() { + Pause(); + garbage_collector_.reset(); +} + bool ConcurrentMarking::IsStopped() { if (!v8_flags.concurrent_marking && !v8_flags.parallel_marking) return true; diff --git a/src/heap/concurrent-marking.h b/src/heap/concurrent-marking.h index 07b3c8b652..0759c8306c 100644 --- a/src/heap/concurrent-marking.h +++ b/src/heap/concurrent-marking.h @@ -67,6 +67,7 @@ class V8_EXPORT_PRIVATE ConcurrentMarking { // Preempts ongoing job ASAP. Returns true if concurrent marking was in // progress, false otherwise. bool Pause(); + void Cancel(); // Schedules asynchronous job to perform concurrent marking at |priority| if // not already running, otherwise adjusts the number of workers running job diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc index 38630ed87f..5fd704b622 100644 --- a/src/heap/mark-compact.cc +++ b/src/heap/mark-compact.cc @@ -5809,7 +5809,7 @@ void MinorMarkCompactCollector::FinishConcurrentMarking() { if (v8_flags.concurrent_marking) { DCHECK_EQ(heap()->concurrent_marking()->garbage_collector(), GarbageCollector::MINOR_MARK_COMPACTOR); - heap()->concurrent_marking()->Join(); + heap()->concurrent_marking()->Cancel(); heap()->concurrent_marking()->FlushMemoryChunkData( non_atomic_marking_state()); } From 48a511445053e870a77b2974760c24004e2690c9 Mon Sep 17 00:00:00 2001 From: Greg Thompson Date: Fri, 9 Sep 2022 10:11:03 +0200 Subject: [PATCH 0021/1772] [fuchsia] Reland: Migrate d8 to a component framework v2 Fuchsia component In the process, switch to using the Fuchsia GN SDK templates for building the component and package. gni/v8.cmx is retained temporarily until out-of-tree consumers have been updated. Bug: v8:12589 Change-Id: If08cfcbf579696482e7cd60a8b8b80bcc4c7dab2 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3885881 Auto-Submit: Greg Thompson Reviewed-by: Michael Achenbach Commit-Queue: Greg Thompson Reviewed-by: Alexander Schulze Cr-Commit-Position: refs/heads/main@{#83106} --- BUILD.gn | 19 +++++++++++++------ gni/OWNERS | 3 ++- gni/v8.cml | 21 +++++++++++++++++++++ 3 files changed, 36 insertions(+), 7 deletions(-) create mode 100644 gni/v8.cml diff --git a/BUILD.gn b/BUILD.gn index 78bddc354e..43667839c5 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -6385,18 +6385,25 @@ group("v8_archive") { # TODO(dglazkov): Remove the "!build_with_chromium" condition once this clause # is removed from Chromium. 
if (is_fuchsia && !build_with_chromium) { - import("//build/config/fuchsia/rules.gni") + import("//build/config/fuchsia/generate_runner_scripts.gni") + import("//third_party/fuchsia-sdk/sdk/build/component.gni") + import("//third_party/fuchsia-sdk/sdk/build/package.gni") - cr_fuchsia_package("d8_fuchsia_pkg") { + fuchsia_component("d8_component") { testonly = true - binary = ":d8" - manifest = "gni/v8.cmx" - package_name_override = "d8" + manifest = "gni/v8.cml" + data_deps = [ ":d8" ] + } + + fuchsia_package("d8_pkg") { + testonly = true + package_name = "d8" + deps = [ ":d8_component" ] } fuchsia_package_installer("d8_fuchsia") { testonly = true - package = ":d8_fuchsia_pkg" + package = ":d8_pkg" package_name = "d8" } } diff --git a/gni/OWNERS b/gni/OWNERS index fa1262b503..5f16e60686 100644 --- a/gni/OWNERS +++ b/gni/OWNERS @@ -1,5 +1,6 @@ file:../INFRA_OWNERS +per-file v8.cml=victorgomes@chromium.org per-file v8.cmx=victorgomes@chromium.org per-file release_branch_toggle.gni=v8-ci-autoroll-builder@chops-service-accounts.iam.gserviceaccount.com -per-file release_branch_toggle.gni=vahl@chromium.org \ No newline at end of file +per-file release_branch_toggle.gni=vahl@chromium.org diff --git a/gni/v8.cml b/gni/v8.cml new file mode 100644 index 0000000000..4d74c7626c --- /dev/null +++ b/gni/v8.cml @@ -0,0 +1,21 @@ +// Copyright 2022 The V8 project authors +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +{ + include: [ "syslog/client.shard.cml" ], + program: { + runner: "elf", + binary: "d8", + }, + use: [ + { + protocol: [ + "fuchsia.kernel.VmexResource", + ], + }, + { + storage: "tmp", + path: "/tmp", + }, + ], +} From 757398413ada60d1134cb1502a28ed908ad83083 Mon Sep 17 00:00:00 2001 From: Omer Katz Date: Fri, 9 Sep 2022 13:02:06 +0200 Subject: [PATCH 0022/1772] Reland "[heap] Do precise search in free list for new space" This is a reland of commit 72d6dc6d5e5b43f9b09cb5cc125241fcb62a674c Original change's description: > [heap] Do precise search in free list for new space > > In case the free list fast path fails, do a precise search through the > precise category for the current allocation. 
> > Bug: v8:12612 > Change-Id: I120e64b0d09b9cf5a776188180d6e6c53c44886b > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3879494 > Reviewed-by: Michael Lippautz > Commit-Queue: Omer Katz > Cr-Commit-Position: refs/heads/main@{#83096} Bug: v8:12612 Change-Id: I2075c8a509265a16a133b309f98eefad7b011212 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3885889 Commit-Queue: Omer Katz Reviewed-by: Michael Lippautz Cr-Commit-Position: refs/heads/main@{#83107} --- src/heap/free-list.cc | 47 +++++++++++++++++++++++++++++++++++++++--- src/heap/free-list.h | 29 +++++++++++++++++++++++++- src/heap/new-spaces.cc | 2 +- 3 files changed, 73 insertions(+), 5 deletions(-) diff --git a/src/heap/free-list.cc b/src/heap/free-list.cc index 512f6759d7..b28c6a64c7 100644 --- a/src/heap/free-list.cc +++ b/src/heap/free-list.cc @@ -111,6 +111,10 @@ void FreeListCategory::Relink(FreeList* owner) { FreeList* FreeList::CreateFreeList() { return new FreeListManyCachedOrigin(); } +FreeList* FreeList::CreateFreeListForNewSpace() { + return new FreeListManyCachedOriginForNewSpace(); +} + FreeSpace FreeList::TryFindNodeIn(FreeListCategoryType type, size_t minimum_size, size_t* node_size) { FreeListCategory* category = categories_[type]; @@ -361,12 +365,14 @@ FreeSpace FreeListManyCachedFastPath::Allocate(size_t size_in_bytes, // Fast path part 2: searching the medium categories for tiny objects if (node.is_null()) { if (size_in_bytes <= kTinyObjectMaxSize) { + DCHECK_EQ(kFastPathFirstCategory, first_category); for (type = next_nonempty_category[kFastPathFallBackTiny]; type < kFastPathFirstCategory; type = next_nonempty_category[type + 1]) { node = TryFindNodeIn(type, size_in_bytes, node_size); if (!node.is_null()) break; } + first_category = kFastPathFallBackTiny; } } @@ -387,19 +393,41 @@ FreeSpace FreeListManyCachedFastPath::Allocate(size_t size_in_bytes, } } - // Updating cache - if (!node.is_null() && categories_[type] == nullptr) { - UpdateCacheAfterRemoval(type); + if (!node.is_null()) { + if (categories_[type] == nullptr) UpdateCacheAfterRemoval(type); + Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size); } #ifdef DEBUG CheckCacheIntegrity(); #endif + DCHECK(IsVeryLong() || Available() == SumFreeLists()); + return node; +} + +// ------------------------------------------------ +// FreeListManyCachedFastPathForNewSpace implementation + +FreeSpace FreeListManyCachedFastPathForNewSpace::Allocate( + size_t size_in_bytes, size_t* node_size, AllocationOrigin origin) { + FreeSpace node = + FreeListManyCachedFastPath::Allocate(size_in_bytes, node_size, origin); + if (!node.is_null()) return node; + + // Search through the precise category for a fit + FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes); + node = SearchForNodeInList(type, size_in_bytes, node_size); + if (!node.is_null()) { + if (categories_[type] == nullptr) UpdateCacheAfterRemoval(type); Page::FromHeapObject(node)->IncreaseAllocatedBytes(*node_size); } +#ifdef DEBUG + CheckCacheIntegrity(); +#endif + DCHECK(IsVeryLong() || Available() == SumFreeLists()); return node; } @@ -418,6 +446,19 @@ FreeSpace FreeListManyCachedOrigin::Allocate(size_t size_in_bytes, } } +// ------------------------------------------------ +// FreeListManyCachedOriginForNewSpace implementation + +FreeSpace FreeListManyCachedOriginForNewSpace::Allocate( + size_t size_in_bytes, size_t* node_size, AllocationOrigin origin) { + if (origin == AllocationOrigin::kGC) { + return FreeListManyCached::Allocate(size_in_bytes, node_size, 
origin); + } else { + return FreeListManyCachedFastPathForNewSpace::Allocate(size_in_bytes, + node_size, origin); + } +} + // ------------------------------------------------ // Generic FreeList methods (non alloc/free related) diff --git a/src/heap/free-list.h b/src/heap/free-list.h index 896eed570b..c8fb58a36a 100644 --- a/src/heap/free-list.h +++ b/src/heap/free-list.h @@ -122,8 +122,10 @@ class FreeListCategory { // categories would scatter allocation more. class FreeList { public: - // Creates a Freelist of the default class (FreeListLegacy for now). + // Creates a Freelist of the default class. V8_EXPORT_PRIVATE static FreeList* CreateFreeList(); + // Creates a Freelist for new space. + V8_EXPORT_PRIVATE static FreeList* CreateFreeListForNewSpace(); virtual ~FreeList() = default; @@ -473,6 +475,21 @@ class V8_EXPORT_PRIVATE FreeListManyCachedFastPath : public FreeListManyCached { FreeListManyCachedFastPathSelectFastAllocationFreeListCategoryType); }; +// Same as FreeListManyCachedFastPath but falls back to a precise search of the +// precise category in case allocation fails. Because new space is relatively +// small, the free list is also relatively small and larger categories are more +// likely to be empty. The precise search is meant to avoid an allocation +// failure and thus avoid GCs. +class V8_EXPORT_PRIVATE FreeListManyCachedFastPathForNewSpace + : public FreeListManyCachedFastPath { + public: + V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes, + size_t* node_size, + AllocationOrigin origin) override; + + protected: +}; + // Uses FreeListManyCached if in the GC; FreeListManyCachedFastPath otherwise. // The reasoning behind this FreeList is the following: the GC runs in // parallel, and therefore, more expensive allocations there are less @@ -489,6 +506,16 @@ class V8_EXPORT_PRIVATE FreeListManyCachedOrigin AllocationOrigin origin) override; }; +// Similar to FreeListManyCachedOrigin but uses +// FreeListManyCachedFastPathForNewSpace for allocations outside the GC. +class V8_EXPORT_PRIVATE FreeListManyCachedOriginForNewSpace + : public FreeListManyCachedFastPathForNewSpace { + public: + V8_WARN_UNUSED_RESULT FreeSpace Allocate(size_t size_in_bytes, + size_t* node_size, + AllocationOrigin origin) override; +}; + } // namespace internal } // namespace v8 diff --git a/src/heap/new-spaces.cc b/src/heap/new-spaces.cc index 89f7501655..719d05e26b 100644 --- a/src/heap/new-spaces.cc +++ b/src/heap/new-spaces.cc @@ -902,7 +902,7 @@ PagedSpaceForNewSpace::PagedSpaceForNewSpace( LinearAllocationArea& allocation_info, LinearAreaOriginalData& linear_area_original_data) : PagedSpaceBase(heap, NEW_SPACE, NOT_EXECUTABLE, - FreeList::CreateFreeList(), allocation_counter, + FreeList::CreateFreeListForNewSpace(), allocation_counter, allocation_info, linear_area_original_data, CompactionSpaceKind::kNone), initial_capacity_(RoundDown(initial_capacity, Page::kPageSize)), From 53d24ef68bf6a6c2148940170dc22e82a083c67c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Samuel=20Gro=C3=9F?= Date: Fri, 9 Sep 2022 11:38:00 +0000 Subject: [PATCH 0023/1772] [sandbox] Fix operation ordering during String externalization MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When externalizing a string, the external pointer slots need to be initialized before the new Map is installed. Otherwise, a GC marking thread may see the new Map before the slots are valid. In that case, it would attempt to mark invalid ExternalPointerTable entries as alive, leading to a crash. 
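The invariant being restored is a publication-ordering rule: a concurrent marker dispatches on an object's map, so every slot that map implies must already be valid when the map becomes visible. Schematically (a sketch using names from the diff below; `string` stands for the string being externalized):

  // 1. Initialize the external pointer slots; null table entries are valid
  //    and safe for a marking thread to visit.
  InitExternalPointerFieldsDuringExternalization(string, new_map, isolate);
  // 2. Publish the new shape. The release store guarantees that a marker
  //    which observes the external-string map also observes the
  //    initialized slots.
  string.set_map(new_map, kReleaseStore);
  // 3. Only then install the actual resource behind the slots.
  self.SetResource(isolate, resource);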
Bug: chromium:1361557 Change-Id: I47f19e6d9576fab0809dca36388cdfa9c28113e7 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3885891 Reviewed-by: Patrick Thier Commit-Queue: Samuel Groß Cr-Commit-Position: refs/heads/main@{#83108} --- src/objects/string.cc | 34 ++++++++++++++++++++++++++++++---- 1 file changed, 30 insertions(+), 4 deletions(-) diff --git a/src/objects/string.cc b/src/objects/string.cc index 0601a978d3..e97fb99669 100644 --- a/src/objects/string.cc +++ b/src/objects/string.cc @@ -183,6 +183,18 @@ Map ComputeThinStringMap(IsolateT* isolate, StringShape from_string_shape, return one_byte ? roots.thin_one_byte_string_map() : roots.thin_string_map(); } +void InitExternalPointerFieldsDuringExternalization(String string, Map new_map, + Isolate* isolate) { + string.InitExternalPointerField<kExternalStringResourceTag>( + ExternalString::kResourceOffset, isolate, kNullAddress); + bool is_uncached = (new_map.instance_type() & kUncachedExternalStringMask) == + kUncachedExternalStringTag; + if (!is_uncached) { + string.InitExternalPointerField<kExternalStringResourceDataTag>( + ExternalString::kResourceDataOffset, isolate, kNullAddress); + } +} + } // namespace template <typename T> void String::MakeExternalDuringGC(Isolate* isolate, T* resource) { @@ -347,6 +359,12 @@ void String::MakeExternalDuringGC(Isolate* isolate, T* resource) { isolate->heap()->NotifyObjectSizeChange(*this, size, new_size, ClearRecordedSlots::kNo); + // The external pointer slots must be initialized before the new map is + // installed.
Otherwise, a GC marking thread may see the new map before the + // slots are initialized and attempt to mark the (invalid) external pointers + // table entries as alive. + InitExternalPointerFieldsDuringExternalization(*this, new_map, isolate); + // We are storing the new map using release store after creating a filler in // the NotifyObjectSizeChange call for the left-over space to avoid races with // the sweeper thread. this->set_map(new_map, kReleaseStore); ExternalOneByteString self = ExternalOneByteString::cast(*this); - self.InitExternalPointerFields(isolate); self.SetResource(isolate, resource); isolate->heap()->RegisterExternalString(*this); // Force regeneration of the hash value. From d17bc74fc0b80615d2288d6f7cb162fe792c4eeb Mon Sep 17 00:00:00 2001 From: Ting Chou Date: Thu, 8 Sep 2022 16:06:37 +0800 Subject: [PATCH 0024/1772] [riscv] Fix cctest/test-assembler-riscv64/RISCV_UTEST_swlwu. 32-bit values are held in a sign-extended format in 64-bit registers. Which the vaule 0x856AF894 becomes 0xFFFFFFFF856AF894 and failed equality comparison with lwu's result 0x00000000856AF894. XOR the result with 0xFFFFFFFF00000000 before comparison. R=yahan@iscas.ac.cn Change-Id: I4d225ff653070022023ac7f10257ad0c30c24e5b Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3881601 Commit-Queue: Yahan Lu Reviewed-by: Yahan Lu Cr-Commit-Position: refs/heads/main@{#83109} --- test/cctest/test-helper-riscv64.h | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/test/cctest/test-helper-riscv64.h b/test/cctest/test-helper-riscv64.h index 3cfc5427c6..6bd2c996e5 100644 --- a/test/cctest/test-helper-riscv64.h +++ b/test/cctest/test-helper-riscv64.h @@ -177,6 +177,10 @@ template void GenAndRunTestForLoadStore(T value, Func test_generator) { DCHECK(sizeof(T) == 4 || sizeof(T) == 8); + using INT_T = typename std::conditional< + std::is_integral::value, T, + typename std::conditional::type>::type; + Isolate* isolate = CcTest::i_isolate(); HandleScope scope(isolate); @@ -194,6 +198,11 @@ void GenAndRunTestForLoadStore(T value, Func test_generator) { assm.fmv_x_w(a0, fa0); } else if (std::is_same::value) { assm.fmv_x_d(a0, fa0); + } else if (std::is_same::value) { + if (base::bit_cast(value) & 0x80000000) { + assm.RV_li(t5, 0xffffffff00000000); + assm.xor_(a0, a0, t5); + } } assm.jr(ra); @@ -202,10 +211,6 @@ void GenAndRunTestForLoadStore(T value, Func test_generator) { Handle code = Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - using INT_T = typename std::conditional< - std::is_integral::value, T, - typename std::conditional::type>::type; - auto f = GeneratedCode::FromCode(*code); int64_t tmp = 0; From 184efc149a20a62a50dc7c0335befa97fd456512 Mon Sep 17 00:00:00 2001 From: Ting Chou Date: Wed, 7 Sep 2022 08:21:27 +0800 Subject: [PATCH 0025/1772] [riscv] Fix cctest/test-assembler-riscv*/RISCV_UTEST_FLOAT_WIDENING_vfwadd_vf. Storing with E64 when SEW=32 has EMUL=2, which copies |n| 64 bit wide data to the result double array already. Besides, accessing v1 when EMUL=2 is reserved. 
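The register-group arithmetic behind this: RVV gives a vector operand an effective grouping EMUL = (EEW / SEW) * LMUL, so a store with EEW=64 under SEW=32 and LMUL=1 operates on groups of two registers. v0 then names the pair {v0, v1}, a single store writes all n results, and v1 is not a valid group name on its own. A compile-time restatement with illustrative constants:

  constexpr int kLMUL = 1;  // configured register grouping
  constexpr int kSEW = 32;  // element width of the computation
  constexpr int kEEW = 64;  // effective element width of the store
  constexpr int kEMUL = (kEEW / kSEW) * kLMUL;
  static_assert(kEMUL == 2,
                "the E64 store of v0 targets the pair {v0, v1}; a second "
                "store of v1 is redundant and v1 is not a valid group name");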
R=yahan@iscas.ac.cn Change-Id: I0870d53c36b642529cab753409f52016d79219b8 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3878442 Commit-Queue: Yahan Lu Reviewed-by: Yahan Lu Cr-Commit-Position: refs/heads/main@{#83110} --- test/cctest/test-assembler-riscv32.cc | 2 -- test/cctest/test-assembler-riscv64.cc | 2 -- 2 files changed, 4 deletions(-) diff --git a/test/cctest/test-assembler-riscv32.cc b/test/cctest/test-assembler-riscv32.cc index ac1f603e38..29f157055f 100644 --- a/test/cctest/test-assembler-riscv32.cc +++ b/test/cctest/test-assembler-riscv32.cc @@ -2068,9 +2068,7 @@ UTEST_RVV_VF_VV_FORM_WITH_OP(vfdiv_vv, /) } \ __ instr_name(v0, v2, fa1); \ __ li(t1, Operand(int64_t(result))); \ - __ li(t2, Operand(int64_t(&result[n / 2]))); \ __ vs(v0, t1, 0, VSew::E64); \ - __ vs(v1, t2, 0, VSew::E64); \ }; \ for (float rs1_fval : compiler::ValueHelper::GetVector<float>()) { \ for (float rs2_fval : compiler::ValueHelper::GetVector<float>()) { \ diff --git a/test/cctest/test-assembler-riscv64.cc b/test/cctest/test-assembler-riscv64.cc index 73e68ec953..804ca05824 100644 --- a/test/cctest/test-assembler-riscv64.cc +++ b/test/cctest/test-assembler-riscv64.cc @@ -2332,9 +2332,7 @@ UTEST_RVV_VF_VV_FORM_WITH_OP(vfdiv_vv, /) } \ __ instr_name(v0, v2, fa1); \ __ li(t1, Operand(int64_t(result))); \ - __ li(t2, Operand(int64_t(&result[n / 2]))); \ __ vs(v0, t1, 0, VSew::E64); \ - __ vs(v1, t2, 0, VSew::E64); \ }; \ for (float rs1_fval : compiler::ValueHelper::GetVector<float>()) { \ for (float rs2_fval : compiler::ValueHelper::GetVector<float>()) { \ From 6852c402e7bb3a91a366922ccd15b930907425c2 Mon Sep 17 00:00:00 2001 From: Matthias Liedtke Date: Fri, 9 Sep 2022 13:54:45 +0000 Subject: [PATCH 0026/1772] Revert "[sandbox] Fold V8_SANDBOXED_EXTERNAL_POINTERS into V8_ENABLE_SANDBOX" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 49c59678301fafcd7f70221cdd7936253a229093. Reason for revert: The change is suspected of breaking chromium's determinism test: https://ci.chromium.org/ui/p/chromium/builders/ci/Deterministic%20Linux/35003/overview Original change's description: > [sandbox] Fold V8_SANDBOXED_EXTERNAL_POINTERS into V8_ENABLE_SANDBOX > > Now that all external pointers have been sandboxed, > V8_SANDBOXED_EXTERNAL_POINTERS is no longer needed. This change also > shrinks external pointer slots to 32 bits when the sandbox is enabled.
> > Bug: v8:10391 > Change-Id: Iccbef27ac107b988cb23fe9ef66da6fe0bae087a > Cq-Include-Trybots: luci.v8.try:v8_linux64_heap_sandbox_dbg_ng,v8_linux_arm64_sim_heap_sandbox_dbg_ng > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3869269 > Reviewed-by: Leszek Swirski > Reviewed-by: Manos Koukoutos > Reviewed-by: Nico Hartmann > Reviewed-by: Igor Sheludko > Commit-Queue: Samuel Groß > Cr-Commit-Position: refs/heads/main@{#83083} Bug: v8:10391 Change-Id: I515ba771aa21f58b752a3a5b36b4deb2abc5f9c0 Cq-Include-Trybots: luci.v8.try:v8_linux64_heap_sandbox_dbg_ng,v8_linux_arm64_sim_heap_sandbox_dbg_ng No-Presubmit: true No-Tree-Checks: true No-Try: true Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3886870 Commit-Queue: Matthias Liedtke Bot-Commit: Rubber Stamper Owners-Override: Matthias Liedtke Cr-Commit-Position: refs/heads/main@{#83111} --- BUILD.gn | 12 ++++ include/v8-initialization.h | 6 +- include/v8-internal.h | 19 ++++-- src/api/api.cc | 11 ++++ src/codegen/tnode.h | 2 +- src/common/globals.h | 8 ++- src/objects/slots-inl.h | 4 +- src/torque/torque-parser.cc | 2 + src/wasm/wasm-objects.tq | 6 +- test/cctest/test-api.cc | 5 +- test/cctest/test-strings.cc | 16 ++--- tools/v8heapconst.py | 122 ++++++++++++++++++------------------ 12 files changed, 130 insertions(+), 83 deletions(-) diff --git a/BUILD.gn b/BUILD.gn index 43667839c5..4b8965626b 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -307,6 +307,10 @@ declare_args() { # Sets -DV8_ENABLE_SANDBOX. v8_enable_sandbox = "" + # Enable sandboxing for all external pointers. Requires v8_enable_sandbox. + # Sets -DV8_SANDBOXED_EXTERNAL_POINTERS. + v8_enable_sandboxed_external_pointers = false + # Enable all available sandbox features. Implies v8_enable_sandbox. v8_enable_sandbox_future = false @@ -530,6 +534,7 @@ if (v8_enable_sandbox == "") { # Enable all available sandbox features if sandbox future is enabled. if (v8_enable_sandbox_future) { + v8_enable_sandboxed_external_pointers = true v8_enable_sandbox = true } @@ -564,6 +569,9 @@ assert(!v8_enable_sandbox || v8_enable_pointer_compression_shared_cage, assert(!v8_enable_sandbox || v8_enable_external_code_space, "The sandbox requires the external code space") +assert(!v8_enable_sandboxed_external_pointers || v8_enable_sandbox, + "Sandboxed external pointers require the sandbox") + assert(!v8_expose_memory_corruption_api || v8_enable_sandbox, "The Memory Corruption API requires the sandbox") @@ -741,6 +749,7 @@ external_v8_defines = [ "V8_31BIT_SMIS_ON_64BIT_ARCH", "V8_COMPRESS_ZONES", "V8_ENABLE_SANDBOX", + "V8_SANDBOXED_EXTERNAL_POINTERS", "V8_DEPRECATION_WARNINGS", "V8_IMMINENT_DEPRECATION_WARNINGS", "V8_NO_ARGUMENTS_ADAPTOR", @@ -771,6 +780,9 @@ if (v8_enable_zone_compression) { if (v8_enable_sandbox) { enabled_external_v8_defines += [ "V8_ENABLE_SANDBOX" ] } +if (v8_enable_sandboxed_external_pointers) { + enabled_external_v8_defines += [ "V8_SANDBOXED_EXTERNAL_POINTERS" ] +} if (v8_deprecation_warnings) { enabled_external_v8_defines += [ "V8_DEPRECATION_WARNINGS" ] } diff --git a/include/v8-initialization.h b/include/v8-initialization.h index d3e35d6ec5..7bbec662a7 100644 --- a/include/v8-initialization.h +++ b/include/v8-initialization.h @@ -100,6 +100,9 @@ class V8_EXPORT V8 { const int kBuildConfiguration = (internal::PointerCompressionIsEnabled() ? kPointerCompression : 0) | (internal::SmiValuesAre31Bits() ? k31BitSmis : 0) | + (internal::SandboxedExternalPointersAreEnabled() + ? kSandboxedExternalPointers + : 0) | (internal::SandboxIsEnabled() ? 
kSandbox : 0); return Initialize(kBuildConfiguration); } @@ -270,7 +273,8 @@ class V8_EXPORT V8 { enum BuildConfigurationFeatures { kPointerCompression = 1 << 0, k31BitSmis = 1 << 1, - kSandbox = 1 << 2, + kSandboxedExternalPointers = 1 << 2, + kSandbox = 1 << 3, }; /** diff --git a/include/v8-internal.h b/include/v8-internal.h index ed6aff1426..818c720cb4 100644 --- a/include/v8-internal.h +++ b/include/v8-internal.h @@ -166,6 +166,14 @@ constexpr bool SandboxIsEnabled() { #endif } +constexpr bool SandboxedExternalPointersAreEnabled() { +#ifdef V8_SANDBOXED_EXTERNAL_POINTERS + return true; +#else + return false; +#endif +} + // SandboxedPointers are guaranteed to point into the sandbox. This is achieved // for example by storing them as offset rather than as raw pointers. using SandboxedPointer_t = Address; @@ -264,7 +272,7 @@ using ExternalPointerHandle = uint32_t; // ExternalPointers point to objects located outside the sandbox. When // sandboxed external pointers are enabled, these are stored on heap as // ExternalPointerHandles, otherwise they are simply raw pointers. -#ifdef V8_ENABLE_SANDBOX +#ifdef V8_SANDBOXED_EXTERNAL_POINTERS using ExternalPointer_t = ExternalPointerHandle; #else using ExternalPointer_t = Address; @@ -391,8 +399,9 @@ constexpr uint64_t kAllExternalPointerTypeTags[] = { // When the sandbox is enabled, external pointers marked as "sandboxed" above // use the external pointer table (i.e. are sandboxed). This allows a gradual -// rollout of external pointer sandboxing. If the sandbox is off, no external -// pointers are sandboxed. +// rollout of external pointer sandboxing. If V8_SANDBOXED_EXTERNAL_POINTERS is +// defined, all external pointers are sandboxed. If the sandbox is off, no +// external pointers are sandboxed. // // Sandboxed external pointer tags are available when compressing pointers even // when the sandbox is off. Some tags (e.g. kWaiterQueueNodeTag) are used @@ -400,7 +409,9 @@ constexpr uint64_t kAllExternalPointerTypeTags[] = { // alignment requirements. #define sandboxed(X) (X << kExternalPointerTagShift) | kExternalPointerMarkBit #define unsandboxed(X) kUnsandboxedExternalPointerTag -#if defined(V8_COMPRESS_POINTERS) +#if defined(V8_SANDBOXED_EXTERNAL_POINTERS) +#define EXTERNAL_POINTER_TAG_ENUM(Name, State, Bits) Name = sandboxed(Bits), +#elif defined(V8_COMPRESS_POINTERS) #define EXTERNAL_POINTER_TAG_ENUM(Name, State, Bits) Name = State(Bits), #else #define EXTERNAL_POINTER_TAG_ENUM(Name, State, Bits) Name = unsandboxed(Bits), diff --git a/src/api/api.cc b/src/api/api.cc index 0f272393b4..6eaea2dc3e 100644 --- a/src/api/api.cc +++ b/src/api/api.cc @@ -6157,6 +6157,17 @@ bool v8::V8::Initialize(const int build_config) { kEmbedderSmiValueSize, internal::kSmiValueSize); } + const bool kEmbedderSandboxedExternalPointers = + (build_config & kSandboxedExternalPointers) != 0; + if (kEmbedderSandboxedExternalPointers != + V8_SANDBOXED_EXTERNAL_POINTERS_BOOL) { + FATAL( + "Embedder-vs-V8 build configuration mismatch. On embedder side " + "sandboxed external pointers is %s while on V8 side it's %s.", + kEmbedderSandboxedExternalPointers ? "ENABLED" : "DISABLED", + V8_SANDBOXED_EXTERNAL_POINTERS_BOOL ? 
"ENABLED" : "DISABLED"); + } + const bool kEmbedderSandbox = (build_config & kSandbox) != 0; if (kEmbedderSandbox != V8_ENABLE_SANDBOX_BOOL) { FATAL( diff --git a/src/codegen/tnode.h b/src/codegen/tnode.h index 1094e7faaf..ecd2974b95 100644 --- a/src/codegen/tnode.h +++ b/src/codegen/tnode.h @@ -88,7 +88,7 @@ struct ExternalPointerHandleT : Uint32T { static constexpr MachineType kMachineType = MachineType::Uint32(); }; -#ifdef V8_ENABLE_SANDBOX +#ifdef V8_SANDBOXED_EXTERNAL_POINTERS struct ExternalPointerT : Uint32T { static constexpr MachineType kMachineType = MachineType::Uint32(); }; diff --git a/src/common/globals.h b/src/common/globals.h index 467a5197ec..bd3fd56e87 100644 --- a/src/common/globals.h +++ b/src/common/globals.h @@ -124,6 +124,12 @@ namespace internal { #define V8_CAN_CREATE_SHARED_HEAP_BOOL false #endif +#ifdef V8_SANDBOXED_EXTERNAL_POINTERS +#define V8_SANDBOXED_EXTERNAL_POINTERS_BOOL true +#else +#define V8_SANDBOXED_EXTERNAL_POINTERS_BOOL false +#endif + #ifdef V8_ENABLE_SANDBOX #define V8_ENABLE_SANDBOX_BOOL true #else @@ -505,7 +511,7 @@ static_assert(kPointerSize == (1 << kPointerSizeLog2)); // This type defines raw storage type for external (or off-V8 heap) pointers // stored on V8 heap. constexpr int kExternalPointerSlotSize = sizeof(ExternalPointer_t); -#ifdef V8_ENABLE_SANDBOX +#ifdef V8_SANDBOXED_EXTERNAL_POINTERS static_assert(kExternalPointerSlotSize == kTaggedSize); #else static_assert(kExternalPointerSlotSize == kSystemPointerSize); diff --git a/src/objects/slots-inl.h b/src/objects/slots-inl.h index 989a553f81..021293b402 100644 --- a/src/objects/slots-inl.h +++ b/src/objects/slots-inl.h @@ -221,7 +221,7 @@ void ExternalPointerSlot::store(Isolate* isolate, Address value, ExternalPointerSlot::RawContent ExternalPointerSlot::GetAndClearContentForSerialization( const DisallowGarbageCollection& no_gc) { -#ifdef V8_ENABLE_SANDBOX +#ifdef V8_SANDBOXED_EXTERNAL_POINTERS ExternalPointerHandle content = Relaxed_LoadHandle(); Relaxed_StoreHandle(kNullExternalPointerHandle); #else @@ -234,7 +234,7 @@ ExternalPointerSlot::GetAndClearContentForSerialization( void ExternalPointerSlot::RestoreContentAfterSerialization( ExternalPointerSlot::RawContent content, const DisallowGarbageCollection& no_gc) { -#ifdef V8_ENABLE_SANDBOX +#ifdef V8_SANDBOXED_EXTERNAL_POINTERS return Relaxed_StoreHandle(content); #else return WriteMaybeUnalignedValue
(address(), content); diff --git a/src/torque/torque-parser.cc b/src/torque/torque-parser.cc index 596cc0740d..92481b2395 100644 --- a/src/torque/torque-parser.cc +++ b/src/torque/torque-parser.cc @@ -68,6 +68,8 @@ class BuildFlags : public ContextualClass { build_flags_["V8_ENABLE_WEBASSEMBLY"] = false; #endif build_flags_["V8_ENABLE_SANDBOX"] = V8_ENABLE_SANDBOX_BOOL; + build_flags_["V8_SANDBOXED_EXTERNAL_POINTERS"] = + V8_SANDBOXED_EXTERNAL_POINTERS_BOOL; build_flags_["DEBUG"] = DEBUG_BOOL; } static bool GetFlag(const std::string& name, const char* production) { diff --git a/src/wasm/wasm-objects.tq b/src/wasm/wasm-objects.tq index 55a7e7458d..9807983b1d 100644 --- a/src/wasm/wasm-objects.tq +++ b/src/wasm/wasm-objects.tq @@ -14,9 +14,9 @@ extern class WasmInstanceObject extends JSObject; // Represents the context of a function that is defined through the JS or C // APIs. Corresponds to the WasmInstanceObject passed to a Wasm function // reference. -// TODO(manoskouk): If V8_ENABLE_SANDBOX, we cannot encode the isolate_root as -// a sandboxed pointer, because that would require having access to the isolate -// root in the first place. +// TODO(manoskouk): If V8_SANDBOXED_EXTERNAL_POINTERS, we cannot encode the +// isolate_root as a sandboxed pointer, because that would require having access +// to the isolate root in the first place. extern class WasmApiFunctionRef extends HeapObject { isolate_root: RawPtr; native_context: NativeContext; diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc index 5ca1f5175c..7deba60e23 100644 --- a/test/cctest/test-api.cc +++ b/test/cctest/test-api.cc @@ -687,8 +687,9 @@ TEST(MakingExternalStringConditions) { CHECK(local_string->CanMakeExternal()); // Tiny strings are not in-place externalizable when pointer compression is - // enabled, but they are if the sandbox is enabled. - CHECK_EQ(V8_ENABLE_SANDBOX_BOOL || i::kTaggedSize == i::kSystemPointerSize, + // enabled, but they are if sandboxed external pointers are enabled. + CHECK_EQ(V8_SANDBOXED_EXTERNAL_POINTERS_BOOL || + i::kTaggedSize == i::kSystemPointerSize, tiny_local_string->CanMakeExternal()); } diff --git a/test/cctest/test-strings.cc b/test/cctest/test-strings.cc index bfabd3ae38..4cc8ca0791 100644 --- a/test/cctest/test-strings.cc +++ b/test/cctest/test-strings.cc @@ -2066,11 +2066,11 @@ TEST(CheckCachedDataInternalExternalUncachedString) { // that we indeed cached it. Handle external_string = Handle::cast(string); - // If the sandbox is enabled, string objects will always be cacheable because - // they are smaller. - CHECK(V8_ENABLE_SANDBOX_BOOL || external_string->is_uncached()); + // If sandboxed external pointers are enabled, string objects will always be + // cacheable because they are smaller. + CHECK(V8_SANDBOXED_EXTERNAL_POINTERS_BOOL || external_string->is_uncached()); CHECK(external_string->resource()->IsCacheable()); - if (!V8_ENABLE_SANDBOX_BOOL) { + if (!V8_SANDBOXED_EXTERNAL_POINTERS_BOOL) { CHECK_NOT_NULL(external_string->resource()->cached_data()); CHECK_EQ(external_string->resource()->cached_data(), external_string->resource()->data()); @@ -2109,11 +2109,11 @@ TEST(CheckCachedDataInternalExternalUncachedStringTwoByte) { // that we indeed cached it. Handle external_string = Handle::cast(string); - // If the sandbox is enabled, string objects will always be cacheable because - // they are smaller. 
- CHECK(V8_ENABLE_SANDBOX_BOOL || external_string->is_uncached()); + // If sandboxed external pointers are enabled, string objects will always be + // cacheable because they are smaller. + CHECK(V8_SANDBOXED_EXTERNAL_POINTERS_BOOL || external_string->is_uncached()); CHECK(external_string->resource()->IsCacheable()); - if (!V8_ENABLE_SANDBOX_BOOL) { + if (!V8_SANDBOXED_EXTERNAL_POINTERS_BOOL) { CHECK_NOT_NULL(external_string->resource()->cached_data()); CHECK_EQ(external_string->resource()->cached_data(), external_string->resource()->data()); diff --git a/tools/v8heapconst.py b/tools/v8heapconst.py index 7ed0afec8f..42d72af3a1 100644 --- a/tools/v8heapconst.py +++ b/tools/v8heapconst.py @@ -515,67 +515,67 @@ KNOWN_OBJECTS = { ("read_only_space", 0x04b49): "NativeScopeInfo", ("read_only_space", 0x04b61): "HashSeed", ("old_space", 0x04235): "ArgumentsIteratorAccessor", - ("old_space", 0x0424d): "ArrayLengthAccessor", - ("old_space", 0x04265): "BoundFunctionLengthAccessor", - ("old_space", 0x0427d): "BoundFunctionNameAccessor", - ("old_space", 0x04295): "ErrorStackAccessor", - ("old_space", 0x042ad): "FunctionArgumentsAccessor", - ("old_space", 0x042c5): "FunctionCallerAccessor", - ("old_space", 0x042dd): "FunctionNameAccessor", - ("old_space", 0x042f5): "FunctionLengthAccessor", - ("old_space", 0x0430d): "FunctionPrototypeAccessor", - ("old_space", 0x04325): "SharedArrayLengthAccessor", - ("old_space", 0x0433d): "StringLengthAccessor", - ("old_space", 0x04355): "ValueUnavailableAccessor", - ("old_space", 0x0436d): "WrappedFunctionLengthAccessor", - ("old_space", 0x04385): "WrappedFunctionNameAccessor", - ("old_space", 0x0439d): "InvalidPrototypeValidityCell", - ("old_space", 0x043a5): "EmptyScript", - ("old_space", 0x043e9): "ManyClosuresCell", - ("old_space", 0x043f5): "ArrayConstructorProtector", - ("old_space", 0x04409): "NoElementsProtector", - ("old_space", 0x0441d): "MegaDOMProtector", - ("old_space", 0x04431): "IsConcatSpreadableProtector", - ("old_space", 0x04445): "ArraySpeciesProtector", - ("old_space", 0x04459): "TypedArraySpeciesProtector", - ("old_space", 0x0446d): "PromiseSpeciesProtector", - ("old_space", 0x04481): "RegExpSpeciesProtector", - ("old_space", 0x04495): "StringLengthProtector", - ("old_space", 0x044a9): "ArrayIteratorProtector", - ("old_space", 0x044bd): "ArrayBufferDetachingProtector", - ("old_space", 0x044d1): "PromiseHookProtector", - ("old_space", 0x044e5): "PromiseResolveProtector", - ("old_space", 0x044f9): "MapIteratorProtector", - ("old_space", 0x0450d): "PromiseThenProtector", - ("old_space", 0x04521): "SetIteratorProtector", - ("old_space", 0x04535): "StringIteratorProtector", - ("old_space", 0x04549): "StringSplitCache", - ("old_space", 0x04951): "RegExpMultipleCache", - ("old_space", 0x04d59): "BuiltinsConstantsTable", - ("old_space", 0x051ad): "AsyncFunctionAwaitRejectSharedFun", - ("old_space", 0x051d1): "AsyncFunctionAwaitResolveSharedFun", - ("old_space", 0x051f5): "AsyncGeneratorAwaitRejectSharedFun", - ("old_space", 0x05219): "AsyncGeneratorAwaitResolveSharedFun", - ("old_space", 0x0523d): "AsyncGeneratorYieldResolveSharedFun", - ("old_space", 0x05261): "AsyncGeneratorReturnResolveSharedFun", - ("old_space", 0x05285): "AsyncGeneratorReturnClosedRejectSharedFun", - ("old_space", 0x052a9): "AsyncGeneratorReturnClosedResolveSharedFun", - ("old_space", 0x052cd): "AsyncIteratorValueUnwrapSharedFun", - ("old_space", 0x052f1): "PromiseAllResolveElementSharedFun", - ("old_space", 0x05315): "PromiseAllSettledResolveElementSharedFun", - ("old_space", 
0x05339): "PromiseAllSettledRejectElementSharedFun", - ("old_space", 0x0535d): "PromiseAnyRejectElementSharedFun", - ("old_space", 0x05381): "PromiseCapabilityDefaultRejectSharedFun", - ("old_space", 0x053a5): "PromiseCapabilityDefaultResolveSharedFun", - ("old_space", 0x053c9): "PromiseCatchFinallySharedFun", - ("old_space", 0x053ed): "PromiseGetCapabilitiesExecutorSharedFun", - ("old_space", 0x05411): "PromiseThenFinallySharedFun", - ("old_space", 0x05435): "PromiseThrowerFinallySharedFun", - ("old_space", 0x05459): "PromiseValueThunkFinallySharedFun", - ("old_space", 0x0547d): "ProxyRevokeSharedFun", - ("old_space", 0x054a1): "ShadowRealmImportValueFulfilledSFI", - ("old_space", 0x054c5): "SourceTextModuleExecuteAsyncModuleFulfilledSFI", - ("old_space", 0x054e9): "SourceTextModuleExecuteAsyncModuleRejectedSFI", + ("old_space", 0x04255): "ArrayLengthAccessor", + ("old_space", 0x04275): "BoundFunctionLengthAccessor", + ("old_space", 0x04295): "BoundFunctionNameAccessor", + ("old_space", 0x042b5): "ErrorStackAccessor", + ("old_space", 0x042d5): "FunctionArgumentsAccessor", + ("old_space", 0x042f5): "FunctionCallerAccessor", + ("old_space", 0x04315): "FunctionNameAccessor", + ("old_space", 0x04335): "FunctionLengthAccessor", + ("old_space", 0x04355): "FunctionPrototypeAccessor", + ("old_space", 0x04375): "SharedArrayLengthAccessor", + ("old_space", 0x04395): "StringLengthAccessor", + ("old_space", 0x043b5): "ValueUnavailableAccessor", + ("old_space", 0x043d5): "WrappedFunctionLengthAccessor", + ("old_space", 0x043f5): "WrappedFunctionNameAccessor", + ("old_space", 0x04415): "InvalidPrototypeValidityCell", + ("old_space", 0x0441d): "EmptyScript", + ("old_space", 0x04461): "ManyClosuresCell", + ("old_space", 0x0446d): "ArrayConstructorProtector", + ("old_space", 0x04481): "NoElementsProtector", + ("old_space", 0x04495): "MegaDOMProtector", + ("old_space", 0x044a9): "IsConcatSpreadableProtector", + ("old_space", 0x044bd): "ArraySpeciesProtector", + ("old_space", 0x044d1): "TypedArraySpeciesProtector", + ("old_space", 0x044e5): "PromiseSpeciesProtector", + ("old_space", 0x044f9): "RegExpSpeciesProtector", + ("old_space", 0x0450d): "StringLengthProtector", + ("old_space", 0x04521): "ArrayIteratorProtector", + ("old_space", 0x04535): "ArrayBufferDetachingProtector", + ("old_space", 0x04549): "PromiseHookProtector", + ("old_space", 0x0455d): "PromiseResolveProtector", + ("old_space", 0x04571): "MapIteratorProtector", + ("old_space", 0x04585): "PromiseThenProtector", + ("old_space", 0x04599): "SetIteratorProtector", + ("old_space", 0x045ad): "StringIteratorProtector", + ("old_space", 0x045c1): "StringSplitCache", + ("old_space", 0x049c9): "RegExpMultipleCache", + ("old_space", 0x04dd1): "BuiltinsConstantsTable", + ("old_space", 0x05225): "AsyncFunctionAwaitRejectSharedFun", + ("old_space", 0x05249): "AsyncFunctionAwaitResolveSharedFun", + ("old_space", 0x0526d): "AsyncGeneratorAwaitRejectSharedFun", + ("old_space", 0x05291): "AsyncGeneratorAwaitResolveSharedFun", + ("old_space", 0x052b5): "AsyncGeneratorYieldResolveSharedFun", + ("old_space", 0x052d9): "AsyncGeneratorReturnResolveSharedFun", + ("old_space", 0x052fd): "AsyncGeneratorReturnClosedRejectSharedFun", + ("old_space", 0x05321): "AsyncGeneratorReturnClosedResolveSharedFun", + ("old_space", 0x05345): "AsyncIteratorValueUnwrapSharedFun", + ("old_space", 0x05369): "PromiseAllResolveElementSharedFun", + ("old_space", 0x0538d): "PromiseAllSettledResolveElementSharedFun", + ("old_space", 0x053b1): "PromiseAllSettledRejectElementSharedFun", + 
("old_space", 0x053d5): "PromiseAnyRejectElementSharedFun", + ("old_space", 0x053f9): "PromiseCapabilityDefaultRejectSharedFun", + ("old_space", 0x0541d): "PromiseCapabilityDefaultResolveSharedFun", + ("old_space", 0x05441): "PromiseCatchFinallySharedFun", + ("old_space", 0x05465): "PromiseGetCapabilitiesExecutorSharedFun", + ("old_space", 0x05489): "PromiseThenFinallySharedFun", + ("old_space", 0x054ad): "PromiseThrowerFinallySharedFun", + ("old_space", 0x054d1): "PromiseValueThunkFinallySharedFun", + ("old_space", 0x054f5): "ProxyRevokeSharedFun", + ("old_space", 0x05519): "ShadowRealmImportValueFulfilledSFI", + ("old_space", 0x0553d): "SourceTextModuleExecuteAsyncModuleFulfilledSFI", + ("old_space", 0x05561): "SourceTextModuleExecuteAsyncModuleRejectedSFI", } # Lower 32 bits of first page addresses for various heap spaces. From 415ef632806b99dbacfaab033925484ead3f536d Mon Sep 17 00:00:00 2001 From: Milad Fa Date: Fri, 9 Sep 2022 09:12:40 -0400 Subject: [PATCH 0027/1772] PPC/s390: [wasm][liftoff] Fix and cleanup tracing of return value Port 6f9e71fa74eb589a48c0f5065ac961a64cb515a3 Original Commit Message: - Fix tracing of reference return values. StoreTaggedPointer should not use the write barrier since we are writing to the stack. - Avoid re-allocating a slot for the return value when it is already spilled. R=thibaudm@chromium.org, joransiu@ca.ibm.com, junyan@redhat.com, midawson@redhat.com BUG= LOG=N Change-Id: I5b16259b1c6e8c019f6b17e8efb7947776e4ee24 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3886398 Reviewed-by: Thibaud Michaud Commit-Queue: Milad Farazmand Cr-Commit-Position: refs/heads/main@{#83112} --- src/wasm/baseline/ppc/liftoff-assembler-ppc.h | 4 ++++ src/wasm/baseline/s390/liftoff-assembler-s390.h | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/src/wasm/baseline/ppc/liftoff-assembler-ppc.h index ef158b588c..7b8b5837fd 100644 --- a/src/wasm/baseline/ppc/liftoff-assembler-ppc.h +++ b/src/wasm/baseline/ppc/liftoff-assembler-ppc.h @@ -1093,6 +1093,10 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) { } } +void LiftoffAssembler::LoadSpillAddress(Register dst, int offset) { + SubS64(dst, fp, Operand(offset)); +} + #define SIGN_EXT(r) extsw(r, r) #define ROUND_F64_TO_F32(fpr) frsp(fpr, fpr) #define INT32_AND_WITH_1F(x) Operand(x & 0x1f) diff --git a/src/wasm/baseline/s390/liftoff-assembler-s390.h b/src/wasm/baseline/s390/liftoff-assembler-s390.h index f4e06ad102..0476818b2f 100644 --- a/src/wasm/baseline/s390/liftoff-assembler-s390.h +++ b/src/wasm/baseline/s390/liftoff-assembler-s390.h @@ -1552,6 +1552,10 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) { pop(r0); } +void LiftoffAssembler::LoadSpillAddress(Register dst, int offset) { + SubS64(dst, fp, Operand(offset)); +} + #define SIGN_EXT(r) lgfr(r, r) #define INT32_AND_WITH_1F(x) Operand(x & 0x1f) #define REGISTER_AND_WITH_1F \ From f85e8c47ccbab6369c7b05a28cf47070d5d1640d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marja=20H=C3=B6ltt=C3=A4?= Date: Fri, 9 Sep 2022 15:35:20 +0200 Subject: [PATCH 0028/1772] [interpreter,baseline] Make FindNonDefaultConstructor use a RegOutPair MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This allows (de)optimizing it in TF. 
Bug: v8:13091 Change-Id: Iba64df02379dbf3ac07c96e10facb728e7d10501 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3886869 Auto-Submit: Marja Hölttä Reviewed-by: Leszek Swirski Commit-Queue: Marja Hölttä Commit-Queue: Leszek Swirski Cr-Commit-Position: refs/heads/main@{#83113} --- src/baseline/baseline-compiler.cc | 3 ++- src/interpreter/bytecode-array-builder.cc | 6 ++---- src/interpreter/bytecode-array-builder.h | 6 +++--- src/interpreter/bytecode-generator.cc | 8 +++++--- src/interpreter/bytecodes.h | 4 ++-- src/interpreter/interpreter-generator.cc | 16 +++++++--------- 6 files changed, 21 insertions(+), 22 deletions(-) diff --git a/src/baseline/baseline-compiler.cc b/src/baseline/baseline-compiler.cc index 8f8e2c9097..5acdb028dd 100644 --- a/src/baseline/baseline-compiler.cc +++ b/src/baseline/baseline-compiler.cc @@ -1161,9 +1161,10 @@ void BaselineCompiler::VisitGetSuperConstructor() { } void BaselineCompiler::VisitFindNonDefaultConstructor() { + SaveAccumulatorScope accumulator_scope(&basm_); CallBuiltin(RegisterOperand(0), RegisterOperand(1)); - StoreRegister(2, kReturnRegister1); + StoreRegisterPair(2, kReturnRegister0, kReturnRegister1); } namespace { diff --git a/src/interpreter/bytecode-array-builder.cc b/src/interpreter/bytecode-array-builder.cc index d874873f00..1548986e0e 100644 --- a/src/interpreter/bytecode-array-builder.cc +++ b/src/interpreter/bytecode-array-builder.cc @@ -527,10 +527,8 @@ BytecodeArrayBuilder& BytecodeArrayBuilder::GetSuperConstructor(Register out) { } BytecodeArrayBuilder& BytecodeArrayBuilder::FindNonDefaultConstructor( - Register this_function, Register new_target, - Register constructor_or_instance) { - OutputFindNonDefaultConstructor(this_function, new_target, - constructor_or_instance); + Register this_function, Register new_target, RegisterList output) { + OutputFindNonDefaultConstructor(this_function, new_target, output); return *this; } diff --git a/src/interpreter/bytecode-array-builder.h b/src/interpreter/bytecode-array-builder.h index f27b7af17c..9fb13b8cf3 100644 --- a/src/interpreter/bytecode-array-builder.h +++ b/src/interpreter/bytecode-array-builder.h @@ -386,9 +386,9 @@ class V8_EXPORT_PRIVATE BytecodeArrayBuilder final { // throws a TypeError exception. BytecodeArrayBuilder& GetSuperConstructor(Register out); - BytecodeArrayBuilder& FindNonDefaultConstructor( - Register this_function, Register new_target, - Register constructor_or_instance); + BytecodeArrayBuilder& FindNonDefaultConstructor(Register this_function, + Register new_target, + RegisterList output); // Deletes property from an object. This expects that accumulator contains // the key to be deleted and the register contains a reference to the object. 
diff --git a/src/interpreter/bytecode-generator.cc b/src/interpreter/bytecode-generator.cc index 5f86f4d3b7..c027fb8b07 100644 --- a/src/interpreter/bytecode-generator.cc +++ b/src/interpreter/bytecode-generator.cc @@ -5791,9 +5791,11 @@ void BytecodeGenerator::BuildSuperCallOptimization( Register this_function, Register new_target, Register constructor_then_instance, BytecodeLabel* super_ctor_call_done) { DCHECK(FLAG_omit_default_ctors); - builder()->FindNonDefaultConstructor(this_function, new_target, - constructor_then_instance); - builder()->JumpIfTrue(ToBooleanMode::kAlreadyBoolean, super_ctor_call_done); + RegisterList output = register_allocator()->NewRegisterList(2); + builder()->FindNonDefaultConstructor(this_function, new_target, output); + builder()->MoveRegister(output[1], constructor_then_instance); + builder()->LoadAccumulatorWithRegister(output[0]).JumpIfTrue( + ToBooleanMode::kAlreadyBoolean, super_ctor_call_done); } void BytecodeGenerator::VisitCallNew(CallNew* expr) { diff --git a/src/interpreter/bytecodes.h b/src/interpreter/bytecodes.h index e5bade3fc5..bb0dafe16d 100644 --- a/src/interpreter/bytecodes.h +++ b/src/interpreter/bytecodes.h @@ -228,8 +228,8 @@ namespace interpreter { /* GetSuperConstructor operator */ \ V(GetSuperConstructor, ImplicitRegisterUse::kReadAccumulator, \ OperandType::kRegOut) \ - V(FindNonDefaultConstructor, ImplicitRegisterUse::kWriteAccumulator, \ - OperandType::kReg, OperandType::kReg, OperandType::kRegOut) \ + V(FindNonDefaultConstructor, ImplicitRegisterUse::kNone, OperandType::kReg, \ + OperandType::kReg, OperandType::kRegOutPair) \ \ /* Call operations */ \ V(CallAnyReceiver, ImplicitRegisterUse::kWriteAccumulator, \ diff --git a/src/interpreter/interpreter-generator.cc b/src/interpreter/interpreter-generator.cc index 25ad8b0661..57f2eeae3c 100644 --- a/src/interpreter/interpreter-generator.cc +++ b/src/interpreter/interpreter-generator.cc @@ -2781,14 +2781,13 @@ IGNITION_HANDLER(ThrowIfNotSuperConstructor, InterpreterAssembler) { } } -// FinNonDefaultConstructor -// +// FinNonDefaultConstructor // // Walks the prototype chain from 's super ctor until we see a // non-default ctor. If the walk ends at a default base ctor, creates an -// instance and stores it in and stores true into the -// accumulator. Otherwise, stores the first non-default ctor into -// and false into the accumulator. +// instance and stores it in and stores true into output[0]. +// Otherwise, stores the first non-default ctor into and false into +// . IGNITION_HANDLER(FindNonDefaultConstructor, InterpreterAssembler) { TNode context = GetContext(); TVARIABLE(Object, constructor); @@ -2806,16 +2805,15 @@ IGNITION_HANDLER(FindNonDefaultConstructor, InterpreterAssembler) { TNode new_target = LoadRegisterAtOperandIndex(1); TNode instance = CallBuiltin(Builtin::kFastNewObject, context, constructor.value(), new_target); - StoreRegisterAtOperandIndex(instance, 2); - SetAccumulator(TrueConstant()); + + StoreRegisterPairAtOperandIndex(TrueConstant(), instance, 2); Dispatch(); } BIND(&found_something_else); { // Not a base ctor (or bailed out). 
-    StoreRegisterAtOperandIndex(constructor.value(), 2);
-    SetAccumulator(FalseConstant());
+    StoreRegisterPairAtOperandIndex(FalseConstant(), constructor.value(), 2);
     Dispatch();
   }
 }

From 779da1d06683b9aa591173c5658450717838481a Mon Sep 17 00:00:00 2001
From: Al Muthanna Athamina
Date: Fri, 9 Sep 2022 14:35:25 +0200
Subject: [PATCH 0029/1772] [NumFuzz] Skip flaky tests on interrupt fuzzer

Bug: v8:13269
Change-Id: Icb8b83b5f4695a9739d10d15936f4fead3b35ad1
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3886865
Reviewed-by: Michael Achenbach
Commit-Queue: Almothana Athamneh
Cr-Commit-Position: refs/heads/main@{#83114}
---
 test/mjsunit/mjsunit.status     | 11 +++++++----
 tools/testrunner/base_runner.py |  2 ++
 tools/testrunner/num_fuzzer.py  | 27 +++++++++++++++++----------
 3 files changed, 26 insertions(+), 14 deletions(-)

diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index b98c1b8ebc..aa8a4fef85 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -1101,14 +1101,17 @@
   # BUG(v8:11656) Skipped until we make progress on NumFuzz.
   'baseline/test-osr': [SKIP],

+  # BUG(v8:13153) Skipped until issue is fixed to reduce noise on alerts.
+  'regress/regress-1034322': [SKIP],
+}], # gc_fuzzer or deopt_fuzzer
+
+##############################################################################
+['gc_fuzzer or deopt_fuzzer or interrupt_fuzzer', {
   # BUG(v8:12842) Skipped until we remove flakes on NumFuzz.
   'compiler/regress-1224277': [SKIP],
   'regress/regress-1220974': [SKIP],
   'regress/regress-992389': [SKIP],
-
-  # BUG(v8:13153) Skipped until issue is fixed to reduce noise on alerts.
-  'regress/regress-1034322': [SKIP],
-}], # gc_fuzzer or deopt_fuzzer
+}], # gc_fuzzer or deopt_fuzzer or interrupt_fuzzer

 ##############################################################################
 ['endurance_fuzzer', {
diff --git a/tools/testrunner/base_runner.py b/tools/testrunner/base_runner.py
index b2080c3449..a19bdc1f98 100644
--- a/tools/testrunner/base_runner.py
+++ b/tools/testrunner/base_runner.py
@@ -573,6 +573,8 @@ class BaseTestRunner(object):
             self.build_config.is_clang,
         "is_full_debug":
             self.build_config.is_full_debug,
+        "interrupt_fuzzer":
+            False,
         "mips_arch_variant":
             self.build_config.mips_arch_variant,
         "mode":
diff --git a/tools/testrunner/num_fuzzer.py b/tools/testrunner/num_fuzzer.py
index 44095d1f4a..9265e32e12 100755
--- a/tools/testrunner/num_fuzzer.py
+++ b/tools/testrunner/num_fuzzer.py
@@ -132,16 +132,23 @@ class NumFuzzer(base_runner.BaseTestRunner):
     variables = (
         super(NumFuzzer, self)._get_statusfile_variables())
     variables.update({
-        'deopt_fuzzer': bool(self.options.stress_deopt),
-        'endurance_fuzzer': bool(self.options.combine_tests),
-        'gc_stress': bool(self.options.stress_gc),
-        'gc_fuzzer': bool(max([self.options.stress_marking,
-                               self.options.stress_scavenge,
-                               self.options.stress_compaction,
-                               self.options.stress_gc,
-                               self.options.stress_delay_tasks,
-                               self.options.stress_stack_size,
-                               self.options.stress_thread_pool_size])),
+        'deopt_fuzzer':
+            bool(self.options.stress_deopt),
+        'interrupt_fuzzer':
+            bool(self.options.stress_interrupt_budget),
+        'endurance_fuzzer':
+            bool(self.options.combine_tests),
+        'gc_stress':
+            bool(self.options.stress_gc),
+        'gc_fuzzer':
+            bool(
+                max([
+                    self.options.stress_marking, self.options.stress_scavenge,
+                    self.options.stress_compaction, self.options.stress_gc,
+                    self.options.stress_delay_tasks,
+                    self.options.stress_stack_size,
+                    self.options.stress_thread_pool_size
+                ])),
     })
     return variables

From
0a1f0e335ebe0af856a977d0213474de5488f544 Mon Sep 17 00:00:00 2001 From: Shu-yu Guo Date: Thu, 8 Sep 2022 19:16:34 -0700 Subject: [PATCH 0030/1772] [strings] Fix raw hash lookup for forwarded strings Raw hashes may need to be looked up via the forwarding table when internalized strings are forwarded to external resources. Notably, the megamorphic ICs were not correctly fetching the raw hash. Bug: v8:12007 Change-Id: Ibbc75de57e707788f544fbd1a0f8f0041350e29d Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3885379 Reviewed-by: Leszek Swirski Commit-Queue: Shu-yu Guo Reviewed-by: Patrick Thier Cr-Commit-Position: refs/heads/main@{#83115} --- src/codegen/code-stub-assembler.cc | 48 ++++++++++++++++--- src/codegen/code-stub-assembler.h | 4 ++ src/codegen/external-reference.cc | 2 + src/codegen/external-reference.h | 1 + src/ic/accessor-assembler.cc | 2 +- src/ic/stub-cache.cc | 2 +- src/objects/name-inl.h | 8 ++++ src/objects/name.h | 1 + src/objects/string-forwarding-table.cc | 5 ++ src/objects/string-forwarding-table.h | 1 + src/objects/string.cc | 2 +- .../shared-external-string-megamorphic-ic.js | 37 ++++++++++++++ 12 files changed, 104 insertions(+), 9 deletions(-) create mode 100644 test/mjsunit/shared-memory/shared-external-string-megamorphic-ic.js diff --git a/src/codegen/code-stub-assembler.cc b/src/codegen/code-stub-assembler.cc index d3cd497885..3c87ff7dbc 100644 --- a/src/codegen/code-stub-assembler.cc +++ b/src/codegen/code-stub-assembler.cc @@ -1996,14 +1996,14 @@ TNode CodeStubAssembler::LoadJSReceiverIdentityHash( } TNode CodeStubAssembler::LoadNameHashAssumeComputed(TNode name) { - TNode hash_field = LoadNameRawHashField(name); + TNode hash_field = LoadNameRawHash(name); CSA_DCHECK(this, IsClearWord32(hash_field, Name::kHashNotComputedMask)); return DecodeWord32(hash_field); } TNode CodeStubAssembler::LoadNameHash(TNode name, Label* if_hash_not_computed) { - TNode raw_hash_field = LoadNameRawHashField(name); + TNode raw_hash_field = LoadNameRawHash(name); if (if_hash_not_computed != nullptr) { GotoIf(IsSetWord32(raw_hash_field, Name::kHashNotComputedMask), if_hash_not_computed); @@ -2011,6 +2011,43 @@ TNode CodeStubAssembler::LoadNameHash(TNode name, return DecodeWord32(raw_hash_field); } +TNode CodeStubAssembler::LoadNameRawHash(TNode name) { + TVARIABLE(Uint32T, var_raw_hash); + + Label if_forwarding_index(this), not_forwarding_index(this), done(this); + + TNode raw_hash_field = LoadNameRawHashField(name); + Branch(IsEqualInWord32( + raw_hash_field, Name::HashFieldType::kForwardingIndex), + &if_forwarding_index, ¬_forwarding_index); + + BIND(¬_forwarding_index); + { + var_raw_hash = raw_hash_field; + Goto(&done); + } + + BIND(&if_forwarding_index); + { + TNode function = + ExternalConstant(ExternalReference::raw_hash_from_forward_table()); + const TNode isolate_ptr = + ExternalConstant(ExternalReference::isolate_address(isolate())); + TNode result = UncheckedCast(CallCFunction( + function, MachineType::Uint32(), + std::make_pair(MachineType::Pointer(), isolate_ptr), + std::make_pair( + MachineType::Int32(), + DecodeWord32(raw_hash_field)))); + + var_raw_hash = result; + Goto(&done); + } + + BIND(&done); + return var_raw_hash.value(); +} + TNode CodeStubAssembler::LoadStringLengthAsSmi(TNode string) { return SmiFromIntPtr(LoadStringLengthAsWord(string)); } @@ -6747,8 +6784,7 @@ TNode CodeStubAssembler::IsUniqueNameNoIndex(TNode object) { IsInternalizedStringInstanceType(instance_type), [=] { return IsNotEqualInWord32( - LoadNameRawHashField(CAST(object)), - 
Name::HashFieldType::kIntegerIndex); + LoadNameRawHash(CAST(object)), Name::HashFieldType::kIntegerIndex); }, [=] { return IsSymbolInstanceType(instance_type); }); } @@ -6765,7 +6801,7 @@ TNode CodeStubAssembler::IsUniqueNameNoCachedIndex( return Select( IsInternalizedStringInstanceType(instance_type), [=] { - return IsSetWord32(LoadNameRawHashField(CAST(object)), + return IsSetWord32(LoadNameRawHash(CAST(object)), Name::kDoesNotContainCachedArrayIndexMask); }, [=] { return IsSymbolInstanceType(instance_type); }); @@ -7251,7 +7287,7 @@ TNode CodeStubAssembler::StringToNumber(TNode input) { TVARIABLE(Number, var_result); // Check if string has a cached array index. - TNode raw_hash_field = LoadNameRawHashField(input); + TNode raw_hash_field = LoadNameRawHash(input); GotoIf(IsSetWord32(raw_hash_field, Name::kDoesNotContainCachedArrayIndexMask), &runtime); diff --git a/src/codegen/code-stub-assembler.h b/src/codegen/code-stub-assembler.h index 6b07a7c55a..1d002085a4 100644 --- a/src/codegen/code-stub-assembler.h +++ b/src/codegen/code-stub-assembler.h @@ -1451,6 +1451,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler Label* if_hash_not_computed = nullptr); TNode LoadNameHashAssumeComputed(TNode name); + // Load the Name::RawHash() value of a name as an uint32 value. Follows + // through the forwarding table. + TNode LoadNameRawHash(TNode name); + // Load length field of a String object as Smi value. TNode LoadStringLengthAsSmi(TNode string); // Load length field of a String object as intptr_t value. diff --git a/src/codegen/external-reference.cc b/src/codegen/external-reference.cc index ef1f184c5f..4d30eff328 100644 --- a/src/codegen/external-reference.cc +++ b/src/codegen/external-reference.cc @@ -1024,6 +1024,8 @@ FUNCTION_REFERENCE(try_string_to_index_or_lookup_existing, StringTable::TryStringToIndexOrLookupExisting) FUNCTION_REFERENCE(string_from_forward_table, StringForwardingTable::GetForwardStringAddress) +FUNCTION_REFERENCE(raw_hash_from_forward_table, + StringForwardingTable::GetRawHashStatic) FUNCTION_REFERENCE(string_to_array_index_function, String::ToArrayIndex) FUNCTION_REFERENCE(array_indexof_includes_smi_or_object, ArrayIndexOfIncludesSmiOrObject) diff --git a/src/codegen/external-reference.h b/src/codegen/external-reference.h index b6df8547f5..04a2daf5c2 100644 --- a/src/codegen/external-reference.h +++ b/src/codegen/external-reference.h @@ -208,6 +208,7 @@ class StatsCounter; V(try_string_to_index_or_lookup_existing, \ "try_string_to_index_or_lookup_existing") \ V(string_from_forward_table, "string_from_forward_table") \ + V(raw_hash_from_forward_table, "raw_hash_from_forward_table") \ IF_WASM(V, wasm_call_trap_callback_for_testing, \ "wasm::call_trap_callback_for_testing") \ IF_WASM(V, wasm_f32_ceil, "wasm::f32_ceil_wrapper") \ diff --git a/src/ic/accessor-assembler.cc b/src/ic/accessor-assembler.cc index c723f449c2..d1b99b4711 100644 --- a/src/ic/accessor-assembler.cc +++ b/src/ic/accessor-assembler.cc @@ -2887,7 +2887,7 @@ enum AccessorAssembler::StubCacheTable : int { TNode AccessorAssembler::StubCachePrimaryOffset(TNode name, TNode map) { // Compute the hash of the name (use entire hash field). 
- TNode raw_hash_field = LoadNameRawHashField(name); + TNode raw_hash_field = LoadNameRawHash(name); CSA_DCHECK(this, Word32Equal(Word32And(raw_hash_field, Int32Constant(Name::kHashNotComputedMask)), diff --git a/src/ic/stub-cache.cc b/src/ic/stub-cache.cc index 2a786398cb..4dd60fdfa9 100644 --- a/src/ic/stub-cache.cc +++ b/src/ic/stub-cache.cc @@ -50,7 +50,7 @@ void StubCache::Initialize() { // is scaled by 1 << kCacheIndexShift. int StubCache::PrimaryOffset(Name name, Map map) { // Compute the hash of the name (use entire hash field). - uint32_t field = name.raw_hash_field(); + uint32_t field = name.RawHash(); DCHECK(Name::IsHashFieldComputed(field)); // Using only the low bits in 64-bit mode is unlikely to increase the // risk of collision even if the heap is spread over an area larger than diff --git a/src/objects/name-inl.h b/src/objects/name-inl.h index 570ac0a508..ee3585eb6a 100644 --- a/src/objects/name-inl.h +++ b/src/objects/name-inl.h @@ -190,6 +190,14 @@ uint32_t Name::EnsureRawHash( return String::cast(*this).ComputeAndSetRawHash(access_guard); } +uint32_t Name::RawHash() { + uint32_t field = raw_hash_field(kAcquireLoad); + if (V8_UNLIKELY(IsForwardingIndex(field))) { + return GetRawHashFromForwardingTable(field); + } + return field; +} + uint32_t Name::EnsureHash() { return HashBits::decode(EnsureRawHash()); } uint32_t Name::EnsureHash(const SharedStringAccessGuardIfNeeded& access_guard) { diff --git a/src/objects/name.h b/src/objects/name.h index dcd1b9652d..c2816d04c2 100644 --- a/src/objects/name.h +++ b/src/objects/name.h @@ -190,6 +190,7 @@ class Name : public TorqueGeneratedName { // a forwarding index. inline uint32_t EnsureRawHash(); inline uint32_t EnsureRawHash(const SharedStringAccessGuardIfNeeded&); + inline uint32_t RawHash(); static inline bool IsHashFieldComputed(uint32_t raw_hash_field); static inline bool IsHash(uint32_t raw_hash_field); diff --git a/src/objects/string-forwarding-table.cc b/src/objects/string-forwarding-table.cc index 0a6462b613..53366ed2bb 100644 --- a/src/objects/string-forwarding-table.cc +++ b/src/objects/string-forwarding-table.cc @@ -261,6 +261,11 @@ uint32_t StringForwardingTable::GetRawHash(PtrComprCageBase cage_base, return block->record(index_in_block)->raw_hash(cage_base); } +// static +uint32_t StringForwardingTable::GetRawHashStatic(Isolate* isolate, int index) { + return isolate->string_forwarding_table()->GetRawHash(isolate, index); +} + v8::String::ExternalStringResourceBase* StringForwardingTable::GetExternalResource(int index, bool* is_one_byte) const { CHECK_LT(index, size()); diff --git a/src/objects/string-forwarding-table.h b/src/objects/string-forwarding-table.h index 72e4d73c0b..3cf7d3280b 100644 --- a/src/objects/string-forwarding-table.h +++ b/src/objects/string-forwarding-table.h @@ -56,6 +56,7 @@ class StringForwardingTable { static Address GetForwardStringAddress(Isolate* isolate, int index); V8_EXPORT_PRIVATE uint32_t GetRawHash(PtrComprCageBase cage_base, int index) const; + static uint32_t GetRawHashStatic(Isolate* isolate, int index); v8::String::ExternalStringResourceBase* GetExternalResource( int index, bool* is_one_byte) const; diff --git a/src/objects/string.cc b/src/objects/string.cc index e97fb99669..776ac6e6e7 100644 --- a/src/objects/string.cc +++ b/src/objects/string.cc @@ -1675,7 +1675,7 @@ uint32_t String::ComputeAndSetRawHash( string = ThinString::cast(string).actual(cage_base); shape = StringShape(string, cage_base); if (length() == string.length()) { - uint32_t raw_hash = string.raw_hash_field(); 
+ uint32_t raw_hash = string.RawHash(); DCHECK(IsHashFieldComputed(raw_hash)); set_raw_hash_field(raw_hash); return raw_hash; diff --git a/test/mjsunit/shared-memory/shared-external-string-megamorphic-ic.js b/test/mjsunit/shared-memory/shared-external-string-megamorphic-ic.js new file mode 100644 index 0000000000..19d27a7cf8 --- /dev/null +++ b/test/mjsunit/shared-memory/shared-external-string-megamorphic-ic.js @@ -0,0 +1,37 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// Flags: --expose-externalize-string --shared-string-table +// Flags: --allow-natives-syntax + +function set(o, ext_key) { + o[ext_key] = "bar"; +} +function get(o, ext_key) { + o[ext_key]; +} + +%PrepareFunctionForOptimization(set); +%OptimizeFunctionOnNextCall(set); +%PrepareFunctionForOptimization(get); +%OptimizeFunctionOnNextCall(get); + +(function test() { + let ext_key = "AAAAAAAAAAAAAAAAAAAAAA"; + externalizeString(ext_key); + + set({a:1}, ext_key); + set({b:2}, ext_key); + set({c:3}, ext_key); + set({d:4}, ext_key); + set({e:5}, ext_key); + set({f:6}, ext_key); + + get({a:1}, ext_key); + get({b:2}, ext_key); + get({c:3}, ext_key); + get({d:4}, ext_key); + get({e:5}, ext_key); + get({f:6}, ext_key); +})(); From 4ec5bb4f26478088d9718b27b7eccc83d4293b77 Mon Sep 17 00:00:00 2001 From: Leszek Swirski Date: Fri, 9 Sep 2022 17:10:32 +0200 Subject: [PATCH 0031/1772] [maglev] Fix JumpLoop to the current basic block Drive-by improve some tracing too. Bug: v8:7700 Change-Id: I52546a19c15ad1a6bbac1b15cdf8fba33dab1cb7 Fixed: chromium:1361345 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3886873 Commit-Queue: Leszek Swirski Reviewed-by: Victor Gomes Auto-Submit: Leszek Swirski Cr-Commit-Position: refs/heads/main@{#83116} --- src/maglev/maglev-graph-builder.cc | 14 +++++++++++++- src/maglev/maglev-graph-builder.h | 8 +++++--- 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/src/maglev/maglev-graph-builder.cc b/src/maglev/maglev-graph-builder.cc index 080a952476..23519f7c42 100644 --- a/src/maglev/maglev-graph-builder.cc +++ b/src/maglev/maglev-graph-builder.cc @@ -76,6 +76,10 @@ MaglevGraphBuilder::MaglevGraphBuilder(LocalIsolate* local_isolate, int offset = offset_and_info.first; const compiler::LoopInfo& loop_info = offset_and_info.second; const compiler::BytecodeLivenessState* liveness = GetInLivenessFor(offset); + DCHECK_NULL(merge_states_[offset]); + if (FLAG_trace_maglev_graph_building) { + std::cout << "- Creating loop merge state at @" << offset << std::endl; + } merge_states_[offset] = MergePointInterpreterFrameState::NewForLoop( *compilation_unit_, offset, NumPredecessors(offset), liveness, &loop_info); @@ -88,6 +92,11 @@ MaglevGraphBuilder::MaglevGraphBuilder(LocalIsolate* local_isolate, const compiler::BytecodeLivenessState* liveness = GetInLivenessFor(offset); DCHECK_EQ(NumPredecessors(offset), 0); + DCHECK_NULL(merge_states_[offset]); + if (FLAG_trace_maglev_graph_building) { + std::cout << "- Creating exception merge state at @" << offset + << std::endl; + } merge_states_[offset] = MergePointInterpreterFrameState::NewForCatchBlock( *compilation_unit_, liveness, offset); } @@ -2503,7 +2512,7 @@ void MaglevGraphBuilder::VisitJumpLoop() { BytecodeOffset(iterator_.current_offset()), compilation_unit_); BasicBlock* block = - target == iterator_.current_offset() + target == block_offset_ ? 
FinishBlock<JumpLoop>(next_offset(), {}, &jump_targets_[target])
           : FinishBlock<JumpLoop>(next_offset(), {},
                                   jump_targets_[target].block_ptr());
@@ -2573,6 +2582,9 @@ void MaglevGraphBuilder::MergeDeadIntoFrameState(int target) {
     // If this merge is the last one which kills a loop merge, remove that
     // merge state.
     if (merge_states_[target]->is_unreachable_loop()) {
+      if (FLAG_trace_maglev_graph_building) {
+        std::cout << "! Killing loop merge state at @" << target << std::endl;
+      }
       merge_states_[target] = nullptr;
     }
   }
diff --git a/src/maglev/maglev-graph-builder.h b/src/maglev/maglev-graph-builder.h
index 6658baa08a..9fff3b7275 100644
--- a/src/maglev/maglev-graph-builder.h
+++ b/src/maglev/maglev-graph-builder.h
@@ -112,10 +112,11 @@ class MaglevGraphBuilder {
     // Set up edge-split.
     int predecessor_index = merge_state.predecessor_count() - 1;
-    if (merge_state.is_unmerged_loop()) {
+    if (merge_state.is_loop()) {
       // For loops, the JumpLoop block hasn't been generated yet, and so isn't
       // in the list of jump targets. It's the last predecessor, so drop the
       // index by one.
+      DCHECK(merge_state.is_unmerged_loop());
       predecessor_index--;
     }
     BasicBlockRef* old_jump_targets = jump_targets_[offset].Reset();
@@ -244,8 +245,9 @@ class MaglevGraphBuilder {
                             graph()->last_block(), offset);
     }
     if (FLAG_trace_maglev_graph_building) {
-      auto detail =
-          merge_state->is_exception_handler() ? "exception handler" : "merge";
+      auto detail = merge_state->is_exception_handler() ? "exception handler"
+                    : merge_state->is_loop()            ? "loop header"
+                                                        : "merge";
       std::cout << "== New block (" << detail << ") ==" << std::endl;
     }

From b11bfc21f288108a3a44eb5efcbc248155be68a9 Mon Sep 17 00:00:00 2001
From: Shu-yu Guo
Date: Fri, 9 Sep 2022 11:44:39 -0700
Subject: [PATCH 0032/1772] [strings] Accommodate shared strings in externalizeString()

This is a testing function used by d8 to test string externalization.

Bug: v8:12007
Change-Id: Ic19f28a42e1f9681ab08c00106788c569639fe7e
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3888378
Commit-Queue: Adam Klein
Auto-Submit: Shu-yu Guo
Reviewed-by: Adam Klein
Cr-Commit-Position: refs/heads/main@{#83117}
---
 src/extensions/externalize-string-extension.cc | 17 ++++++++++++++++-
 1 file changed, 16 insertions(+), 1 deletion(-)

diff --git a/src/extensions/externalize-string-extension.cc b/src/extensions/externalize-string-extension.cc
index dab8c224c4..dea0a9ff20 100644
--- a/src/extensions/externalize-string-extension.cc
+++ b/src/extensions/externalize-string-extension.cc
@@ -57,6 +57,15 @@ ExternalizeStringExtension::GetNativeFunctionTemplate(
   }
 }
 
+namespace {
+
+bool HasExternalForwardingIndex(Isolate* isolate, Handle<String> string) {
+  if (!string->IsShared(isolate)) return false;
+  uint32_t raw_hash = string->raw_hash_field(kAcquireLoad);
+  return Name::IsExternalForwardingIndex(raw_hash);
+}
+
+}  // namespace
 
 void ExternalizeStringExtension::Externalize(
     const v8::FunctionCallbackInfo<v8::Value>& args) {
@@ -96,7 +105,13 @@ void ExternalizeStringExtension::Externalize(
     result = Utils::ToLocal(string)->MakeExternal(resource);
     if (!result) delete resource;
   }
-  if (!result) {
+  // If the string is shared, testing with the combination of
+  // --shared-string-table and --isolate in d8 may result in races to
+  // externalize the same string. Those races manifest as externalization
+  // sometimes failing if another thread won and already forwarded the string to
+  // the external resource. Don't consider those races as failures.
+ if (!result && !HasExternalForwardingIndex( + reinterpret_cast(args.GetIsolate()), string)) { args.GetIsolate()->ThrowError("externalizeString() failed."); return; } From 03b99259ff9defb5cba37343713b39dd905f1aef Mon Sep 17 00:00:00 2001 From: Shu-yu Guo Date: Fri, 9 Sep 2022 13:10:45 -0700 Subject: [PATCH 0033/1772] [shared-struct] Support shared objects in v8::Object::GetConstructorName Bug: v8:12547 Change-Id: I6e48ac252361b3f3b495d2feaa5ad4e708e78eb9 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3888379 Auto-Submit: Shu-yu Guo Commit-Queue: Shu-yu Guo Commit-Queue: Adam Klein Reviewed-by: Adam Klein Cr-Commit-Position: refs/heads/main@{#83118} --- src/api/api.cc | 11 ++- src/init/heap-symbols.h | 4 ++ src/objects/js-objects.cc | 8 +++ test/cctest/test-api.cc | 101 +++++++++++++-------------- tools/v8heapconst.py | 140 +++++++++++++++++++------------------- 5 files changed, 139 insertions(+), 125 deletions(-) diff --git a/src/api/api.cc b/src/api/api.cc index 6eaea2dc3e..b172a1d07e 100644 --- a/src/api/api.cc +++ b/src/api/api.cc @@ -4736,11 +4736,16 @@ MaybeLocal v8::Object::ObjectProtoToString(Local context) { } Local v8::Object::GetConstructorName() { + // TODO(v8:12547): Consider adding GetConstructorName(Local). auto self = Utils::OpenHandle(this); - // TODO(v8:12547): Support shared objects. - DCHECK(!self->InSharedHeap()); + i::Isolate* i_isolate; + if (self->InSharedWritableHeap()) { + i_isolate = i::Isolate::Current(); + } else { + i_isolate = self->GetIsolate(); + } i::Handle name = - i::JSReceiver::GetConstructorName(self->GetIsolate(), self); + i::JSReceiver::GetConstructorName(i_isolate, self); return Utils::ToLocal(name); } diff --git a/src/init/heap-symbols.h b/src/init/heap-symbols.h index 10312ccc4b..edc998f27e 100644 --- a/src/init/heap-symbols.h +++ b/src/init/heap-symbols.h @@ -157,6 +157,8 @@ V(_, as_string, "as") \ V(_, assert_string, "assert") \ V(_, async_string, "async") \ + V(_, AtomicsCondition_string, "Atomics.Condition") \ + V(_, AtomicsMutex_string, "Atomics.Mutex") \ V(_, auto_string, "auto") \ V(_, await_string, "await") \ V(_, BigInt_string, "BigInt") \ @@ -384,7 +386,9 @@ V(_, SetIterator_string, "Set Iterator") \ V(_, setPrototypeOf_string, "setPrototypeOf") \ V(_, ShadowRealm_string, "ShadowRealm") \ + V(_, SharedArray_string, "SharedArray") \ V(_, SharedArrayBuffer_string, "SharedArrayBuffer") \ + V(_, SharedStruct_string, "SharedStruct") \ V(_, sign_string, "sign") \ V(_, smallestUnit_string, "smallestUnit") \ V(_, source_string, "source") \ diff --git a/src/objects/js-objects.cc b/src/objects/js-objects.cc index cbf6634797..97fe7dc775 100644 --- a/src/objects/js-objects.cc +++ b/src/objects/js-objects.cc @@ -544,6 +544,14 @@ String JSReceiver::class_name() { if (IsJSWeakMap()) return roots.WeakMap_string(); if (IsJSWeakSet()) return roots.WeakSet_string(); if (IsJSGlobalProxy()) return roots.global_string(); + if (IsShared()) { + if (IsJSSharedStruct()) return roots.SharedStruct_string(); + if (IsJSSharedArray()) return roots.SharedArray_string(); + if (IsJSAtomicsMutex()) return roots.AtomicsMutex_string(); + if (IsJSAtomicsCondition()) return roots.AtomicsCondition_string(); + // Other shared values are primitives. 
+ UNREACHABLE(); + } return roots.Object_string(); } diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc index 7deba60e23..0fafc70114 100644 --- a/test/cctest/test-api.cc +++ b/test/cctest/test-api.cc @@ -12879,6 +12879,22 @@ TEST(ObjectProtoToStringES6) { } } +namespace { + +void CheckGetConstructorNameOfVar(LocalContext& context, const char* var_name, + const char* constructor_name) { + Local var = context->Global() + ->Get(context.local(), v8_str(var_name)) + .ToLocalChecked(); + CHECK(var->IsObject() && + var->ToObject(context.local()) + .ToLocalChecked() + ->GetConstructorName() + ->Equals(context.local(), v8_str(constructor_name)) + .FromJust()); +} + +} // namespace THREADED_TEST(ObjectGetConstructorName) { v8::Isolate* isolate = CcTest::isolate(); @@ -12897,41 +12913,10 @@ THREADED_TEST(ObjectGetConstructorName) { ->Run(context.local()) .ToLocalChecked(); - Local p = - context->Global()->Get(context.local(), v8_str("p")).ToLocalChecked(); - CHECK(p->IsObject() && - p->ToObject(context.local()) - .ToLocalChecked() - ->GetConstructorName() - ->Equals(context.local(), v8_str("Parent")) - .FromJust()); - - Local c = - context->Global()->Get(context.local(), v8_str("c")).ToLocalChecked(); - CHECK(c->IsObject() && - c->ToObject(context.local()) - .ToLocalChecked() - ->GetConstructorName() - ->Equals(context.local(), v8_str("Child")) - .FromJust()); - - Local x = - context->Global()->Get(context.local(), v8_str("x")).ToLocalChecked(); - CHECK(x->IsObject() && - x->ToObject(context.local()) - .ToLocalChecked() - ->GetConstructorName() - ->Equals(context.local(), v8_str("outer.inner")) - .FromJust()); - - Local child_prototype = - context->Global()->Get(context.local(), v8_str("proto")).ToLocalChecked(); - CHECK(child_prototype->IsObject() && - child_prototype->ToObject(context.local()) - .ToLocalChecked() - ->GetConstructorName() - ->Equals(context.local(), v8_str("Parent")) - .FromJust()); + CheckGetConstructorNameOfVar(context, "p", "Parent"); + CheckGetConstructorNameOfVar(context, "c", "Child"); + CheckGetConstructorNameOfVar(context, "x", "outer.inner"); + CheckGetConstructorNameOfVar(context, "proto", "Parent"); } @@ -12948,25 +12933,37 @@ THREADED_TEST(SubclassGetConstructorName) { ->Run(context.local()) .ToLocalChecked(); - Local p = - context->Global()->Get(context.local(), v8_str("p")).ToLocalChecked(); - CHECK(p->IsObject() && - p->ToObject(context.local()) - .ToLocalChecked() - ->GetConstructorName() - ->Equals(context.local(), v8_str("Parent")) - .FromJust()); - - Local c = - context->Global()->Get(context.local(), v8_str("c")).ToLocalChecked(); - CHECK(c->IsObject() && - c->ToObject(context.local()) - .ToLocalChecked() - ->GetConstructorName() - ->Equals(context.local(), v8_str("Child")) - .FromJust()); + CheckGetConstructorNameOfVar(context, "p", "Parent"); + CheckGetConstructorNameOfVar(context, "c", "Child"); } +UNINITIALIZED_TEST(SharedObjectGetConstructorName) { + i::FLAG_shared_string_table = true; + i::FLAG_harmony_struct = true; + + v8::Isolate::CreateParams create_params; + create_params.array_buffer_allocator = CcTest::array_buffer_allocator(); + v8::Isolate* isolate = v8::Isolate::New(create_params); + { + v8::Isolate::Scope i_scope(isolate); + v8::HandleScope scope(isolate); + LocalContext context(isolate); + + v8_compile( + "var s = new (new SharedStructType(['foo']));" + "var a = new SharedArray(1);" + "var m = new Atomics.Mutex;" + "var c = new Atomics.Condition;") + ->Run(context.local()) + .ToLocalChecked(); + + 
CheckGetConstructorNameOfVar(context, "s", "SharedStruct"); + CheckGetConstructorNameOfVar(context, "a", "SharedArray"); + CheckGetConstructorNameOfVar(context, "m", "Atomics.Mutex"); + CheckGetConstructorNameOfVar(context, "c", "Atomics.Condition"); + } + isolate->Dispose(); +} bool ApiTestFuzzer::fuzzing_ = false; v8::base::Semaphore ApiTestFuzzer::all_tests_done_(0); diff --git a/tools/v8heapconst.py b/tools/v8heapconst.py index 42d72af3a1..1812c48885 100644 --- a/tools/v8heapconst.py +++ b/tools/v8heapconst.py @@ -393,76 +393,76 @@ KNOWN_MAPS = { ("read_only_space", 0x03491): (131, "BasicBlockCountersMarkerMap"), ("read_only_space", 0x034d5): (146, "ArrayBoilerplateDescriptionMap"), ("read_only_space", 0x035d5): (159, "InterceptorInfoMap"), - ("read_only_space", 0x073e9): (132, "PromiseFulfillReactionJobTaskMap"), - ("read_only_space", 0x07411): (133, "PromiseRejectReactionJobTaskMap"), - ("read_only_space", 0x07439): (134, "CallableTaskMap"), - ("read_only_space", 0x07461): (135, "CallbackTaskMap"), - ("read_only_space", 0x07489): (136, "PromiseResolveThenableJobTaskMap"), - ("read_only_space", 0x074b1): (139, "FunctionTemplateInfoMap"), - ("read_only_space", 0x074d9): (140, "ObjectTemplateInfoMap"), - ("read_only_space", 0x07501): (141, "AccessCheckInfoMap"), - ("read_only_space", 0x07529): (142, "AccessorPairMap"), - ("read_only_space", 0x07551): (143, "AliasedArgumentsEntryMap"), - ("read_only_space", 0x07579): (144, "AllocationMementoMap"), - ("read_only_space", 0x075a1): (147, "AsmWasmDataMap"), - ("read_only_space", 0x075c9): (148, "AsyncGeneratorRequestMap"), - ("read_only_space", 0x075f1): (149, "BreakPointMap"), - ("read_only_space", 0x07619): (150, "BreakPointInfoMap"), - ("read_only_space", 0x07641): (151, "CachedTemplateObjectMap"), - ("read_only_space", 0x07669): (152, "CallSiteInfoMap"), - ("read_only_space", 0x07691): (153, "ClassPositionsMap"), - ("read_only_space", 0x076b9): (154, "DebugInfoMap"), - ("read_only_space", 0x076e1): (156, "ErrorStackDataMap"), - ("read_only_space", 0x07709): (158, "FunctionTemplateRareDataMap"), - ("read_only_space", 0x07731): (160, "InterpreterDataMap"), - ("read_only_space", 0x07759): (161, "ModuleRequestMap"), - ("read_only_space", 0x07781): (162, "PromiseCapabilityMap"), - ("read_only_space", 0x077a9): (163, "PromiseOnStackMap"), - ("read_only_space", 0x077d1): (164, "PromiseReactionMap"), - ("read_only_space", 0x077f9): (165, "PropertyDescriptorObjectMap"), - ("read_only_space", 0x07821): (166, "PrototypeInfoMap"), - ("read_only_space", 0x07849): (167, "RegExpBoilerplateDescriptionMap"), - ("read_only_space", 0x07871): (168, "ScriptMap"), - ("read_only_space", 0x07899): (169, "ScriptOrModuleMap"), - ("read_only_space", 0x078c1): (170, "SourceTextModuleInfoEntryMap"), - ("read_only_space", 0x078e9): (171, "StackFrameInfoMap"), - ("read_only_space", 0x07911): (172, "TemplateObjectDescriptionMap"), - ("read_only_space", 0x07939): (173, "Tuple2Map"), - ("read_only_space", 0x07961): (174, "WasmExceptionTagMap"), - ("read_only_space", 0x07989): (175, "WasmIndirectFunctionTableMap"), - ("read_only_space", 0x079b1): (195, "SloppyArgumentsElementsMap"), - ("read_only_space", 0x079d9): (228, "DescriptorArrayMap"), - ("read_only_space", 0x07a01): (217, "UncompiledDataWithoutPreparseDataMap"), - ("read_only_space", 0x07a29): (215, "UncompiledDataWithPreparseDataMap"), - ("read_only_space", 0x07a51): (218, "UncompiledDataWithoutPreparseDataWithJobMap"), - ("read_only_space", 0x07a79): (216, "UncompiledDataWithPreparseDataAndJobMap"), - 
("read_only_space", 0x07aa1): (249, "OnHeapBasicBlockProfilerDataMap"), - ("read_only_space", 0x07ac9): (196, "TurbofanBitsetTypeMap"), - ("read_only_space", 0x07af1): (200, "TurbofanUnionTypeMap"), - ("read_only_space", 0x07b19): (199, "TurbofanRangeTypeMap"), - ("read_only_space", 0x07b41): (197, "TurbofanHeapConstantTypeMap"), - ("read_only_space", 0x07b69): (198, "TurbofanOtherNumberConstantTypeMap"), - ("read_only_space", 0x07b91): (245, "InternalClassMap"), - ("read_only_space", 0x07bb9): (256, "SmiPairMap"), - ("read_only_space", 0x07be1): (255, "SmiBoxMap"), - ("read_only_space", 0x07c09): (201, "ExportedSubClassBaseMap"), - ("read_only_space", 0x07c31): (202, "ExportedSubClassMap"), - ("read_only_space", 0x07c59): (226, "AbstractInternalClassSubclass1Map"), - ("read_only_space", 0x07c81): (227, "AbstractInternalClassSubclass2Map"), - ("read_only_space", 0x07ca9): (194, "InternalClassWithSmiElementsMap"), - ("read_only_space", 0x07cd1): (246, "InternalClassWithStructElementsMap"), - ("read_only_space", 0x07cf9): (203, "ExportedSubClass2Map"), - ("read_only_space", 0x07d21): (257, "SortStateMap"), - ("read_only_space", 0x07d49): (263, "WasmStringViewIterMap"), - ("read_only_space", 0x07d71): (145, "AllocationSiteWithWeakNextMap"), - ("read_only_space", 0x07d99): (145, "AllocationSiteWithoutWeakNextMap"), - ("read_only_space", 0x07e65): (137, "LoadHandler1Map"), - ("read_only_space", 0x07e8d): (137, "LoadHandler2Map"), - ("read_only_space", 0x07eb5): (137, "LoadHandler3Map"), - ("read_only_space", 0x07edd): (138, "StoreHandler0Map"), - ("read_only_space", 0x07f05): (138, "StoreHandler1Map"), - ("read_only_space", 0x07f2d): (138, "StoreHandler2Map"), - ("read_only_space", 0x07f55): (138, "StoreHandler3Map"), + ("read_only_space", 0x07455): (132, "PromiseFulfillReactionJobTaskMap"), + ("read_only_space", 0x0747d): (133, "PromiseRejectReactionJobTaskMap"), + ("read_only_space", 0x074a5): (134, "CallableTaskMap"), + ("read_only_space", 0x074cd): (135, "CallbackTaskMap"), + ("read_only_space", 0x074f5): (136, "PromiseResolveThenableJobTaskMap"), + ("read_only_space", 0x0751d): (139, "FunctionTemplateInfoMap"), + ("read_only_space", 0x07545): (140, "ObjectTemplateInfoMap"), + ("read_only_space", 0x0756d): (141, "AccessCheckInfoMap"), + ("read_only_space", 0x07595): (142, "AccessorPairMap"), + ("read_only_space", 0x075bd): (143, "AliasedArgumentsEntryMap"), + ("read_only_space", 0x075e5): (144, "AllocationMementoMap"), + ("read_only_space", 0x0760d): (147, "AsmWasmDataMap"), + ("read_only_space", 0x07635): (148, "AsyncGeneratorRequestMap"), + ("read_only_space", 0x0765d): (149, "BreakPointMap"), + ("read_only_space", 0x07685): (150, "BreakPointInfoMap"), + ("read_only_space", 0x076ad): (151, "CachedTemplateObjectMap"), + ("read_only_space", 0x076d5): (152, "CallSiteInfoMap"), + ("read_only_space", 0x076fd): (153, "ClassPositionsMap"), + ("read_only_space", 0x07725): (154, "DebugInfoMap"), + ("read_only_space", 0x0774d): (156, "ErrorStackDataMap"), + ("read_only_space", 0x07775): (158, "FunctionTemplateRareDataMap"), + ("read_only_space", 0x0779d): (160, "InterpreterDataMap"), + ("read_only_space", 0x077c5): (161, "ModuleRequestMap"), + ("read_only_space", 0x077ed): (162, "PromiseCapabilityMap"), + ("read_only_space", 0x07815): (163, "PromiseOnStackMap"), + ("read_only_space", 0x0783d): (164, "PromiseReactionMap"), + ("read_only_space", 0x07865): (165, "PropertyDescriptorObjectMap"), + ("read_only_space", 0x0788d): (166, "PrototypeInfoMap"), + ("read_only_space", 0x078b5): (167, 
"RegExpBoilerplateDescriptionMap"), + ("read_only_space", 0x078dd): (168, "ScriptMap"), + ("read_only_space", 0x07905): (169, "ScriptOrModuleMap"), + ("read_only_space", 0x0792d): (170, "SourceTextModuleInfoEntryMap"), + ("read_only_space", 0x07955): (171, "StackFrameInfoMap"), + ("read_only_space", 0x0797d): (172, "TemplateObjectDescriptionMap"), + ("read_only_space", 0x079a5): (173, "Tuple2Map"), + ("read_only_space", 0x079cd): (174, "WasmExceptionTagMap"), + ("read_only_space", 0x079f5): (175, "WasmIndirectFunctionTableMap"), + ("read_only_space", 0x07a1d): (195, "SloppyArgumentsElementsMap"), + ("read_only_space", 0x07a45): (228, "DescriptorArrayMap"), + ("read_only_space", 0x07a6d): (217, "UncompiledDataWithoutPreparseDataMap"), + ("read_only_space", 0x07a95): (215, "UncompiledDataWithPreparseDataMap"), + ("read_only_space", 0x07abd): (218, "UncompiledDataWithoutPreparseDataWithJobMap"), + ("read_only_space", 0x07ae5): (216, "UncompiledDataWithPreparseDataAndJobMap"), + ("read_only_space", 0x07b0d): (249, "OnHeapBasicBlockProfilerDataMap"), + ("read_only_space", 0x07b35): (196, "TurbofanBitsetTypeMap"), + ("read_only_space", 0x07b5d): (200, "TurbofanUnionTypeMap"), + ("read_only_space", 0x07b85): (199, "TurbofanRangeTypeMap"), + ("read_only_space", 0x07bad): (197, "TurbofanHeapConstantTypeMap"), + ("read_only_space", 0x07bd5): (198, "TurbofanOtherNumberConstantTypeMap"), + ("read_only_space", 0x07bfd): (245, "InternalClassMap"), + ("read_only_space", 0x07c25): (256, "SmiPairMap"), + ("read_only_space", 0x07c4d): (255, "SmiBoxMap"), + ("read_only_space", 0x07c75): (201, "ExportedSubClassBaseMap"), + ("read_only_space", 0x07c9d): (202, "ExportedSubClassMap"), + ("read_only_space", 0x07cc5): (226, "AbstractInternalClassSubclass1Map"), + ("read_only_space", 0x07ced): (227, "AbstractInternalClassSubclass2Map"), + ("read_only_space", 0x07d15): (194, "InternalClassWithSmiElementsMap"), + ("read_only_space", 0x07d3d): (246, "InternalClassWithStructElementsMap"), + ("read_only_space", 0x07d65): (203, "ExportedSubClass2Map"), + ("read_only_space", 0x07d8d): (257, "SortStateMap"), + ("read_only_space", 0x07db5): (263, "WasmStringViewIterMap"), + ("read_only_space", 0x07ddd): (145, "AllocationSiteWithWeakNextMap"), + ("read_only_space", 0x07e05): (145, "AllocationSiteWithoutWeakNextMap"), + ("read_only_space", 0x07ed1): (137, "LoadHandler1Map"), + ("read_only_space", 0x07ef9): (137, "LoadHandler2Map"), + ("read_only_space", 0x07f21): (137, "LoadHandler3Map"), + ("read_only_space", 0x07f49): (138, "StoreHandler0Map"), + ("read_only_space", 0x07f71): (138, "StoreHandler1Map"), + ("read_only_space", 0x07f99): (138, "StoreHandler2Map"), + ("read_only_space", 0x07fc1): (138, "StoreHandler3Map"), ("map_space", 0x02139): (2115, "ExternalMap"), ("map_space", 0x02161): (2119, "JSMessageObjectMap"), } From 62635a7270b82053df7927df06a107a328a55f9e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marja=20H=C3=B6ltt=C3=A4?= Date: Thu, 8 Sep 2022 11:49:06 +0200 Subject: [PATCH 0034/1772] [rab/gsab] Fix leftover IsTypedArrayElementsKind checks in map transitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With everything related to map transitions, RAB/GSAB typed array elements kinds should behave exactly like non-RAB/GSAB typed array elements kinds. 
Bug: chromium:1360736, v8:11111 Change-Id: Ie5cef928a25856f0c476653275066b49dfee6e41 Fixed: chromium:1360736 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3879497 Reviewed-by: Shu-yu Guo Auto-Submit: Marja Hölttä Commit-Queue: Shu-yu Guo Cr-Commit-Position: refs/heads/main@{#83119} --- src/objects/elements-kind.cc | 6 +++--- src/objects/elements-kind.h | 5 +++-- src/objects/js-objects.cc | 2 +- src/objects/map-updater.cc | 4 ++-- test/mjsunit/regress/regress-crbug-1360736.js | 9 +++++++++ 5 files changed, 18 insertions(+), 8 deletions(-) create mode 100644 test/mjsunit/regress/regress-crbug-1360736.js diff --git a/src/objects/elements-kind.cc b/src/objects/elements-kind.cc index aba1f6cf93..2d7999f38f 100644 --- a/src/objects/elements-kind.cc +++ b/src/objects/elements-kind.cc @@ -73,7 +73,7 @@ int ElementsKindToByteSize(ElementsKind elements_kind) { int GetDefaultHeaderSizeForElementsKind(ElementsKind elements_kind) { static_assert(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize); - if (IsTypedArrayElementsKind(elements_kind)) { + if (IsTypedArrayOrRabGsabTypedArrayElementsKind(elements_kind)) { return 0; } else { return FixedArray::kHeaderSize - kHeapObjectTag; @@ -178,8 +178,8 @@ bool IsMoreGeneralElementsKindTransition(ElementsKind from_kind, ElementsKind to_kind) { if (!IsFastElementsKind(from_kind)) return false; if (!IsFastTransitionTarget(to_kind)) return false; - DCHECK(!IsTypedArrayElementsKind(from_kind)); - DCHECK(!IsTypedArrayElementsKind(to_kind)); + DCHECK(!IsTypedArrayOrRabGsabTypedArrayElementsKind(from_kind)); + DCHECK(!IsTypedArrayOrRabGsabTypedArrayElementsKind(to_kind)); switch (from_kind) { case PACKED_SMI_ELEMENTS: return to_kind != PACKED_SMI_ELEMENTS; diff --git a/src/objects/elements-kind.h b/src/objects/elements-kind.h index e8fefc7976..12fba52ca4 100644 --- a/src/objects/elements-kind.h +++ b/src/objects/elements-kind.h @@ -239,7 +239,7 @@ inline bool IsSharedArrayElementsKind(ElementsKind kind) { inline bool IsTerminalElementsKind(ElementsKind kind) { return kind == TERMINAL_FAST_ELEMENTS_KIND || - IsTypedArrayElementsKind(kind) || + IsTypedArrayOrRabGsabTypedArrayElementsKind(kind) || IsRabGsabTypedArrayElementsKind(kind); } @@ -249,7 +249,8 @@ inline bool IsFastElementsKind(ElementsKind kind) { } inline bool IsTransitionElementsKind(ElementsKind kind) { - return IsFastElementsKind(kind) || IsTypedArrayElementsKind(kind) || + return IsFastElementsKind(kind) || + IsTypedArrayOrRabGsabTypedArrayElementsKind(kind) || kind == FAST_SLOPPY_ARGUMENTS_ELEMENTS || kind == FAST_STRING_WRAPPER_ELEMENTS; } diff --git a/src/objects/js-objects.cc b/src/objects/js-objects.cc index 97fe7dc775..02c5128ea9 100644 --- a/src/objects/js-objects.cc +++ b/src/objects/js-objects.cc @@ -4114,7 +4114,7 @@ bool TestElementsIntegrityLevel(JSObject object, PropertyAttributes level) { NumberDictionary::cast(object.elements()), object.GetReadOnlyRoots(), level); } - if (IsTypedArrayElementsKind(kind)) { + if (IsTypedArrayOrRabGsabTypedArrayElementsKind(kind)) { if (level == FROZEN && JSArrayBufferView::cast(object).byte_length() > 0) { return false; // TypedArrays with elements can't be frozen. } diff --git a/src/objects/map-updater.cc b/src/objects/map-updater.cc index a2769a768d..be6568aac4 100644 --- a/src/objects/map-updater.cc +++ b/src/objects/map-updater.cc @@ -371,7 +371,7 @@ base::Optional MapUpdater::TryUpdateNoLock(Isolate* isolate, Map old_map, // the integrity level transition sets the elements to dictionary mode. 
DCHECK(to_kind == DICTIONARY_ELEMENTS || to_kind == SLOW_STRING_WRAPPER_ELEMENTS || - IsTypedArrayElementsKind(to_kind) || + IsTypedArrayOrRabGsabTypedArrayElementsKind(to_kind) || IsAnyHoleyNonextensibleElementsKind(to_kind)); to_kind = info.integrity_level_source_map.elements_kind(); } @@ -584,7 +584,7 @@ MapUpdater::State MapUpdater::FindRootMap() { // the seal transitions), so change {to_kind} accordingly. DCHECK(to_kind == DICTIONARY_ELEMENTS || to_kind == SLOW_STRING_WRAPPER_ELEMENTS || - IsTypedArrayElementsKind(to_kind) || + IsTypedArrayOrRabGsabTypedArrayElementsKind(to_kind) || IsAnyNonextensibleElementsKind(to_kind)); to_kind = integrity_source_map_->elements_kind(); } diff --git a/test/mjsunit/regress/regress-crbug-1360736.js b/test/mjsunit/regress/regress-crbug-1360736.js new file mode 100644 index 0000000000..85dd3232c1 --- /dev/null +++ b/test/mjsunit/regress/regress-crbug-1360736.js @@ -0,0 +1,9 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +const rab = new ArrayBuffer(ArrayBuffer, {"maxByteLength": 7158170}); +const ta = new Uint8Array(rab); +const proxy = new Proxy(ta, {}); +proxy.valueOf = () => {}; +Object.seal(proxy); From 2987a4ea51d5ca72fbb02bdfedbe3ffc9c18f360 Mon Sep 17 00:00:00 2001 From: Lu Yahan Date: Fri, 9 Sep 2022 10:55:42 +0800 Subject: [PATCH 0035/1772] [riscv] Port [log][compiler] Enable first-execution logging Port commit b257641833f41bf41ae514bd5d09533ea8e376c1 Bug: v8:13146 Change-Id: Ie3727e873614f6e3e0749cb8cc10b287cd9643c2 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3885380 Reviewed-by: ji qiu Commit-Queue: Yahan Lu Cr-Commit-Position: refs/heads/main@{#83120} --- src/codegen/riscv/macro-assembler-riscv.cc | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/src/codegen/riscv/macro-assembler-riscv.cc b/src/codegen/riscv/macro-assembler-riscv.cc index b403b8eb9c..4f4b443c51 100644 --- a/src/codegen/riscv/macro-assembler-riscv.cc +++ b/src/codegen/riscv/macro-assembler-riscv.cc @@ -213,18 +213,29 @@ void MacroAssembler::MaybeOptimizeCodeOrTailCallOptimizedCodeSlot( DCHECK(!AreAliased(flags, feedback_vector)); UseScratchRegisterScope temps(this); temps.Include(t0, t1); - Label maybe_has_optimized_code; + Label maybe_has_optimized_code, maybe_needs_logging; // Check if optimized code is available. { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); And(scratch, flags, Operand(FeedbackVector::kFlagsTieringStateIsAnyRequested)); - Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg), + Branch(&maybe_needs_logging, eq, scratch, Operand(zero_reg), Label::Distance::kNear); } GenerateTailCallToReturnedCode(Runtime::kCompileOptimized); + bind(&maybe_needs_logging); + { + UseScratchRegisterScope temps(this); + Register scratch = temps.Acquire(); + And(scratch, flags, Operand(FeedbackVector::LogNextExecutionBit::kMask)); + Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg), + Label::Distance::kNear); + } + + GenerateTailCallToReturnedCode(Runtime::kFunctionLogNextExecution); + bind(&maybe_has_optimized_code); Register optimized_code_entry = flags; LoadAnyTaggedField( From 5ccb7f2e4621927bc6afe118317c358a6fbc9d8e Mon Sep 17 00:00:00 2001 From: Shu-yu Guo Date: Sat, 10 Sep 2022 15:03:07 +0000 Subject: [PATCH 0036/1772] Revert "[strings] Fix raw hash lookup for forwarded strings" This reverts commit 0a1f0e335ebe0af856a977d0213474de5488f544. 
Reason for revert: JetStream regressions: https://bugs.chromium.org/p/chromium/issues/detail?id=1362212 Original change's description: > [strings] Fix raw hash lookup for forwarded strings > > Raw hashes may need to be looked up via the forwarding table when > internalized strings are forwarded to external resources. Notably, the > megamorphic ICs were not correctly fetching the raw hash. > > Bug: v8:12007 > Change-Id: Ibbc75de57e707788f544fbd1a0f8f0041350e29d > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3885379 > Reviewed-by: Leszek Swirski > Commit-Queue: Shu-yu Guo > Reviewed-by: Patrick Thier > Cr-Commit-Position: refs/heads/main@{#83115} Bug: v8:12007 Change-Id: I64853d55ea32b04b3325377c0c1affd0c1a27c6e No-Presubmit: true No-Tree-Checks: true No-Try: true Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3887949 Commit-Queue: Rubber Stamper Bot-Commit: Rubber Stamper Auto-Submit: Shu-yu Guo Owners-Override: Shu-yu Guo Cr-Commit-Position: refs/heads/main@{#83121} --- src/codegen/code-stub-assembler.cc | 48 +++---------------- src/codegen/code-stub-assembler.h | 4 -- src/codegen/external-reference.cc | 2 - src/codegen/external-reference.h | 1 - src/ic/accessor-assembler.cc | 2 +- src/ic/stub-cache.cc | 2 +- src/objects/name-inl.h | 8 ---- src/objects/name.h | 1 - src/objects/string-forwarding-table.cc | 5 -- src/objects/string-forwarding-table.h | 1 - src/objects/string.cc | 2 +- .../shared-external-string-megamorphic-ic.js | 37 -------------- 12 files changed, 9 insertions(+), 104 deletions(-) delete mode 100644 test/mjsunit/shared-memory/shared-external-string-megamorphic-ic.js diff --git a/src/codegen/code-stub-assembler.cc b/src/codegen/code-stub-assembler.cc index 3c87ff7dbc..d3cd497885 100644 --- a/src/codegen/code-stub-assembler.cc +++ b/src/codegen/code-stub-assembler.cc @@ -1996,14 +1996,14 @@ TNode CodeStubAssembler::LoadJSReceiverIdentityHash( } TNode CodeStubAssembler::LoadNameHashAssumeComputed(TNode name) { - TNode hash_field = LoadNameRawHash(name); + TNode hash_field = LoadNameRawHashField(name); CSA_DCHECK(this, IsClearWord32(hash_field, Name::kHashNotComputedMask)); return DecodeWord32(hash_field); } TNode CodeStubAssembler::LoadNameHash(TNode name, Label* if_hash_not_computed) { - TNode raw_hash_field = LoadNameRawHash(name); + TNode raw_hash_field = LoadNameRawHashField(name); if (if_hash_not_computed != nullptr) { GotoIf(IsSetWord32(raw_hash_field, Name::kHashNotComputedMask), if_hash_not_computed); @@ -2011,43 +2011,6 @@ TNode CodeStubAssembler::LoadNameHash(TNode name, return DecodeWord32(raw_hash_field); } -TNode CodeStubAssembler::LoadNameRawHash(TNode name) { - TVARIABLE(Uint32T, var_raw_hash); - - Label if_forwarding_index(this), not_forwarding_index(this), done(this); - - TNode raw_hash_field = LoadNameRawHashField(name); - Branch(IsEqualInWord32( - raw_hash_field, Name::HashFieldType::kForwardingIndex), - &if_forwarding_index, ¬_forwarding_index); - - BIND(¬_forwarding_index); - { - var_raw_hash = raw_hash_field; - Goto(&done); - } - - BIND(&if_forwarding_index); - { - TNode function = - ExternalConstant(ExternalReference::raw_hash_from_forward_table()); - const TNode isolate_ptr = - ExternalConstant(ExternalReference::isolate_address(isolate())); - TNode result = UncheckedCast(CallCFunction( - function, MachineType::Uint32(), - std::make_pair(MachineType::Pointer(), isolate_ptr), - std::make_pair( - MachineType::Int32(), - DecodeWord32(raw_hash_field)))); - - var_raw_hash = result; - Goto(&done); - } - - BIND(&done); - 
return var_raw_hash.value(); -} - TNode CodeStubAssembler::LoadStringLengthAsSmi(TNode string) { return SmiFromIntPtr(LoadStringLengthAsWord(string)); } @@ -6784,7 +6747,8 @@ TNode CodeStubAssembler::IsUniqueNameNoIndex(TNode object) { IsInternalizedStringInstanceType(instance_type), [=] { return IsNotEqualInWord32( - LoadNameRawHash(CAST(object)), Name::HashFieldType::kIntegerIndex); + LoadNameRawHashField(CAST(object)), + Name::HashFieldType::kIntegerIndex); }, [=] { return IsSymbolInstanceType(instance_type); }); } @@ -6801,7 +6765,7 @@ TNode CodeStubAssembler::IsUniqueNameNoCachedIndex( return Select( IsInternalizedStringInstanceType(instance_type), [=] { - return IsSetWord32(LoadNameRawHash(CAST(object)), + return IsSetWord32(LoadNameRawHashField(CAST(object)), Name::kDoesNotContainCachedArrayIndexMask); }, [=] { return IsSymbolInstanceType(instance_type); }); @@ -7287,7 +7251,7 @@ TNode CodeStubAssembler::StringToNumber(TNode input) { TVARIABLE(Number, var_result); // Check if string has a cached array index. - TNode raw_hash_field = LoadNameRawHash(input); + TNode raw_hash_field = LoadNameRawHashField(input); GotoIf(IsSetWord32(raw_hash_field, Name::kDoesNotContainCachedArrayIndexMask), &runtime); diff --git a/src/codegen/code-stub-assembler.h b/src/codegen/code-stub-assembler.h index 1d002085a4..6b07a7c55a 100644 --- a/src/codegen/code-stub-assembler.h +++ b/src/codegen/code-stub-assembler.h @@ -1451,10 +1451,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler Label* if_hash_not_computed = nullptr); TNode LoadNameHashAssumeComputed(TNode name); - // Load the Name::RawHash() value of a name as an uint32 value. Follows - // through the forwarding table. - TNode LoadNameRawHash(TNode name); - // Load length field of a String object as Smi value. TNode LoadStringLengthAsSmi(TNode string); // Load length field of a String object as intptr_t value. diff --git a/src/codegen/external-reference.cc b/src/codegen/external-reference.cc index 4d30eff328..ef1f184c5f 100644 --- a/src/codegen/external-reference.cc +++ b/src/codegen/external-reference.cc @@ -1024,8 +1024,6 @@ FUNCTION_REFERENCE(try_string_to_index_or_lookup_existing, StringTable::TryStringToIndexOrLookupExisting) FUNCTION_REFERENCE(string_from_forward_table, StringForwardingTable::GetForwardStringAddress) -FUNCTION_REFERENCE(raw_hash_from_forward_table, - StringForwardingTable::GetRawHashStatic) FUNCTION_REFERENCE(string_to_array_index_function, String::ToArrayIndex) FUNCTION_REFERENCE(array_indexof_includes_smi_or_object, ArrayIndexOfIncludesSmiOrObject) diff --git a/src/codegen/external-reference.h b/src/codegen/external-reference.h index 04a2daf5c2..b6df8547f5 100644 --- a/src/codegen/external-reference.h +++ b/src/codegen/external-reference.h @@ -208,7 +208,6 @@ class StatsCounter; V(try_string_to_index_or_lookup_existing, \ "try_string_to_index_or_lookup_existing") \ V(string_from_forward_table, "string_from_forward_table") \ - V(raw_hash_from_forward_table, "raw_hash_from_forward_table") \ IF_WASM(V, wasm_call_trap_callback_for_testing, \ "wasm::call_trap_callback_for_testing") \ IF_WASM(V, wasm_f32_ceil, "wasm::f32_ceil_wrapper") \ diff --git a/src/ic/accessor-assembler.cc b/src/ic/accessor-assembler.cc index d1b99b4711..c723f449c2 100644 --- a/src/ic/accessor-assembler.cc +++ b/src/ic/accessor-assembler.cc @@ -2887,7 +2887,7 @@ enum AccessorAssembler::StubCacheTable : int { TNode AccessorAssembler::StubCachePrimaryOffset(TNode name, TNode map) { // Compute the hash of the name (use entire hash field). 
- TNode raw_hash_field = LoadNameRawHash(name); + TNode raw_hash_field = LoadNameRawHashField(name); CSA_DCHECK(this, Word32Equal(Word32And(raw_hash_field, Int32Constant(Name::kHashNotComputedMask)), diff --git a/src/ic/stub-cache.cc b/src/ic/stub-cache.cc index 4dd60fdfa9..2a786398cb 100644 --- a/src/ic/stub-cache.cc +++ b/src/ic/stub-cache.cc @@ -50,7 +50,7 @@ void StubCache::Initialize() { // is scaled by 1 << kCacheIndexShift. int StubCache::PrimaryOffset(Name name, Map map) { // Compute the hash of the name (use entire hash field). - uint32_t field = name.RawHash(); + uint32_t field = name.raw_hash_field(); DCHECK(Name::IsHashFieldComputed(field)); // Using only the low bits in 64-bit mode is unlikely to increase the // risk of collision even if the heap is spread over an area larger than diff --git a/src/objects/name-inl.h b/src/objects/name-inl.h index ee3585eb6a..570ac0a508 100644 --- a/src/objects/name-inl.h +++ b/src/objects/name-inl.h @@ -190,14 +190,6 @@ uint32_t Name::EnsureRawHash( return String::cast(*this).ComputeAndSetRawHash(access_guard); } -uint32_t Name::RawHash() { - uint32_t field = raw_hash_field(kAcquireLoad); - if (V8_UNLIKELY(IsForwardingIndex(field))) { - return GetRawHashFromForwardingTable(field); - } - return field; -} - uint32_t Name::EnsureHash() { return HashBits::decode(EnsureRawHash()); } uint32_t Name::EnsureHash(const SharedStringAccessGuardIfNeeded& access_guard) { diff --git a/src/objects/name.h b/src/objects/name.h index c2816d04c2..dcd1b9652d 100644 --- a/src/objects/name.h +++ b/src/objects/name.h @@ -190,7 +190,6 @@ class Name : public TorqueGeneratedName { // a forwarding index. inline uint32_t EnsureRawHash(); inline uint32_t EnsureRawHash(const SharedStringAccessGuardIfNeeded&); - inline uint32_t RawHash(); static inline bool IsHashFieldComputed(uint32_t raw_hash_field); static inline bool IsHash(uint32_t raw_hash_field); diff --git a/src/objects/string-forwarding-table.cc b/src/objects/string-forwarding-table.cc index 53366ed2bb..0a6462b613 100644 --- a/src/objects/string-forwarding-table.cc +++ b/src/objects/string-forwarding-table.cc @@ -261,11 +261,6 @@ uint32_t StringForwardingTable::GetRawHash(PtrComprCageBase cage_base, return block->record(index_in_block)->raw_hash(cage_base); } -// static -uint32_t StringForwardingTable::GetRawHashStatic(Isolate* isolate, int index) { - return isolate->string_forwarding_table()->GetRawHash(isolate, index); -} - v8::String::ExternalStringResourceBase* StringForwardingTable::GetExternalResource(int index, bool* is_one_byte) const { CHECK_LT(index, size()); diff --git a/src/objects/string-forwarding-table.h b/src/objects/string-forwarding-table.h index 3cf7d3280b..72e4d73c0b 100644 --- a/src/objects/string-forwarding-table.h +++ b/src/objects/string-forwarding-table.h @@ -56,7 +56,6 @@ class StringForwardingTable { static Address GetForwardStringAddress(Isolate* isolate, int index); V8_EXPORT_PRIVATE uint32_t GetRawHash(PtrComprCageBase cage_base, int index) const; - static uint32_t GetRawHashStatic(Isolate* isolate, int index); v8::String::ExternalStringResourceBase* GetExternalResource( int index, bool* is_one_byte) const; diff --git a/src/objects/string.cc b/src/objects/string.cc index 776ac6e6e7..e97fb99669 100644 --- a/src/objects/string.cc +++ b/src/objects/string.cc @@ -1675,7 +1675,7 @@ uint32_t String::ComputeAndSetRawHash( string = ThinString::cast(string).actual(cage_base); shape = StringShape(string, cage_base); if (length() == string.length()) { - uint32_t raw_hash = string.RawHash(); + 
uint32_t raw_hash = string.raw_hash_field();
       DCHECK(IsHashFieldComputed(raw_hash));
       set_raw_hash_field(raw_hash);
       return raw_hash;
diff --git a/test/mjsunit/shared-memory/shared-external-string-megamorphic-ic.js b/test/mjsunit/shared-memory/shared-external-string-megamorphic-ic.js
deleted file mode 100644
index 19d27a7cf8..0000000000
--- a/test/mjsunit/shared-memory/shared-external-string-megamorphic-ic.js
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2022 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Flags: --expose-externalize-string --shared-string-table
-// Flags: --allow-natives-syntax
-
-function set(o, ext_key) {
-  o[ext_key] = "bar";
-}
-function get(o, ext_key) {
-  o[ext_key];
-}
-
-%PrepareFunctionForOptimization(set);
-%OptimizeFunctionOnNextCall(set);
-%PrepareFunctionForOptimization(get);
-%OptimizeFunctionOnNextCall(get);
-
-(function test() {
-  let ext_key = "AAAAAAAAAAAAAAAAAAAAAA";
-  externalizeString(ext_key);
-
-  set({a:1}, ext_key);
-  set({b:2}, ext_key);
-  set({c:3}, ext_key);
-  set({d:4}, ext_key);
-  set({e:5}, ext_key);
-  set({f:6}, ext_key);
-
-  get({a:1}, ext_key);
-  get({b:2}, ext_key);
-  get({c:3}, ext_key);
-  get({d:4}, ext_key);
-  get({e:5}, ext_key);
-  get({f:6}, ext_key);
-})();

From b1a147705e46abf76d233918e1d4502b8b3d1c88 Mon Sep 17 00:00:00 2001
From: Lu Yahan
Date: Fri, 9 Sep 2022 20:52:52 +0800
Subject: [PATCH 0037/1772] [riscv] Port [wasm][liftoff] Fix and cleanup
 tracing of return value

Port commit 6f9e71fa74eb589a48c0f5065ac961a64cb515a3

Change-Id: Id5226e0892f67573cea289040c2d5aa85f159478
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3886478
Commit-Queue: ji qiu
Reviewed-by: ji qiu
Auto-Submit: Yahan Lu
Cr-Commit-Position: refs/heads/main@{#83122}
---
 src/wasm/baseline/riscv/liftoff-assembler-riscv.h | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/src/wasm/baseline/riscv/liftoff-assembler-riscv.h b/src/wasm/baseline/riscv/liftoff-assembler-riscv.h
index 925b02a462..fb2dcf62cc 100644
--- a/src/wasm/baseline/riscv/liftoff-assembler-riscv.h
+++ b/src/wasm/baseline/riscv/liftoff-assembler-riscv.h
@@ -157,6 +157,10 @@ void LiftoffAssembler::PatchPrepareStackFrame(
   GenPCRelativeJump(kScratchReg, imm32);
 }
 
+void LiftoffAssembler::LoadSpillAddress(Register dst, int offset) {
+  SubWord(dst, fp, offset);
+}
+
 void LiftoffAssembler::FinishCode() { ForceConstantPoolEmissionWithoutJump(); }
 
 void LiftoffAssembler::AbortCompilation() { AbortedCodeGeneration(); }

From 093e68b408d8fceaa68f14185e17da90cb272050 Mon Sep 17 00:00:00 2001
From: Frank Tang
Date: Fri, 9 Sep 2022 20:21:33 -0700
Subject: [PATCH 0038/1772] [Temporal] Sync the Parser to the latest spec.

Add the TimeHourMinuteBasicFormatNotAmbiguousWithMonthDay,
TimeZoneNumericUTCOffsetNotAmbiguousWithDayOfMonth,
TimeZoneNumericUTCOffsetNotAmbiguousWithMonth,
TimeZoneIdentifier, UnpaddedHour, and TimeZoneIANALegacyName
productions.

Sync the TemporalInstantString, TemporalTimeString, TimeZone,
TimeZoneBracketedAnnotation, TemporalTimeZoneString,
ToTemporalTimeZone, and TimeZoneIANAName productions with the spec.

Fix bugs in ScanCalendarDateTimeTimeRequired and ToTemporalTimeZone.
Change the type of TimeZoneRecord::name from Handle<String> to
Handle<Object> so that it can hold undefined. Update the parser tests
accordingly.
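For illustration, the new productions accept time-zone names such as the
following (examples derived from the grammar changes below, not from the CL's
test data):

  Etc/GMT+9    matches Etc/GMT ASCIISign UnpaddedHour
  Etc/GMT-14   matches Etc/GMT ASCIISign UnpaddedHour
  GMT0         matches TimeZoneIANALegacyName
  PST8PDT      matches TimeZoneIANALegacyName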
Spec Text:
https://tc39.es/proposal-temporal/#sec-temporal-iso8601grammar
https://tc39.es/proposal-temporal/#sec-temporal-totemporaltimezone

Related PR changes:
https://github.com/tc39/proposal-temporal/pull/2284
https://github.com/tc39/proposal-temporal/pull/2287
https://github.com/tc39/proposal-temporal/pull/2345

Bug: v8:11544
Change-Id: I6f1a5e5dedba461db9f36abe76fa97119c1f8c2c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3822342
Reviewed-by: Shu-yu Guo
Commit-Queue: Frank Tang
Cr-Commit-Position: refs/heads/main@{#83123}
---
 src/objects/js-temporal-objects.cc       |  42 +-
 src/temporal/temporal-parser.cc          | 700 ++++++------
 test/test262/test262.status              |  63 --
 .../temporal/temporal-parser-unittest.cc | 282 +++---
 4 files changed, 389 insertions(+), 698 deletions(-)

diff --git a/src/objects/js-temporal-objects.cc b/src/objects/js-temporal-objects.cc
index 6b10a0bb37..eb125a3707 100644
--- a/src/objects/js-temporal-objects.cc
+++ b/src/objects/js-temporal-objects.cc
@@ -152,7 +152,7 @@ struct DateDurationRecord {
 struct TimeZoneRecord {
   bool z;
   Handle<Object> offset_string;  // String or Undefined
-  Handle<String> name;
+  Handle<Object> name;  // String or Undefined
 };
 
 struct ZonedDateTimeRecord {
@@ -3007,27 +3007,15 @@ MaybeHandle<JSReceiver> ToTemporalTimeZone(
                              Handle<JSReceiver>());
 
   // 4. If parseResult.[[Name]] is not undefined, then
-  if (parse_result.name->length() > 0) {
+  if (!parse_result.name->IsUndefined()) {
+    DCHECK(parse_result.name->IsString());
     // a. Let name be parseResult.[[Name]].
-    Handle<String> name = parse_result.name;
+    Handle<String> name = Handle<String>::cast(parse_result.name);
     // b. If ParseText(StringToCodePoints(name, TimeZoneNumericUTCOffset)) is
-    // not a List of errors, then
+    // a List of errors, then
     base::Optional<ParsedISO8601Result> parsed_offset =
         TemporalParser::ParseTimeZoneNumericUTCOffset(isolate, name);
-    if (parsed_offset.has_value()) {
-      // i. If parseResult.[[OffsetString]] is not undefined, and !
-      // ParseTimeZoneOffsetString(parseResult.[[OffsetString]]) ≠ !
-      // ParseTimeZoneOffsetString(name), throw a RangeError exception.
-      if (!parse_result.offset_string->IsUndefined() &&
-          ParseTimeZoneOffsetString(
-              isolate, Handle<String>::cast(parse_result.offset_string))
-                  .ToChecked() !=
-              ParseTimeZoneOffsetString(isolate, name).ToChecked()) {
-        THROW_NEW_ERROR(isolate, NEW_TEMPORAL_INVALID_ARG_RANGE_ERROR(),
-                        JSReceiver);
-      }
-      // c. Else,
-    } else {
+    if (!parsed_offset.has_value()) {
       // i. If ! IsValidTimeZoneName(name) is false, throw a RangeError
       // exception.
       if (!IsValidTimeZoneName(isolate, name)) {
@@ -3036,10 +3024,9 @@
       }
       // ii. Set name to ! CanonicalizeTimeZoneName(name).
       name = CanonicalizeTimeZoneName(isolate, name);
-
-      // d. Return ! CreateTemporalTimeZone(name).
-      return temporal::CreateTemporalTimeZone(isolate, name);
     }
+    // c. Return ! CreateTemporalTimeZone(name).
+    return temporal::CreateTemporalTimeZone(isolate, name);
   }
   // 5. If parseResult.[[Z]] is true, return ! CreateTemporalTimeZone("UTC").
   if (parse_result.z) {
@@ -3654,6 +3641,7 @@ Maybe<ZonedDateTimeRecord> ParseTemporalRelativeToString(
     // b. Let offsetString be undefined.
     result.time_zone.offset_string = isolate->factory()->undefined_value();
     // c. Let timeZone be undefined.
+    result.time_zone.name = isolate->factory()->undefined_value();
   }
   // 5. Return the Record { [[Year]]: result.[[Year]], [[Month]]:
   // result.[[Month]], [[Day]]: result.[[Day]], [[Hour]]: result.[[Hour]],
@@ -3932,7 +3920,7 @@ Maybe<TimeZoneRecord> ParseTemporalTimeZoneString(Isolate* isolate,
   // productions, or undefined if not present.
   // 4. If name is empty, then
   //  a.
Set name to undefined. - Handle name = isolate->factory()->empty_string(); + Handle name = isolate->factory()->undefined_value(); // 5. Else, // a. Set name to CodePointsToString(name). if (parsed->tzi_name_length > 0) { @@ -8195,12 +8183,14 @@ MaybeHandle ToRelativeTemporalObject(Isolate* isolate, offset_string_obj = result.time_zone.offset_string; // e. Let timeZoneName be result.[[TimeZoneIANAName]]. - Handle time_zone_name = result.time_zone.name; + Handle time_zone_name_obj = result.time_zone.name; // f. If timeZoneName is not undefined, then - if (!time_zone_name.is_null()) { + if (!time_zone_name_obj->IsUndefined()) { // i. If ParseText(StringToCodePoints(timeZoneName), // TimeZoneNumericUTCOffset) is a List of errors, then + DCHECK(time_zone_name_obj->IsString()); + Handle time_zone_name = Handle::cast(time_zone_name_obj); base::Optional parsed = TemporalParser::ParseTimeZoneNumericUTCOffset(isolate, time_zone_name); @@ -16066,8 +16056,8 @@ MaybeHandle ToTemporalZonedDateTime( Handle()); // e. Assert: timeZoneName is not undefined. - Handle time_zone_name = result.time_zone.name; - DCHECK(!time_zone_name.is_null()); + DCHECK(!result.time_zone.name->IsUndefined()); + Handle time_zone_name = Handle::cast(result.time_zone.name); // f. If ParseText(StringToCodePoints(timeZoneName), // TimeZoneNumericUTCOffset) is a List of errors, then diff --git a/src/temporal/temporal-parser.cc b/src/temporal/temporal-parser.cc index 5e07ac8d6f..450502aa06 100644 --- a/src/temporal/temporal-parser.cc +++ b/src/temporal/temporal-parser.cc @@ -126,6 +126,22 @@ int32_t ScanHour(base::Vector str, int32_t s, int32_t* out) { return ScanTwoDigitsExpectRange(str, s, 0, 23, out); } +// UnpaddedHour : +// DecimalDigit +// 1 DecimalDigit +// 20 +// 21 +// 22 +// 23 +template +int32_t ScanUnpaddedHour(base::Vector str, int32_t s) { + int32_t dummy; + int32_t len = ScanTwoDigitsExpectRange(str, s, 10, 23, &dummy); + if (len > 0) return len; + if (str.length() >= (s + 1) && IsDecimalDigit(str[s])) return 1; + return 0; +} + // MinuteSecond: // [0 1 2 3 4 5] Digit template @@ -366,83 +382,14 @@ int32_t ScanDate(base::Vector str, int32_t s, ParsedISO8601Result* r) { return cur + len - s; } -// TimeHourNotValidMonth : one of -// `00` `13` `14` `15` `16` `17` `18` `19` `20` `21` `23` +// DateMonthWithThirtyOneDays : one of +// 01 03 05 07 08 10 12 template -int32_t ScanTimeHourNotValidMonth(base::Vector str, int32_t s, - int32_t* out) { - return ScanTwoDigitsExpectZeroOrRange(str, s, 13, 23, out); -} - -// TimeHourNotThirtyOneDayMonth : one of -// `02` `04` `06` `09` `11` -template -int32_t ScanTimeHourNotThirtyOneDayMonth(base::Vector str, int32_t s, - int32_t* out) { - return HasTwoDigits(str, s, out) && - (*out == 2 || *out == 4 || *out == 6 || *out == 9 || - *out == 11) - ? 
2 - : 0; -} - -// TimeHourTwoOnly : `02` -template -int32_t ScanTimeHourTwoOnly(base::Vector str, int32_t s, int32_t* out) { - return ScanTwoDigitsExpectValue(str, s, 2, out); -} - -// TimeMinuteNotValidDay : -// `00` -// `32` -// `33` -// `34` -// `35` -// `36` -// `37` -// `38` -// `39` -// `4` DecimalDigit -// `5` DecimalDigit -// `60` -template -int32_t ScanTimeMinuteNotValidDay(base::Vector str, int32_t s, - int32_t* out) { - return ScanTwoDigitsExpectZeroOrRange(str, s, 32, 60, out); -} - -// TimeMinuteThirtyOnly : `30` -template -int32_t ScanTimeMinuteThirtyOnly(base::Vector str, int32_t s, - int32_t* out) { - return ScanTwoDigitsExpectValue(str, s, 30, out); -} - -// TimeMinuteThirtyOneOnly : `31` -template -int32_t ScanTimeMinuteThirtyOneOnly(base::Vector str, int32_t s, - int32_t* out) { - return ScanTwoDigitsExpectValue(str, s, 31, out); -} - -// TimeSecondNotValidMonth : -// `00` -// `13` -// `14` -// `15` -// `16` -// `17` -// `18` -// `19` -// `2` DecimalDigit -// `3` DecimalDigit -// `4` DecimalDigit -// `5` DecimalDigit -// `60` -template -int32_t ScanTimeSecondNotValidMonth(base::Vector str, int32_t s, - int32_t* out) { - return ScanTwoDigitsExpectZeroOrRange(str, s, 13, 60, out); +int32_t ScanDateMonthWithThirtyOneDays(base::Vector str, int32_t s) { + int32_t value; + if (!HasTwoDigits(str, s, &value)) return false; + return value == 1 || value == 3 || value == 5 || value == 7 || value == 8 || + value == 10 || value == 12; } // TimeZoneUTCOffsetHour: Hour @@ -471,13 +418,6 @@ int32_t ScanTimeZoneUTCOffsetFraction(base::Vector str, int32_t s, return 0; } -// We found the only difference between TimeZoneNumericUTCOffset and -// TimeZoneNumericUTCOffsetNotAmbiguous is ascii minus ('-') is not allowed in -// the production with only TimeZoneUTCOffsetHour for the case of -// TimeZoneNumericUTCOffsetNotAmbiguous. -// So we use the ScanTimeZoneNumericUTCOffset_Common template with an extra enum -// to implement both. -// // TimeZoneNumericUTCOffset: // TimeZoneUTCOffsetSign TimeZoneUTCOffsetHour // TimeZoneUTCOffsetSign TimeZoneUTCOffsetHour : TimeZoneUTCOffsetMinute @@ -486,32 +426,19 @@ int32_t ScanTimeZoneUTCOffsetFraction(base::Vector str, int32_t s, // TimeZoneUTCOffsetSecond [TimeZoneUTCOffsetFraction] TimeZoneUTCOffsetSign // TimeZoneUTCOffsetHour TimeZoneUTCOffsetMinute TimeZoneUTCOffsetSecond // [TimeZoneUTCOffsetFraction] -// -// TimeZoneNumericUTCOffsetNotAmbiguous : -// + TimeZoneUTCOffsetHour -// U+2212 TimeZoneUTCOffsetHour -// TimeZoneUTCOffsetSign TimeZoneUTCOffsetHour : TimeZoneUTCOffsetMinute -// TimeZoneUTCOffsetSign TimeZoneUTCOffsetHour TimeZoneUTCOffsetMinute -// TimeZoneUTCOffsetSign TimeZoneUTCOffsetHour : TimeZoneUTCOffsetMinute : -// TimeZoneUTCOffsetSecond [TimeZoneUTCOffsetFraction] TimeZoneUTCOffsetSign -// TimeZoneUTCOffsetHour TimeZoneUTCOffsetMinute TimeZoneUTCOffsetSecond -// [TimeZoneUTCOffsetFraction] -enum class Ambiguous { kAmbiguous, kNotAmbiguous }; + template -int32_t ScanTimeZoneNumericUTCOffset_Common(base::Vector str, int32_t s, - ParsedISO8601Result* r, - Ambiguous ambiguous) { +int32_t ScanTimeZoneNumericUTCOffset(base::Vector str, int32_t s, + ParsedISO8601Result* r) { int32_t len, hour, minute, second, nanosecond; int32_t cur = s; if ((str.length() < (cur + 1)) || (!IsTimeZoneUTCOffsetSign(str[cur]))) { return 0; } - bool sign_is_ascii_minus = str[s] == '-'; int32_t sign = (CanonicalSign(str[cur++]) == '-') ? 
-1 : 1; if ((len = ScanTimeZoneUTCOffsetHour(str, cur, &hour)) == 0) return 0; cur += len; if ((cur + 1) > str.length()) { - if (ambiguous == Ambiguous::kNotAmbiguous && sign_is_ascii_minus) return 0; // TimeZoneUTCOffsetSign TimeZoneUTCOffsetHour r->tzuo_sign = sign; r->tzuo_hour = hour; @@ -536,9 +463,6 @@ int32_t ScanTimeZoneNumericUTCOffset_Common(base::Vector str, int32_t s, if ((len = ScanTimeZoneUTCOffsetSecond(str, cur, &second)) == 0) return 0; } else { if ((len = ScanTimeZoneUTCOffsetMinute(str, cur, &minute)) == 0) { - if (ambiguous == Ambiguous::kNotAmbiguous && sign_is_ascii_minus) { - return 0; - } // TimeZoneUTCOffsetSign TimeZoneUTCOffsetHour r->tzuo_sign = sign; r->tzuo_hour = hour; @@ -570,68 +494,6 @@ int32_t ScanTimeZoneNumericUTCOffset_Common(base::Vector str, int32_t s, return cur - s; } -template -int32_t ScanTimeZoneNumericUTCOffset(base::Vector str, int32_t s, - ParsedISO8601Result* r) { - return ScanTimeZoneNumericUTCOffset_Common(str, s, r, - Ambiguous::kAmbiguous); -} - -template -int32_t ScanTimeZoneNumericUTCOffsetNotAmbiguous(base::Vector str, - int32_t s, - ParsedISO8601Result* r) { - return ScanTimeZoneNumericUTCOffset_Common(str, s, r, - Ambiguous::kNotAmbiguous); -} - -// TimeZoneNumericUTCOffsetNotAmbiguousAllowedNegativeHour : -// TimeZoneNumericUTCOffsetNotAmbiguous -// `-` TimeHourNotValidMonth -template -int32_t ScanTimeZoneNumericUTCOffsetNotAmbiguousAllowedNegativeHour( - base::Vector str, int32_t s, ParsedISO8601Result* r) { - int32_t len; - if ((len = ScanTimeZoneNumericUTCOffsetNotAmbiguous(str, s, r)) > 0) { - return len; - } - if (str.length() >= (s + 3) && str[s] == '-') { - int32_t time_hour; - len = ScanTimeHourNotValidMonth(str, s + 1, &time_hour); - if (len == 0) return 0; - r->time_hour = time_hour; - return 1 + len; - } - return 0; -} - -// TimeHourMinuteBasicFormatNotAmbiguous : -// TimeHourNotValidMonth TimeMinute -// TimeHour TimeMinuteNotValidDay -// TimeHourNotThirtyOneDayMonth TimeMinuteThirtyOneOnly -// TimeHourTwoOnly TimeMinuteThirtyOnly -template -int32_t ScanTimeHourMinuteBasicFormatNotAmbiguous(base::Vector str, - int32_t s, - ParsedISO8601Result* r) { - int32_t time_hour, time_minute; - int32_t len1, len2; - if (((len1 = ScanTimeHourNotValidMonth(str, s, &time_hour)) > 0 && - (len2 = ScanTimeMinute(str, s + len1, &time_minute)) > 0) || - ((len1 = ScanTimeHour(str, s, &time_hour)) > 0 && - (len2 = ScanTimeMinuteNotValidDay(str, s + len1, &time_minute)) > 0) || - ((len1 = ScanTimeHourNotThirtyOneDayMonth(str, s, &time_hour)) > 0 && - (len2 = ScanTimeMinuteThirtyOneOnly(str, s + len1, &time_minute)) > 0) || - ((len1 = ScanTimeHourTwoOnly(str, s, &time_hour)) > 0 && - (len2 = ScanTimeMinuteThirtyOnly(str, s + len1, &time_minute)) > 0)) { - // Only set both after we got both - r->time_hour = time_hour; - r->time_minute = time_minute; - return len1 + len2; - } - return 0; -} - // TimeZoneUTCOffset: // TimeZoneNumericUTCOffset // UTCDesignator @@ -661,19 +523,88 @@ int32_t ScanTimeZoneIANANameComponent(base::Vector str, int32_t s) { if ((cur - s) == 2 && str[s] == '.' 
&& str[s + 1] == '.') return 0;
   return cur - s;
 }
 
+// TimeZoneIANALegacyName :
+//   Etc/GMT0
+//   GMT0
+//   GMT-0
+//   GMT+0
+//   EST5EDT
+//   CST6CDT
+//   MST7MDT
+//   PST8PDT
+template <typename Char>
+int32_t ScanTimeZoneIANALegacyName(base::Vector<Char> str, int32_t s) {
+  int32_t cur = s;
+  {
+    constexpr int32_t len = 4;
+    if (str.length() < cur + len) return 0;
+    if (CompareCharsEqual(str.begin() + cur, "GMT0", len)) return len;
+  }
+
+  {
+    constexpr int32_t len = 5;
+    if (str.length() < cur + len) return 0;
+    if (CompareCharsEqual(str.begin() + cur, "GMT+0", len) ||
+        CompareCharsEqual(str.begin() + cur, "GMT-0", len)) {
+      return len;
+    }
+  }
+
+  {
+    constexpr int32_t len = 7;
+    if (str.length() < cur + len) return 0;
+    if (CompareCharsEqual(str.begin() + cur, "EST5EDT", len) ||
+        CompareCharsEqual(str.begin() + cur, "CST6CDT", len) ||
+        CompareCharsEqual(str.begin() + cur, "MST7MDT", len) ||
+        CompareCharsEqual(str.begin() + cur, "PST8PDT", len)) {
+      return len;
+    }
+  }
+
+  {
+    constexpr int32_t len = 8;
+    if (str.length() < cur + len) return 0;
+    if (CompareCharsEqual(str.begin() + cur, "Etc/GMT0", len)) return len;
+  }
+
+  return 0;
+}
+
+// Etc/GMT ASCIISign UnpaddedHour
+template <typename Char>
+int32_t ScanEtcGMTASCIISignUnpaddedHour(base::Vector<Char> str, int32_t s) {
+  if ((s + 9) > str.length()) return 0;
+  int32_t cur = s;
+  int32_t len = arraysize("Etc/GMT") - 1;
+  if (!CompareCharsEqual(str.begin() + cur, "Etc/GMT", len)) return 0;
+  cur += len;
+  Char sign = str[cur++];
+  if (!IsAsciiSign(sign)) return 0;
+  len = ScanUnpaddedHour(str, cur);
+  if (len == 0) return 0;
+  cur += len;
+  return cur - s;
+}
 // TimeZoneIANANameTail :
 //   TimeZoneIANANameComponent
 //   TimeZoneIANANameComponent / TimeZoneIANANameTail
 // TimeZoneIANAName :
+//   Etc/GMT ASCIISign UnpaddedHour
 //   TimeZoneIANANameTail
+//   TimeZoneIANALegacyName
 // The spec text uses tail recursion with TimeZoneIANANameComponent and
 // TimeZoneIANANameTail. In our implementation, we use an iteration loop
 // instead.
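 // For example, an input like "America/Argentina/Buenos_Aires"
 // (illustrative; not taken from this CL) is consumed as three
 // TimeZoneIANANameComponent matches joined by '/' in successive
 // iterations of that loop.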
template int32_t ScanTimeZoneIANAName(base::Vector str, int32_t s) { - int32_t cur = s; int32_t len; + if ((len = ScanEtcGMTASCIISignUnpaddedHour(str, s)) > 0 || + (len = ScanTimeZoneIANALegacyName(str, s)) > 0) { + return len; + } + int32_t cur = s; if ((len = ScanTimeZoneIANANameComponent(str, cur)) == 0) return 0; cur += len; while ((str.length() > (cur + 1)) && (str[cur] == '/')) { @@ -687,16 +618,6 @@ int32_t ScanTimeZoneIANAName(base::Vector str, int32_t s) { return cur - s; } -template -int32_t ScanTimeZoneIANAName(base::Vector str, int32_t s, - ParsedISO8601Result* r) { - int32_t len; - if ((len = ScanTimeZoneIANAName(str, s)) == 0) return 0; - r->tzi_name_start = s; - r->tzi_name_length = len; - return len; -} - // TimeZoneUTCOffsetName // Sign Hour // Sign Hour : MinuteSecond @@ -774,34 +695,21 @@ int32_t ScanEtcGMTAsciiSignHour(base::Vector str, int32_t s) { } template -int32_t ScanTimeZoneBracketedName(base::Vector str, int32_t s, - ParsedISO8601Result* r) { - int32_t len; - if ((len = ScanEtcGMTAsciiSignHour(str, s)) > 0) return len; - if ((len = ScanTimeZoneIANAName(str, s)) > 0) { - r->tzi_name_start = s; - r->tzi_name_length = len; - return len; - } else { - r->tzi_name_start = 0; - r->tzi_name_length = 0; - } - return ScanTimeZoneUTCOffsetName(str, s); -} - -// TimeZoneBracketedAnnotation: '[' TimeZoneBracketedName ']' +int32_t ScanTimeZoneIdentifier(base::Vector str, int32_t s, + ParsedISO8601Result* r); +// TimeZoneBracketedAnnotation : +// [ TimeZoneIdentifier ] template int32_t ScanTimeZoneBracketedAnnotation(base::Vector str, int32_t s, ParsedISO8601Result* r) { if ((str.length() < (s + 3)) || (str[s] != '[')) return 0; int32_t cur = s + 1; - cur += ScanTimeZoneBracketedName(str, cur, r); - if ((cur - s == 1) || str.length() < (cur + 1) || (str[cur++] != ']')) { - // Reset value setted by ScanTimeZoneBracketedName - r->tzi_name_start = 0; - r->tzi_name_length = 0; + int32_t len = ScanTimeZoneIdentifier(str, cur, r); + cur += len; + if (len == 0 || str.length() < (cur + 1) || (str[cur] != ']')) { return 0; } + cur++; return cur - s; } @@ -831,215 +739,95 @@ int32_t ScanTimeZoneNameRequired(base::Vector str, int32_t s, } // TimeZone: -// TimeZoneOffsetRequired -// TimeZoneNameRequired -// The lookahead is at most 1 char. 
-SCAN_EITHER_FORWARD(TimeZone, TimeZoneOffsetRequired, TimeZoneNameRequired, - ParsedISO8601Result) +// TimeZoneUTCOffset [TimeZoneBracketedAnnotation] +// TimeZoneBracketedAnnotation +template +int32_t ScanTimeZone(base::Vector str, int32_t s, + ParsedISO8601Result* r) { + int32_t cur = s; + int32_t len; + // TimeZoneUTCOffset [TimeZoneBracketedAnnotation] + if ((len = ScanTimeZoneUTCOffset(str, cur, r)) > 0) { + cur += len; + // [TimeZoneBracketedAnnotation] + len = ScanTimeZoneBracketedAnnotation(str, cur, r); + cur += len; + return cur - s; + } + // TimeZoneBracketedAnnotation + return ScanTimeZoneBracketedAnnotation(str, cur, r); +} + +// ValidMonthDay : +// DateMonth [-] 0 NonZeroDigit +// DateMonth [-] 1 DecimalDigit +// DateMonth [-] 2 DecimalDigit +// DateMonth [-] 30 but not one of 0230 or 02-30 +// DateMonthWithThirtyOneDays [-] 31 +template +int32_t ScanValidMonthDay(base::Vector str, int32_t s) { + int32_t len; + int32_t cur = s; + int32_t date_month; + if ((len = ScanDateMonth(str, cur, &date_month)) > 0) { + cur += len; + if (str.length() >= (cur + 1)) { + if (str[cur] == '-') cur++; + int32_t day_of_month; + if ((len = ScanTwoDigitsExpectRange(str, cur, 1, 30, &day_of_month)) > + 0) { + cur += len; + // 0 NonZeroDigit + // 1 DecimalDigit + // 2 DecimalDigit + // 30 but not one of 0230 or 02-30 + if (date_month != 2 || day_of_month != 30) { + return cur - s; + } + } + } + } + // Reset cur + cur = s; + // DateMonthWithThirtyOneDays [-] 31 + if ((len = ScanDateMonthWithThirtyOneDays(str, cur)) > 0) { + cur += len; + if (str.length() >= (cur + 1)) { + if (str[cur] == '-') cur++; + int32_t dummy; + if ((len = ScanTwoDigitsExpectValue(str, cur, 31, &dummy)) > 0) { + cur += len; + return cur - s; + } + } + } + return 0; +} + +template +int32_t ScanDateSpecYearMonth(base::Vector str, int32_t s, + ParsedISO8601Result* r); -// The defintion of TimeSpecWithOptionalTimeZoneNotAmbiguous is very complex. We -// break them down into 8 template with _L suffix. 
// TimeSpecWithOptionalTimeZoneNotAmbiguous : -// TimeHour [TimeZoneNumericUTCOffsetNotAmbiguous] -// [TimeZoneBracketedAnnotation] -// -// TimeHourNotValidMonth TimeZone -// -// TimeHour : TimeMinute [TimeZone] -// -// TimeHourMinuteBasicFormatNotAmbiguous [TimeZoneBracketedAnnotation] -// -// TimeHour TimeMinute TimeZoneNumericUTCOffsetNotAmbiguousAllowedNegativeHour -// [TimeZoneBracketedAnnotation] -// -// TimeHour : TimeMinute : TimeSecond [TimeFraction] [TimeZone] -// -// TimeHour TimeMinute TimeSecondNotValidMonth [TimeZone] -// -// TimeHour TimeMinute TimeSecond TimeFraction [TimeZone] -// -// TimeSpecWithOptionalTimeZoneNotAmbiguous_L1: -// TimeHour [TimeZoneNumericUTCOffsetNotAmbiguous] -// [TimeZoneBracketedAnnotation] +// TimeSpec [TimeZone] but not one of ValidMonthDay or DateSpecYearMonth template -int32_t ScanTimeSpecWithOptionalTimeZoneNotAmbiguous_L1( - base::Vector str, int32_t s, ParsedISO8601Result* r) { - int32_t time_hour; +int32_t ScanTimeSpecWithOptionalTimeZoneNotAmbiguous(base::Vector str, + int32_t s, + ParsedISO8601Result* r) { int32_t cur = s; - int32_t len = ScanTimeHour(str, s, &time_hour); - if (len == 0) return 0; - cur += len; - r->time_hour = time_hour; - cur += ScanTimeZoneNumericUTCOffsetNotAmbiguous(str, cur, r); - cur += ScanTimeZoneBracketedAnnotation(str, cur, r); - return cur - s; -} - -// TimeSpecWithOptionalTimeZoneNotAmbiguous_L2: -// TimeHourNotValidMonth TimeZone -template -int32_t ScanTimeSpecWithOptionalTimeZoneNotAmbiguous_L2( - base::Vector str, int32_t s, ParsedISO8601Result* r) { - int32_t time_hour; - int32_t cur = s; - int32_t len = ScanTimeHourNotValidMonth(str, s, &time_hour); - if (len == 0) return 0; - cur += len; - if ((len = ScanTimeZone(str, cur, r)) == 0) return 0; - // Set time_hour only after we have both. - r->time_hour = time_hour; - cur += len; - return cur - s; -} - -// TimeSpecWithOptionalTimeZoneNotAmbiguous_L3: -// TimeHour : TimeMinute [TimeZone] -template -int32_t ScanTimeSpecWithOptionalTimeZoneNotAmbiguous_L3( - base::Vector str, int32_t s, ParsedISO8601Result* r) { - int32_t time_hour, time_minute; - // TimeHour - int32_t cur = s; - int32_t len = ScanTimeHour(str, s, &time_hour); - cur += len; - // : - if (str.length() < (cur + 3) || str[cur++] != ':') return 0; - // TimeMinute - if ((len = ScanTimeMinute(str, cur, &time_minute)) == 0) return 0; - // Set time_hour and time_minute only after we have both. 
- r->time_hour = time_hour; - r->time_minute = time_minute; + int32_t len; + if ((len = ScanTimeSpec(str, cur, r)) == 0) return 0; cur += len; // [TimeZone] - cur += ScanTimeZone(str, cur, r); - return cur - s; -} - -// TimeSpecWithOptionalTimeZoneNotAmbiguous_L4: -// TimeHourMinuteBasicFormatNotAmbiguous [TimeZoneBracketedAnnotation] -template -int32_t ScanTimeSpecWithOptionalTimeZoneNotAmbiguous_L4( - base::Vector str, int32_t s, ParsedISO8601Result* r) { - int32_t cur = s; - int32_t len = ScanTimeHourMinuteBasicFormatNotAmbiguous(str, cur, r); - if (len == 0) return 0; + len = ScanTimeZone(str, cur, r); cur += len; - cur += ScanTimeZoneBracketedAnnotation(str, cur, r); - return cur - s; -} - -// TimeSpecWithOptionalTimeZoneNotAmbiguous_L5: -// TimeHour TimeMinute TimeZoneNumericUTCOffsetNotAmbiguousAllowedNegativeHour -// [TimeZoneBracketedAnnotation] -template -int32_t ScanTimeSpecWithOptionalTimeZoneNotAmbiguous_L5( - base::Vector str, int32_t s, ParsedISO8601Result* r) { - int32_t time_hour, time_minute; - // TimeHour - int32_t cur = s; - int32_t len = ScanTimeHour(str, s, &time_hour); - if (len == 0) return 0; - cur += len; - // TimeMinute - if ((len = ScanTimeMinute(str, cur, &time_minute)) == 0) return 0; - cur += len; - // TimeZoneNumericUTCOffsetNotAmbiguousAllowedNegativeHour - if ((len = ScanTimeZoneNumericUTCOffsetNotAmbiguousAllowedNegativeHour( - str, cur, r)) == 0) - return 0; - // Set time_hour and time_minute only after we have both. - r->time_hour = time_hour; - r->time_minute = time_minute; - cur += len; - // [TimeZoneBracketedAnnotation] - cur += ScanTimeZoneBracketedAnnotation(str, cur, r); - return cur - s; -} - -// TimeSpecWithOptionalTimeZoneNotAmbiguous_L6: -// TimeHour : TimeMinute : TimeSecond [TimeFraction] [TimeZone] -template -int32_t ScanTimeSpecWithOptionalTimeZoneNotAmbiguous_L6( - base::Vector str, int32_t s, ParsedISO8601Result* r) { - int32_t time_hour, time_minute, time_second; - // TimeHour - int32_t cur = s; - int32_t len = ScanTimeHour(str, s, &time_hour); - cur += len; - // : - if (str.length() < (cur + 3) || str[cur++] != ':') return 0; - // TimeMinute - if ((len = ScanTimeMinute(str, cur, &time_minute)) == 0) return 0; - cur += len; - // : - if (str.length() < (cur + 3) || str[cur++] != ':') return 0; - // TimeSecond - if ((len = ScanTimeSecond(str, cur, &time_second)) == 0) return 0; - cur += len; - // Set time_hour, time_minute, and time_second only after we have them all. - r->time_hour = time_hour; - r->time_minute = time_minute; - r->time_second = time_second; - // [TimeFraction] - cur += ScanTimeFraction(str, cur, r); - // [TimeZone] - cur += ScanTimeZone(str, cur, r); - return cur - s; -} - -// TimeSpecWithOptionalTimeZoneNotAmbiguous_L7: -// TimeHour TimeMinute TimeSecondNotValidMonth [TimeZone] -template -int32_t ScanTimeSpecWithOptionalTimeZoneNotAmbiguous_L7( - base::Vector str, int32_t s, ParsedISO8601Result* r) { - int32_t time_hour, time_minute, time_second; - // TimeHour - int32_t cur = s; - int32_t len = ScanTimeHour(str, s, &time_hour); - if (len == 0) return 0; - cur += len; - // TimeMinute - if ((len = ScanTimeMinute(str, cur, &time_minute)) == 0) return 0; - cur += len; - // TimeSecondNotValidMonth - if ((len = ScanTimeSecondNotValidMonth(str, cur, &time_second)) == 0) - return 0; - cur += len; - // Set time_hour, time_minute, and time_second only after we have them all. 
- r->time_hour = time_hour; - r->time_minute = time_minute; - r->time_second = time_second; - // [TimeZone] - cur += ScanTimeZone(str, cur, r); - return cur - s; -} - -// TimeSpecWithOptionalTimeZoneNotAmbiguous_L8: -// TimeHour TimeMinute TimeSecond TimeFraction [TimeZone] -template -int32_t ScanTimeSpecWithOptionalTimeZoneNotAmbiguous_L8( - base::Vector str, int32_t s, ParsedISO8601Result* r) { - int32_t time_hour, time_minute, time_second; - // TimeHour - int32_t cur = s; - int32_t len = ScanTimeHour(str, s, &time_hour); - cur += len; - // TimeMinute - if ((len = ScanTimeMinute(str, cur, &time_minute)) == 0) return 0; - cur += len; - // TimeSecond - if ((len = ScanTimeSecond(str, cur, &time_second)) == 0) return 0; - cur += len; - // TimeFraction - if ((len = ScanTimeFraction(str, cur, r)) == 0) return 0; - cur += len; - // Set time_hour, time_minute, and time_second only after we have them all. - r->time_hour = time_hour; - r->time_minute = time_minute; - r->time_second = time_second; - // [TimeZone] - cur += ScanTimeZone(str, cur, r); - return cur - s; + len = cur - s; + // If it match ValidMonthDay, consider invalid. + if (ScanValidMonthDay(str, s) == len) return 0; + // If it match DateSpecYearMonth, consider invalid. + ParsedISO8601Result tmp; + if (ScanDateSpecYearMonth(str, s, &tmp) == len) return 0; + return len; } // CalendarNameComponent: @@ -1115,18 +903,16 @@ int32_t ScanCalendarTime_L1(base::Vector str, int32_t s, } // CalendarTime_L2 : -// TimeSpec [TimeZone] Calendar +// TimeSpecWithOptionalTimeZoneNotAmbiguous [Calendar] template int32_t ScanCalendarTime_L2(base::Vector str, int32_t s, ParsedISO8601Result* r) { int32_t cur = s; - int32_t len = ScanTimeSpec(str, cur, r); + int32_t len = ScanTimeSpecWithOptionalTimeZoneNotAmbiguous(str, cur, r); if (len == 0) return 0; cur += len; - // [TimeZone] - cur += ScanTimeZone(str, cur, r); - if ((len = ScanCalendar(str, cur, r)) == 0) return 0; - cur += len; + // [Calendar] + cur += ScanCalendar(str, cur, r); return cur - s; } @@ -1163,7 +949,7 @@ int32_t ScanDateSpecYearMonth(base::Vector str, int32_t s, } // DateSpecMonthDay: -// TwoDashopt DateMonth -opt DateDay +// [TwoDash] DateMonth [-] DateDay template int32_t ScanDateSpecMonthDay(base::Vector str, int32_t s, ParsedISO8601Result* r) { @@ -1190,18 +976,22 @@ int32_t ScanDateSpecMonthDay(base::Vector str, int32_t s, return cur - s; } -// TemporalTimeZoneIdentifier: -// TimeZoneNumericUTCOffset +// TimeZoneIdentifier : // TimeZoneIANAName +// TimeZoneUTCOffsetName template -int32_t ScanTemporalTimeZoneIdentifier(base::Vector str, int32_t s, - ParsedISO8601Result* r) { +int32_t ScanTimeZoneIdentifier(base::Vector str, int32_t s, + ParsedISO8601Result* r) { int32_t len; - if ((len = ScanTimeZoneNumericUTCOffset(str, s, r)) > 0) return len; - if ((len = ScanTimeZoneIANAName(str, s)) == 0) return 0; - r->tzi_name_start = s; - r->tzi_name_length = len; - return len; + int32_t cur = s; + if ((len = ScanTimeZoneIANAName(str, cur)) > 0 || + (len = ScanTimeZoneUTCOffsetName(str, cur)) > 0) { + cur += len; + r->tzi_name_start = s; + r->tzi_name_length = len; + return cur - s; + } + return 0; } // CalendarDateTime: DateTime [Calendar] @@ -1257,8 +1047,6 @@ int32_t ScanTemporalZonedDateTimeString(base::Vector str, int32_t s, SCAN_FORWARD(TemporalDateTimeString, CalendarDateTime, ParsedISO8601Result) -// TemporalTimeZoneString: -// TemporalTimeZoneIdentifier // Date [TimeSpecSeparator] TimeZone [Calendar] template int32_t ScanDate_TimeSpecSeparator_TimeZone_Calendar(base::Vector str, @@ 
-1276,25 +1064,14 @@ int32_t ScanDate_TimeSpecSeparator_TimeZone_Calendar(base::Vector str, return cur - s; } +// TemporalTimeZoneString: +// TimeZoneIdentifier +// Date [TimeSpecSeparator] TimeZone [Calendar] // The lookahead is at most 8 chars. -SCAN_EITHER_FORWARD(TemporalTimeZoneString, TemporalTimeZoneIdentifier, +SCAN_EITHER_FORWARD(TemporalTimeZoneString, TimeZoneIdentifier, Date_TimeSpecSeparator_TimeZone_Calendar, ParsedISO8601Result) -// TemporalTimeString -// CalendarTime -// CalendarDateTimeTimeRequired -// The lookahead is at most 7 chars. -SCAN_EITHER_FORWARD(TemporalTimeString, CalendarTime, - CalendarDateTimeTimeRequired, ParsedISO8601Result) - -// TemporalYearMonthString: -// DateSpecYearMonth -// CalendarDateTime -// The lookahead is at most 11 chars. -SCAN_EITHER_FORWARD(TemporalYearMonthString, DateSpecYearMonth, - CalendarDateTime, ParsedISO8601Result) - // TemporalMonthDayString // DateSpecMonthDay // CalendarDateTime @@ -1303,8 +1080,7 @@ SCAN_EITHER_FORWARD(TemporalMonthDayString, DateSpecMonthDay, CalendarDateTime, ParsedISO8601Result) // TemporalInstantString -// Date TimeZoneOffsetRequired -// Date DateTimeSeparator TimeSpec TimeZoneOffsetRequired +// Date [TimeSpecSeparator] TimeZoneOffsetRequired [Calendar] template int32_t ScanTemporalInstantString(base::Vector str, int32_t s, ParsedISO8601Result* r) { @@ -1314,26 +1090,15 @@ int32_t ScanTemporalInstantString(base::Vector str, int32_t s, if (len == 0) return 0; cur += len; - // TimeZoneOffsetRequired - len = ScanTimeZoneOffsetRequired(str, cur, r); - if (len > 0) { - cur += len; - return cur - s; - } - - // DateTimeSeparator - if (!(((cur + 1) < str.length()) && IsDateTimeSeparator(str[cur++]))) { - return 0; - } - // TimeSpec - len = ScanTimeSpec(str, cur, r); - if (len == 0) return 0; - cur += len; + // [TimeSpecSeparator] + cur += ScanTimeSpecSeparator(str, cur, r); // TimeZoneOffsetRequired len = ScanTimeZoneOffsetRequired(str, cur, r); if (len == 0) return 0; cur += len; + // [Calendar] + cur += ScanCalendar(str, cur, r); return cur - s; } @@ -1371,51 +1136,36 @@ SATISIFY(Date_TimeSpecSeparator_TimeZone_Calendar, ParsedISO8601Result) SATISIFY(CalendarDateTime, ParsedISO8601Result) SATISIFY(CalendarTime_L1, ParsedISO8601Result) SATISIFY(CalendarTime_L2, ParsedISO8601Result) -SATISIFY(TimeSpecWithOptionalTimeZoneNotAmbiguous_L1, ParsedISO8601Result) -SATISIFY(TimeSpecWithOptionalTimeZoneNotAmbiguous_L2, ParsedISO8601Result) -SATISIFY(TimeSpecWithOptionalTimeZoneNotAmbiguous_L3, ParsedISO8601Result) -SATISIFY(TimeSpecWithOptionalTimeZoneNotAmbiguous_L4, ParsedISO8601Result) -SATISIFY(TimeSpecWithOptionalTimeZoneNotAmbiguous_L5, ParsedISO8601Result) -SATISIFY(TimeSpecWithOptionalTimeZoneNotAmbiguous_L6, ParsedISO8601Result) -SATISIFY(TimeSpecWithOptionalTimeZoneNotAmbiguous_L7, ParsedISO8601Result) -SATISIFY(TimeSpecWithOptionalTimeZoneNotAmbiguous_L8, ParsedISO8601Result) -template -bool SatisfyTimeSpecWithOptionalTimeZoneNotAmbiguous(base::Vector str, - ParsedISO8601Result* r) { - IF_SATISFY_RETURN(TimeSpecWithOptionalTimeZoneNotAmbiguous_L1) - IF_SATISFY_RETURN(TimeSpecWithOptionalTimeZoneNotAmbiguous_L2) - IF_SATISFY_RETURN(TimeSpecWithOptionalTimeZoneNotAmbiguous_L3) - IF_SATISFY_RETURN(TimeSpecWithOptionalTimeZoneNotAmbiguous_L4) - IF_SATISFY_RETURN(TimeSpecWithOptionalTimeZoneNotAmbiguous_L5) - IF_SATISFY_RETURN(TimeSpecWithOptionalTimeZoneNotAmbiguous_L6) - IF_SATISFY_RETURN(TimeSpecWithOptionalTimeZoneNotAmbiguous_L7) - IF_SATISFY_RETURN(TimeSpecWithOptionalTimeZoneNotAmbiguous_L8) - return false; -} 
+ template bool SatisfyCalendarTime(base::Vector str, ParsedISO8601Result* r) { IF_SATISFY_RETURN(CalendarTime_L1) IF_SATISFY_RETURN(CalendarTime_L2) - IF_SATISFY_RETURN(TimeSpecWithOptionalTimeZoneNotAmbiguous) return false; } -SATISIFY_EITHER(TemporalTimeString, CalendarTime, CalendarDateTime, +SATISIFY(CalendarDateTimeTimeRequired, ParsedISO8601Result) +SATISIFY_EITHER(TemporalTimeString, CalendarTime, CalendarDateTimeTimeRequired, ParsedISO8601Result) SATISIFY_EITHER(TemporalYearMonthString, DateSpecYearMonth, CalendarDateTime, ParsedISO8601Result) SATISIFY_EITHER(TemporalMonthDayString, DateSpecMonthDay, CalendarDateTime, ParsedISO8601Result) SATISIFY(TimeZoneNumericUTCOffset, ParsedISO8601Result) -SATISIFY(TimeZoneIANAName, ParsedISO8601Result) -SATISIFY_EITHER(TemporalTimeZoneIdentifier, TimeZoneNumericUTCOffset, - TimeZoneIANAName, ParsedISO8601Result) -SATISIFY_EITHER(TemporalTimeZoneString, TemporalTimeZoneIdentifier, +SATISIFY(TimeZoneIdentifier, ParsedISO8601Result) +SATISIFY_EITHER(TemporalTimeZoneString, TimeZoneIdentifier, Date_TimeSpecSeparator_TimeZone_Calendar, ParsedISO8601Result) SATISIFY(TemporalInstantString, ParsedISO8601Result) SATISIFY(TemporalZonedDateTimeString, ParsedISO8601Result) SATISIFY(CalendarName, ParsedISO8601Result) +// TemporalCalendarString : +// CalendarName +// TemporalInstantString +// CalendarDateTime +// CalendarTime +// DateSpecYearMonth +// DateSpecMonthDay template bool SatisfyTemporalCalendarString(base::Vector str, ParsedISO8601Result* r) { diff --git a/test/test262/test262.status b/test/test262/test262.status index a9c2852b63..6beda510b2 100644 --- a/test/test262/test262.status +++ b/test/test262/test262.status @@ -444,63 +444,15 @@ # https://github.com/tc39/proposal-temporal/pull/1862 'built-ins/Temporal/Duration/prototype/total/timezone-getpossibleinstantsfor-iterable': [FAIL], - # TimeZone name test should move to intl402 - # https://github.com/tc39/test262/issues/3253 - 'built-ins/Temporal/Duration/prototype/add/relativeto-string-datetime': [FAIL], - 'built-ins/Temporal/Duration/prototype/subtract/relativeto-string-datetime': [FAIL], - - # precision - 'built-ins/Temporal/Duration/prototype/total/relativeto-string-datetime': [FAIL], - # https://bugs.chromium.org/p/v8/issues/detail?id=11544 'built-ins/Temporal/Calendar/prototype/weekOfYear/calendar-datefromfields-called-with-options-undefined': [FAIL], - 'built-ins/Temporal/Duration/compare/relativeto-sub-minute-offset': [FAIL], - 'built-ins/Temporal/Duration/prototype/add/relativeto-sub-minute-offset': [FAIL], - 'built-ins/Temporal/Duration/prototype/round/relativeto-string-datetime': [FAIL], - 'built-ins/Temporal/Duration/prototype/round/relativeto-sub-minute-offset': [FAIL], - 'built-ins/Temporal/Duration/prototype/subtract/relativeto-sub-minute-offset': [FAIL], 'built-ins/Temporal/Duration/prototype/total/balance-negative-result': [FAIL], - 'built-ins/Temporal/Duration/prototype/total/relativeto-sub-minute-offset': [FAIL], - 'built-ins/Temporal/Instant/prototype/toZonedDateTimeISO/timezone-string-multiple-offsets': [FAIL], - 'built-ins/Temporal/Instant/prototype/toZonedDateTime/timezone-string-multiple-offsets': [FAIL], - 'built-ins/Temporal/Now/zonedDateTimeISO/timezone-string-multiple-offsets': [FAIL], - 'built-ins/Temporal/Now/zonedDateTime/timezone-string-multiple-offsets': [FAIL], - 'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/timezone-string-multiple-offsets': [FAIL], - 'built-ins/Temporal/PlainDateTime/prototype/toZonedDateTime/timezone-string-multiple-offsets': 
[FAIL], - 'built-ins/Temporal/PlainTime/prototype/toZonedDateTime/timezone-string-multiple-offsets': [FAIL], - 'built-ins/Temporal/TimeZone/from/timezone-string-multiple-offsets': [FAIL], - 'built-ins/Temporal/ZonedDateTime/compare/zoneddatetime-string': [FAIL], - 'built-ins/Temporal/ZonedDateTime/compare/zoneddatetime-string-multiple-offsets': [FAIL], - 'built-ins/Temporal/ZonedDateTime/from/timezone-string-multiple-offsets': [FAIL], - 'built-ins/Temporal/ZonedDateTime/from/zoneddatetime-string': [FAIL], - 'built-ins/Temporal/ZonedDateTime/from/zoneddatetime-string-multiple-offsets': [FAIL], - 'built-ins/Temporal/ZonedDateTime/from/zoneddatetime-sub-minute-offset': [FAIL], - 'built-ins/Temporal/ZonedDateTime/prototype/equals/sub-minute-offset': [FAIL], - 'built-ins/Temporal/ZonedDateTime/prototype/equals/timezone-string-datetime': [SKIP], - 'built-ins/Temporal/ZonedDateTime/prototype/equals/timezone-string-multiple-offsets': [FAIL], - 'built-ins/Temporal/ZonedDateTime/prototype/equals/zoneddatetime-string': [FAIL], - 'built-ins/Temporal/ZonedDateTime/prototype/equals/zoneddatetime-string-multiple-offsets': [FAIL], - 'built-ins/Temporal/ZonedDateTime/prototype/since/sub-minute-offset': [FAIL], - 'built-ins/Temporal/ZonedDateTime/prototype/since/zoneddatetime-string': [FAIL], - 'built-ins/Temporal/ZonedDateTime/prototype/since/zoneddatetime-string-multiple-offsets': [FAIL], - 'built-ins/Temporal/ZonedDateTime/prototype/until/sub-minute-offset': [FAIL], - 'built-ins/Temporal/ZonedDateTime/prototype/until/zoneddatetime-string': [FAIL], - 'built-ins/Temporal/ZonedDateTime/prototype/until/zoneddatetime-string-multiple-offsets': [FAIL], - 'built-ins/Temporal/ZonedDateTime/prototype/withTimeZone/timezone-string-multiple-offsets': [FAIL], 'intl402/Temporal/Calendar/prototype/dateFromFields/infinity-throws-rangeerror': [FAIL], 'intl402/Temporal/Calendar/prototype/monthDayFromFields/infinity-throws-rangeerror': [FAIL], 'intl402/Temporal/Calendar/prototype/yearMonthFromFields/infinity-throws-rangeerror': [FAIL], 'intl402/Temporal/Duration/prototype/round/relativeto-string-datetime': [FAIL], 'intl402/Temporal/Duration/prototype/total/relativeto-string-datetime': [FAIL], 'intl402/Temporal/PlainYearMonth/from/argument-object': [FAIL], - 'built-ins/Temporal/PlainDate/prototype/toPlainDateTime/argument-string-no-implicit-midnight': [FAIL], - 'built-ins/Temporal/PlainDate/prototype/toZonedDateTime/argument-string-no-implicit-midnight': [FAIL], - 'built-ins/Temporal/PlainDateTime/prototype/withPlainTime/argument-string-no-implicit-midnight': [FAIL], - 'built-ins/Temporal/PlainTime/compare/argument-string-no-implicit-midnight': [FAIL], - 'built-ins/Temporal/PlainTime/from/argument-string-no-implicit-midnight': [FAIL], - 'built-ins/Temporal/PlainTime/prototype/since/argument-string-no-implicit-midnight': [FAIL], - 'built-ins/Temporal/PlainTime/prototype/until/argument-string-no-implicit-midnight': [FAIL], - 'built-ins/Temporal/ZonedDateTime/prototype/withPlainTime/argument-string-no-implicit-midnight': [FAIL], 'built-ins/Temporal/Instant/prototype/round/rounding-direction': [FAIL], 'built-ins/Temporal/Instant/prototype/toString/rounding-direction': [FAIL], @@ -575,21 +527,16 @@ 'built-ins/Temporal/Duration/prototype/subtract/relativeto-propertybag-calendar-number': [FAIL], 'built-ins/Temporal/Duration/prototype/total/relativeto-propertybag-calendar-number': [FAIL], - 'built-ins/Temporal/ZonedDateTime/timezone-string-multiple-offsets': [FAIL], 'built-ins/Temporal/Duration/prototype/add/relativeto-year': 
[FAIL], - 'built-ins/Temporal/Instant/from/argument-string': [FAIL], 'intl402/Temporal/Calendar/prototype/dateFromFields/order-of-operations': [FAIL], 'intl402/Temporal/Calendar/prototype/monthDayFromFields/order-of-operations': [FAIL], 'intl402/Temporal/Calendar/prototype/yearMonthFromFields/order-of-operations': [FAIL], 'intl402/Temporal/Duration/compare/relativeto-hour': [FAIL], - 'built-ins/Temporal/PlainTime/prototype/equals/argument-string-no-implicit-midnight': [FAIL], 'built-ins/Temporal/Duration/prototype/total/relativeto-zoneddatetime-with-fractional-days-different-sign': [FAIL], 'built-ins/Temporal/Duration/prototype/total/relativeto-zoneddatetime-with-fractional-days': [FAIL], 'intl402/Temporal/TimeZone/prototype/getNextTransition/subtract-second-and-nanosecond-from-last-transition': [FAIL], 'intl402/Temporal/TimeZone/prototype/getPreviousTransition/nanoseconds-subtracted-or-added-at-dst-transition': [FAIL], - 'intl402/Temporal/TimeZone/from/etc-timezone': [FAIL], - 'intl402/Temporal/TimeZone/from/iana-legacy-names': [FAIL], 'intl402/Temporal/TimeZone/prototype/getNextTransition/transition-at-instant-boundaries': [FAIL], 'intl402/Temporal/TimeZone/prototype/getOffsetNanosecondsFor/nanoseconds-subtracted-or-added-at-dst-transition': [FAIL], 'intl402/Temporal/TimeZone/prototype/getPlainDateTimeFor/dst': [FAIL], @@ -624,16 +571,6 @@ 'harness/temporalHelpers-one-shift-time-zone': [SKIP], - 'built-ins/Temporal/Instant/compare/instant-string': [FAIL], - 'built-ins/Temporal/Instant/from/instant-string': [FAIL], - 'built-ins/Temporal/Instant/prototype/equals/instant-string': [FAIL], - 'built-ins/Temporal/Instant/prototype/since/instant-string': [FAIL], - 'built-ins/Temporal/Instant/prototype/until/instant-string': [FAIL], - 'built-ins/Temporal/TimeZone/prototype/getNextTransition/instant-string': [FAIL], - 'built-ins/Temporal/TimeZone/prototype/getOffsetNanosecondsFor/instant-string': [FAIL], - 'built-ins/Temporal/TimeZone/prototype/getOffsetStringFor/instant-string': [FAIL], - 'built-ins/Temporal/TimeZone/prototype/getPlainDateTimeFor/instant-string': [FAIL], - 'built-ins/Temporal/TimeZone/prototype/getPreviousTransition/instant-string': [FAIL], 'staging/Intl402/Temporal/old/addition-across-lunisolar-leap-months': [FAIL], 'staging/Intl402/Temporal/old/date-time-format': [FAIL], 'staging/Intl402/Temporal/old/datetime-toLocaleString': [FAIL], diff --git a/test/unittests/temporal/temporal-parser-unittest.cc b/test/unittests/temporal/temporal-parser-unittest.cc index 276e8a86ba..6277c7a3f1 100644 --- a/test/unittests/temporal/temporal-parser-unittest.cc +++ b/test/unittests/temporal/temporal-parser-unittest.cc @@ -77,16 +77,18 @@ char asciitolower(char in) { return (in <= 'Z' && in >= 'A') ? 
(in - ('Z' - 'z')) : in; } -#define IMPL_VERIFY_PARSE_TEMPORAL_DATE_STRING_SUCCESS(R) \ - void VerifyParseTemporal##R##StringSuccess( \ - const char* str, int32_t date_year, int32_t date_month, \ - int32_t date_day, const char* calendar_name) { \ - Handle input = MakeString(str); \ - ParsedISO8601Result actual = \ - *TemporalParser::ParseTemporal##R##String(i_isolate(), input); \ - CheckDate(actual, date_year, date_month, date_day); \ - CheckCalendar(i_isolate(), input, actual.calendar_name_start, \ - actual.calendar_name_length, calendar_name); \ +#define IMPL_VERIFY_PARSE_TEMPORAL_DATE_STRING_SUCCESS(R) \ + void VerifyParseTemporal##R##StringSuccess( \ + const char* str, int32_t date_year, int32_t date_month, \ + int32_t date_day, const char* calendar_name) { \ + Handle input = MakeString(str); \ + base::Optional result = \ + TemporalParser::ParseTemporal##R##String(i_isolate(), input); \ + CHECK(result.has_value()); \ + ParsedISO8601Result actual = *result; \ + CheckDate(actual, date_year, date_month, date_day); \ + CheckCalendar(i_isolate(), input, actual.calendar_name_start, \ + actual.calendar_name_length, calendar_name); \ } #define IMPL_VERIFY_PARSE_TEMPORAL_DATE_TIME_STRING_SUCCESS(R) \ @@ -96,8 +98,10 @@ char asciitolower(char in) { int32_t time_second, int32_t time_nanosecond, \ const char* calendar_name) { \ Handle input = MakeString(str); \ - ParsedISO8601Result actual = \ - *TemporalParser::ParseTemporal##R##String(i_isolate(), input); \ + base::Optional result = \ + TemporalParser::ParseTemporal##R##String(i_isolate(), input); \ + CHECK(result.has_value()); \ + ParsedISO8601Result actual = *result; \ CheckDate(actual, date_year, date_month, date_day); \ CheckCalendar(i_isolate(), input, actual.calendar_name_start, \ actual.calendar_name_length, calendar_name); \ @@ -113,8 +117,10 @@ char asciitolower(char in) { int32_t tzuo_second, int32_t tzuo_nanosecond, bool utc_designator, \ const char* tzi_name) { \ Handle input = MakeString(str); \ - ParsedISO8601Result actual = \ - *TemporalParser::ParseTemporal##R##String(i_isolate(), input); \ + base::Optional result = \ + TemporalParser::ParseTemporal##R##String(i_isolate(), input); \ + CHECK(result.has_value()); \ + ParsedISO8601Result actual = *result; \ CheckDate(actual, date_year, date_month, date_day); \ CheckCalendar(i_isolate(), input, actual.calendar_name_start, \ actual.calendar_name_length, calendar_name); \ @@ -141,8 +147,10 @@ class TemporalParserTest : public TestWithIsolate { int32_t tzuo_hour, int32_t tzuo_minute, int32_t tzuo_second, int32_t tzuo_nanosecond) { Handle input = MakeString(str); - ParsedISO8601Result actual = - *TemporalParser::ParseTemporalInstantString(i_isolate(), input); + base::Optional result = + TemporalParser::ParseTemporalInstantString(i_isolate(), input); + CHECK(result.has_value()); + ParsedISO8601Result actual = *result; CHECK_EQ(utc_designator, actual.utc_designator); if (!utc_designator) { CheckTimeZoneNumericUTCOffset(actual, tzuo_sign, tzuo_hour, tzuo_minute, @@ -153,8 +161,10 @@ class TemporalParserTest : public TestWithIsolate { void VerifyParseTemporalCalendarStringSuccess( const char* str, const std::string& calendar_name) { Handle input = MakeString(str); - ParsedISO8601Result actual = - *TemporalParser::ParseTemporalCalendarString(i_isolate(), input); + base::Optional result = + TemporalParser::ParseTemporalCalendarString(i_isolate(), input); + CHECK(result.has_value()); + ParsedISO8601Result actual = *result; CheckCalendar(i_isolate(), input, actual.calendar_name_start, 
actual.calendar_name_length, calendar_name); } @@ -185,10 +195,12 @@ class TemporalParserTest : public TestWithIsolate { int64_t whole_seconds, int64_t seconds_fraction) { Handle input = MakeString(str); - CheckDuration( - *TemporalParser::ParseTemporalDurationString(i_isolate(), input), sign, - years, months, weeks, days, whole_hours, hours_fraction, whole_minutes, - minutes_fraction, whole_seconds, seconds_fraction); + base::Optional result = + TemporalParser::ParseTemporalDurationString(i_isolate(), input); + CHECK(result.has_value()); + CheckDuration(*result, sign, years, months, weeks, days, whole_hours, + hours_fraction, whole_minutes, minutes_fraction, + whole_seconds, seconds_fraction); } void VerifyParseDurationSuccess(const char* str, @@ -202,8 +214,10 @@ class TemporalParserTest : public TestWithIsolate { void VerifyParseDurationWithPositiveSign(const char* str) { Handle input = MakeString(str); - ParsedISO8601Duration expected = - *TemporalParser::ParseTemporalDurationString(i_isolate(), input); + base::Optional result = + TemporalParser::ParseTemporalDurationString(i_isolate(), input); + CHECK(result.has_value()); + ParsedISO8601Duration expected = *result; std::string with_sign("+"); with_sign += str; VerifyParseDurationSuccess(with_sign.c_str(), expected); @@ -213,8 +227,10 @@ class TemporalParserTest : public TestWithIsolate { std::string with_sign("-"); with_sign += str; Handle input = MakeString(with_sign.c_str()); - ParsedISO8601Duration expected = - *TemporalParser::ParseTemporalDurationString(i_isolate(), input); + base::Optional result = + TemporalParser::ParseTemporalDurationString(i_isolate(), input); + CHECK(result.has_value()); + ParsedISO8601Duration expected = *result; with_sign = "\u2212"; with_sign += str; VerifyParseDurationSuccess(with_sign.c_str(), expected); @@ -222,8 +238,10 @@ class TemporalParserTest : public TestWithIsolate { void VerifyParseDurationWithLowerCase(const char* str) { Handle input = MakeString(str); - ParsedISO8601Duration expected = - *TemporalParser::ParseTemporalDurationString(i_isolate(), input); + base::Optional result = + TemporalParser::ParseTemporalDurationString(i_isolate(), input); + CHECK(result.has_value()); + ParsedISO8601Duration expected = *result; std::string lower(str); std::transform(lower.begin(), lower.end(), lower.begin(), asciitolower); VerifyParseDurationSuccess(lower.c_str(), expected); @@ -233,8 +251,10 @@ class TemporalParserTest : public TestWithIsolate { std::string period(str); std::transform(period.begin(), period.end(), period.begin(), commatoperiod); Handle input = MakeString(str); - ParsedISO8601Duration expected = - *TemporalParser::ParseTemporalDurationString(i_isolate(), input); + base::Optional result = + TemporalParser::ParseTemporalDurationString(i_isolate(), input); + CHECK(result.has_value()); + ParsedISO8601Duration expected = *result; VerifyParseDurationSuccess(str, expected); } @@ -242,9 +262,11 @@ class TemporalParserTest : public TestWithIsolate { const char* str, int32_t tzuo_sign, int32_t tzuo_hour, int32_t tzuo_minute, int32_t tzuo_second, int32_t tzuo_nanosecond) { Handle input = MakeString(str); - CheckTimeZoneNumericUTCOffset( - *TemporalParser::ParseTimeZoneNumericUTCOffset(i_isolate(), input), - tzuo_sign, tzuo_hour, tzuo_minute, tzuo_second, tzuo_nanosecond); + base::Optional result = + TemporalParser::ParseTimeZoneNumericUTCOffset(i_isolate(), input); + CHECK(result.has_value()); + CheckTimeZoneNumericUTCOffset(*result, tzuo_sign, tzuo_hour, tzuo_minute, + tzuo_second, 
tzuo_nanosecond); } }; @@ -375,9 +397,10 @@ class TemporalParserTest : public TestWithIsolate { /* Out of range */ \ VERIFY_PARSE_FAIL(R, "1900-12-31[Etc/GMT+24]"); \ VERIFY_PARSE_FAIL(R, "1900-12-31[Etc/GMT-24]"); \ - /* Single digit Hour */ \ - VERIFY_PARSE_FAIL(R, "1900-12-31[Etc/GMT+2]"); \ - VERIFY_PARSE_FAIL(R, "1900-12-31[Etc/GMT-0]"); \ + /* leading 0 Hour */ \ + VERIFY_PARSE_FAIL(R, "1900-12-31[Etc/GMT+02]"); \ + VERIFY_PARSE_FAIL(R, "1900-12-31[Etc/GMT-00]"); \ + VERIFY_PARSE_FAIL(R, "2021-11-09Z[Etc/GMT+01]"); \ /* Three digit hour */ \ VERIFY_PARSE_FAIL(R, "1900-12-31[Etc/GMT+201]"); \ VERIFY_PARSE_FAIL(R, "1900-12-31[Etc/GMT-000]"); \ @@ -423,29 +446,6 @@ class TemporalParserTest : public TestWithIsolate { } while (false) TEST_F(TemporalParserTest, TemporalTimeStringSuccess) { - // DateTime - // DateYear - DateMonth - DateDay - VerifyTemporalTimeStringTimeUndefined("2021-11-03"); - // DateYear DateMonth DateDay - VerifyTemporalTimeStringTimeUndefined("20211103"); - // DateExtendedYear - VerifyTemporalTimeStringTimeUndefined("+002021-11-03"); - VerifyTemporalTimeStringTimeUndefined("+000001-11-03"); - VerifyTemporalTimeStringTimeUndefined("+0020211103"); - VerifyTemporalTimeStringTimeUndefined("+0000011231"); - VerifyTemporalTimeStringTimeUndefined("+0000000101"); - VerifyTemporalTimeStringTimeUndefined("+0000000101"); - VerifyTemporalTimeStringTimeUndefined("+654321-11-03"); - VerifyTemporalTimeStringTimeUndefined("+999999-12-31"); - VerifyTemporalTimeStringTimeUndefined("-654321-11-03"); - VerifyTemporalTimeStringTimeUndefined("-999999-12-31"); - VerifyTemporalTimeStringTimeUndefined("\u2212999999-12-31"); - VerifyTemporalTimeStringTimeUndefined("+6543211103"); - VerifyTemporalTimeStringTimeUndefined("+9999991231"); - VerifyTemporalTimeStringTimeUndefined("-6543211103"); - VerifyTemporalTimeStringTimeUndefined("-9999991231"); - VerifyTemporalTimeStringTimeUndefined("\u22129999991231"); - // DateTime: Date TimeSpecSeparator_opt TimeZone_opt // Date TimeSpecSeparator // Differeent DateTimeSeparator: T or t @@ -475,17 +475,7 @@ TEST_F(TemporalParserTest, TemporalTimeStringSuccess) { 123456789, ""); VerifyParseTemporalTimeStringSuccess("19640710 09:18:27,12345678", 9, 18, 27, 123456780, ""); - // Date TimeZone - // Date TimeZoneOffsetRequired - // Date TimeZoneUTCOffset TimeZoneBracketedAnnotation_opt - // Date TimeZoneNumericUTCOffset - VerifyTemporalTimeStringTimeUndefined("2021-11-09+11"); - VerifyTemporalTimeStringTimeUndefined("2021-11-09-12:03"); - VerifyTemporalTimeStringTimeUndefined("2021-11-09-1203"); - VerifyTemporalTimeStringTimeUndefined("2021-11-09-12:03:04"); - VerifyTemporalTimeStringTimeUndefined("2021-11-09-120304"); - VerifyTemporalTimeStringTimeUndefined("2021-11-09-12:03:04,987654321"); - VerifyTemporalTimeStringTimeUndefined("2021-11-09-120304.987654321"); + VerifyParseTemporalTimeStringSuccess("2021-11-09T03+11", 3, kUndefined, kUndefined, kUndefined, ""); VerifyParseTemporalTimeStringSuccess("2021-11-09t04:55-12:03", 4, 55, @@ -512,10 +502,7 @@ TEST_F(TemporalParserTest, TemporalTimeStringSuccess) { ""); VerifyParseTemporalTimeStringSuccess( "19670316T223344.987654321-120304.123456789", 22, 33, 44, 987654321, ""); - // Date UTCDesignator - // Date UTCDesignator - VerifyTemporalTimeStringTimeUndefined("2021-11-09z"); - VerifyTemporalTimeStringTimeUndefined("2021-11-09Z"); + VerifyParseTemporalTimeStringSuccess("2021-11-09T11z", 11, kUndefined, kUndefined, kUndefined, ""); VerifyParseTemporalTimeStringSuccess("2021-11-09t12Z", 12, kUndefined, @@ -538,24 
+525,10 @@ TEST_F(TemporalParserTest, TemporalTimeStringSuccess) { 891234000, ""); VerifyParseTemporalTimeStringSuccess("20211109T012345,891234567Z", 1, 23, 45, 891234567, ""); - // Date TimeZoneNameRequired - // Date TimeZoneBracketedAnnotation - VerifyTemporalTimeStringTimeUndefined("2021-11-09[Etc/GMT+01]"); - VerifyTemporalTimeStringTimeUndefined("2021-11-09[Etc/GMT-23]"); - VerifyTemporalTimeStringTimeUndefined("2021-11-09[Etc/GMT+23]"); - VerifyTemporalTimeStringTimeUndefined("2021-11-09[Etc/GMT-00]"); - VerifyTemporalTimeStringTimeUndefined("2021-11-09[Etc/GMT+01]"); - VerifyTemporalTimeStringTimeUndefined("2021-11-09[Etc/GMT-23]"); + VerifyParseTemporalTimeStringSuccess( "2021-11-09 23:45:56.891234567Z[Etc/GMT+23]", 23, 45, 56, 891234567, ""); // TimeZoneIANAName - VerifyTemporalTimeStringTimeUndefined("2021-11-09[ABCDEFGHIJKLMN]"); - VerifyTemporalTimeStringTimeUndefined( - "2021-11-09[ABCDEFGHIJKLMN/abcdefghijklmn/opeqrstuv]"); - VerifyTemporalTimeStringTimeUndefined( - "2021-11-09[aBcDEfGHiJ.L_N/ABC...G_..KLMN]"); - VerifyTemporalTimeStringTimeUndefined( - "2021-11-09[aBcDE-GHiJ.L_N/ABCbcdG-IJKLMN]"); VerifyParseTemporalTimeStringSuccess("2021-11-09T12z[.BCDEFGHIJKLMN]", 12, kUndefined, kUndefined, kUndefined, ""); VerifyParseTemporalTimeStringSuccess( @@ -568,26 +541,10 @@ TEST_F(TemporalParserTest, TemporalTimeStringSuccess) { "2021-11-09 " "123456.789123456-012345.789123456[aBcDEfGHiJ.L_N/ABCbcdGfIJKLMN]", 12, 34, 56, 789123456, ""); - // TimeZoneUTCOffsetName - VerifyTemporalTimeStringTimeUndefined("2021-11-09[+12]"); - VerifyTemporalTimeStringTimeUndefined("2021-11-09[+12:34]"); - VerifyTemporalTimeStringTimeUndefined("2021-11-09[+12:34:56]"); - VerifyTemporalTimeStringTimeUndefined("2021-11-09[+12:34:56,789123456]"); - VerifyTemporalTimeStringTimeUndefined("2021-11-09[+12:34:56.789123456]"); - VerifyTemporalTimeStringTimeUndefined("2021-11-09[\u221200:34:56.789123456]"); VerifyParseTemporalTimeStringSuccess("2021-11-09 01:23:45.678912345Z", 1, 23, 45, 678912345, ""); - VerifyParseTemporalTimeStringSuccess("2021-03-11[u-ca=iso8601]", kUndefined, - kUndefined, kUndefined, kUndefined, - "iso8601"); - VerifyParseTemporalTimeStringSuccess("2021-03-11[u-ca=abcdefgh-wxyzefg]", - kUndefined, kUndefined, kUndefined, - kUndefined, "abcdefgh-wxyzefg"); - VerifyParseTemporalTimeStringSuccess( - "2021-03-11[u-ca=abcdefgh-wxyzefg-ijklmnop]", kUndefined, kUndefined, - kUndefined, kUndefined, "abcdefgh-wxyzefg-ijklmnop"); VerifyParseTemporalTimeStringSuccess("2021-03-11T01[u-ca=iso8601]", 1, kUndefined, kUndefined, kUndefined, "iso8601"); @@ -601,34 +558,18 @@ TEST_F(TemporalParserTest, TemporalTimeStringSuccess) { "ABCbcdGfIJKLMN][u-ca=abc]", 12, 34, 56, 789000000, "abc"); - VerifyParseTemporalTimeStringSuccess( - "2021-03-11[+12:34:56,789123456][u-ca=abcdefgh-wxyzefg]", kUndefined, - kUndefined, kUndefined, kUndefined, "abcdefgh-wxyzefg"); VerifyParseTemporalTimeStringSuccess( "2021-03-11T23[+12:34:56,789123456][u-ca=abcdefgh-wxyzefg]", 23, kUndefined, kUndefined, kUndefined, "abcdefgh-wxyzefg"); - VerifyParseTemporalTimeStringSuccess( - "20210311[\u221200:34:56.789123456][u-ca=abcdefgh-wxyzefg-ijklmnop]", - kUndefined, kUndefined, kUndefined, kUndefined, - "abcdefgh-wxyzefg-ijklmnop"); VerifyParseTemporalTimeStringSuccess( "20210311T22:11[\u221200:34:56.789123456][u-ca=abcdefgh-" "wxyzefg-ijklmnop]", 22, 11, kUndefined, kUndefined, "abcdefgh-wxyzefg-ijklmnop"); - VerifyParseTemporalTimeStringSuccess("2021-11-03[u-ca=abc]", kUndefined, - kUndefined, kUndefined, kUndefined, - "abc"); 
VerifyParseTemporalTimeStringSuccess("2021-11-03T23:45:12.345[u-ca=abc]", 23, 45, 12, 345000000, "abc"); - VerifyParseTemporalTimeStringSuccess("2021-11-03[u-ca=iso-8601]", kUndefined, - kUndefined, kUndefined, kUndefined, - "iso-8601"); VerifyParseTemporalTimeStringSuccess("2021-11-03 234527[u-ca=iso-8601]", 23, 45, 27, kUndefined, "iso-8601"); - VerifyParseTemporalTimeStringSuccess("2021-11-03[u-ca=123456-789]", - kUndefined, kUndefined, kUndefined, - kUndefined, "123456-789"); VerifyParseTemporalTimeStringSuccess("2021-11-03t12[u-ca=123456-789]", 12, kUndefined, kUndefined, kUndefined, "123456-789"); @@ -681,6 +622,80 @@ TEST_F(TemporalParserTest, TemporalTimeStringIllegal) { VERIFY_PARSE_FAIL(TemporalTimeString, "23:60:02.123456789"); VERIFY_PARSE_FAIL(TemporalTimeString, "23:59:61.123456789"); VERIFY_PARSE_FAIL(TemporalTimeString, "23:33:44.0000000000"); + + VERIFY_PARSE_FAIL(TemporalTimeString, "1900-12-31[Etc/GMT+2]"); + VERIFY_PARSE_FAIL(TemporalTimeString, "1900-12-31[Etc/GMT-0]"); + VERIFY_PARSE_FAIL(TemporalTimeString, "1900-12-31[Etc/GMT-0]"); + + // Date TimeZone + // DateExtendedYear + VERIFY_PARSE_FAIL(TemporalTimeString, "+002021-11-03"); + VERIFY_PARSE_FAIL(TemporalTimeString, "+000001-11-03"); + VERIFY_PARSE_FAIL(TemporalTimeString, "+0020211103"); + VERIFY_PARSE_FAIL(TemporalTimeString, "+0000011231"); + VERIFY_PARSE_FAIL(TemporalTimeString, "+0000000101"); + VERIFY_PARSE_FAIL(TemporalTimeString, "+0000000101"); + VERIFY_PARSE_FAIL(TemporalTimeString, "+654321-11-03"); + VERIFY_PARSE_FAIL(TemporalTimeString, "+999999-12-31"); + VERIFY_PARSE_FAIL(TemporalTimeString, "-654321-11-03"); + VERIFY_PARSE_FAIL(TemporalTimeString, "-999999-12-31"); + VERIFY_PARSE_FAIL(TemporalTimeString, "\u2212999999-12-31"); + VERIFY_PARSE_FAIL(TemporalTimeString, "+6543211103"); + VERIFY_PARSE_FAIL(TemporalTimeString, "+9999991231"); + VERIFY_PARSE_FAIL(TemporalTimeString, "-6543211103"); + VERIFY_PARSE_FAIL(TemporalTimeString, "-9999991231"); + VERIFY_PARSE_FAIL(TemporalTimeString, "\u22129999991231"); + + // Date TimeZone + // Date TimeZoneOffsetRequired + // Date TimeZoneUTCOffset TimeZoneBracketedAnnotation_opt + // Date TimeZoneNumericUTCOffset + VERIFY_PARSE_FAIL(TemporalTimeString, "2021-11-09+11"); + VERIFY_PARSE_FAIL(TemporalTimeString, "2021-11-09-12:03"); + VERIFY_PARSE_FAIL(TemporalTimeString, "2021-11-09-1203"); + VERIFY_PARSE_FAIL(TemporalTimeString, "2021-11-09-12:03:04"); + VERIFY_PARSE_FAIL(TemporalTimeString, "2021-11-09-120304"); + VERIFY_PARSE_FAIL(TemporalTimeString, "2021-11-09-12:03:04,987654321"); + VERIFY_PARSE_FAIL(TemporalTimeString, "2021-11-09-120304.987654321"); + + // Date UTCDesignator + // Date UTCDesignator + VERIFY_PARSE_FAIL(TemporalTimeString, "2021-11-09z"); + VERIFY_PARSE_FAIL(TemporalTimeString, "2021-11-09Z"); + + // Date TimeZoneNameRequired + // Date TimeZoneBracketedAnnotation + VERIFY_PARSE_FAIL(TemporalTimeString, "2021-11-09[Etc/GMT+01]"); + VERIFY_PARSE_FAIL(TemporalTimeString, "2021-11-09[Etc/GMT-23]"); + VERIFY_PARSE_FAIL(TemporalTimeString, "2021-11-09[Etc/GMT+23]"); + VERIFY_PARSE_FAIL(TemporalTimeString, "2021-11-09[Etc/GMT-00]"); + VERIFY_PARSE_FAIL(TemporalTimeString, "2021-11-09[Etc/GMT+01]"); + VERIFY_PARSE_FAIL(TemporalTimeString, "2021-11-09[Etc/GMT-23]"); + + VERIFY_PARSE_FAIL(TemporalTimeString, "2021-11-09[ABCDEFGHIJKLMN]"); + VERIFY_PARSE_FAIL(TemporalTimeString, + "2021-11-09[ABCDEFGHIJKLMN/abcdefghijklmn/opeqrstuv]"); + VERIFY_PARSE_FAIL(TemporalTimeString, + "2021-11-09[aBcDEfGHiJ.L_N/ABC...G_..KLMN]"); + 
VERIFY_PARSE_FAIL(TemporalTimeString, + "2021-11-09[aBcDE-GHiJ.L_N/ABCbcdG-IJKLMN]"); + // TimeZoneUTCOffsetName + VERIFY_PARSE_FAIL(TemporalTimeString, "2021-11-09[+12]"); + VERIFY_PARSE_FAIL(TemporalTimeString, "2021-11-09[+12:34]"); + VERIFY_PARSE_FAIL(TemporalTimeString, "2021-11-09[+12:34:56]"); + VERIFY_PARSE_FAIL(TemporalTimeString, "2021-11-09[+12:34:56,789123456]"); + VERIFY_PARSE_FAIL(TemporalTimeString, "2021-11-09[+12:34:56.789123456]"); + VERIFY_PARSE_FAIL(TemporalTimeString, "2021-11-09[\u221200:34:56.789123456]"); + VERIFY_PARSE_FAIL(TemporalTimeString, "2021-03-11[u-ca=iso8601]"); + VERIFY_PARSE_FAIL(TemporalTimeString, "2021-03-11[u-ca=abcdefgh-wxyzefg]"); + VERIFY_PARSE_FAIL(TemporalTimeString, + "2021-03-11[u-ca=abcdefgh-wxyzefg-ijklmnop]"); + + VERIFY_PARSE_FAIL(TemporalTimeString, + "2021-03-11[+12:34:56,789123456][u-ca=abcdefgh-wxyzefg]"); + VERIFY_PARSE_FAIL(TemporalTimeString, "2021-11-03[u-ca=abc]"); + VERIFY_PARSE_FAIL(TemporalTimeString, "2021-11-03[u-ca=iso-8601]"); + VERIFY_PARSE_FAIL(TemporalTimeString, "2021-11-03[u-ca=123456-789]"); } #define IMPL_DATE_TIME_STRING_SUCCESS(R) \ @@ -828,17 +843,16 @@ TEST_F(TemporalParserTest, TemporalTimeStringIllegal) { 45, 891234567, ""); \ /* Date TimeZoneNameRequired */ \ /* Date TimeZoneBracketedAnnotation */ \ - VerifyParse##R##Success("2021-11-09[Etc/GMT+01]", 2021, 11, 9, kUndefined, \ + VerifyParse##R##Success("2021-11-09[Etc/GMT+1]", 2021, 11, 9, kUndefined, \ kUndefined, kUndefined, kUndefined, ""); \ VerifyParse##R##Success("2021-11-09[Etc/GMT-23]", 2021, 11, 9, kUndefined, \ kUndefined, kUndefined, kUndefined, ""); \ VerifyParse##R##Success("2021-11-09[Etc/GMT+23]", 2021, 11, 9, kUndefined, \ kUndefined, kUndefined, kUndefined, ""); \ - VerifyParse##R##Success("2021-11-09[Etc/GMT-00]", 2021, 11, 9, kUndefined, \ + VerifyParse##R##Success("2021-11-09[Etc/GMT-0]", 2021, 11, 9, kUndefined, \ + kUndefined, kUndefined, kUndefined, ""); \ + VerifyParse##R##Success("2021-11-09Z[Etc/GMT+1]", 2021, 11, 9, kUndefined, \ kUndefined, kUndefined, kUndefined, ""); \ - VerifyParse##R##Success("2021-11-09Z[Etc/GMT+01]", 2021, 11, 9, \ - kUndefined, kUndefined, kUndefined, kUndefined, \ - ""); \ VerifyParse##R##Success("2021-11-09z[Etc/GMT-23]", 2021, 11, 9, \ kUndefined, kUndefined, kUndefined, kUndefined, \ ""); \ @@ -1072,15 +1086,15 @@ TEST_F(TemporalParserTest, TemporalYearMonthStringSuccess) { 11, 9, ""); // Date TimeZoneNameRequired // Date TimeZoneBracketedAnnotation - VerifyParseTemporalYearMonthStringSuccess("2021-11-09[Etc/GMT+01]", 2021, 11, + VerifyParseTemporalYearMonthStringSuccess("2021-11-09[Etc/GMT+1]", 2021, 11, 9, ""); VerifyParseTemporalYearMonthStringSuccess("2021-11-09[Etc/GMT-23]", 2021, 11, 9, ""); VerifyParseTemporalYearMonthStringSuccess("2021-11-09[Etc/GMT+23]", 2021, 11, 9, ""); - VerifyParseTemporalYearMonthStringSuccess("2021-11-09[Etc/GMT-00]", 2021, 11, + VerifyParseTemporalYearMonthStringSuccess("2021-11-09[Etc/GMT-0]", 2021, 11, 9, ""); - VerifyParseTemporalYearMonthStringSuccess("2021-11-09Z[Etc/GMT+01]", 2021, 11, + VerifyParseTemporalYearMonthStringSuccess("2021-11-09Z[Etc/GMT+1]", 2021, 11, 9, ""); VerifyParseTemporalYearMonthStringSuccess("2021-11-09z[Etc/GMT-23]", 2021, 11, 9, ""); @@ -1298,15 +1312,15 @@ TEST_F(TemporalParserTest, TemporalMonthDayStringSuccess) { 11, 9, ""); // Date TimeZoneNameRequired // Date TimeZoneBracketedAnnotation - VerifyParseTemporalMonthDayStringSuccess("2021-11-09[Etc/GMT+01]", 2021, 11, - 9, ""); + 
VerifyParseTemporalMonthDayStringSuccess("2021-11-09[Etc/GMT+1]", 2021, 11, 9, + ""); VerifyParseTemporalMonthDayStringSuccess("2021-11-09[Etc/GMT-23]", 2021, 11, 9, ""); VerifyParseTemporalMonthDayStringSuccess("2021-11-09[Etc/GMT+23]", 2021, 11, 9, ""); - VerifyParseTemporalMonthDayStringSuccess("2021-11-09[Etc/GMT-00]", 2021, 11, - 9, ""); - VerifyParseTemporalMonthDayStringSuccess("2021-11-09Z[Etc/GMT+01]", 2021, 11, + VerifyParseTemporalMonthDayStringSuccess("2021-11-09[Etc/GMT-0]", 2021, 11, 9, + ""); + VerifyParseTemporalMonthDayStringSuccess("2021-11-09Z[Etc/GMT+1]", 2021, 11, 9, ""); VerifyParseTemporalMonthDayStringSuccess("2021-11-09z[Etc/GMT-23]", 2021, 11, 9, ""); From bc0e7c8722b81d92fe3c2e35da5457c25574e07e Mon Sep 17 00:00:00 2001 From: Frank Tang Date: Sat, 10 Sep 2022 14:52:45 -0700 Subject: [PATCH 0039/1772] [Temporal] Fix weekOfYear by passing undefined Not passing null object but passing undefined while calling ToTemporalDate() Bug: v8:11544 Change-Id: I9376c32f306b000980d37bf233ffef3e83baf706 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3885352 Commit-Queue: Frank Tang Reviewed-by: Adam Klein Cr-Commit-Position: refs/heads/main@{#83124} --- src/objects/js-temporal-objects.cc | 1 - test/test262/test262.status | 1 - 2 files changed, 2 deletions(-) diff --git a/src/objects/js-temporal-objects.cc b/src/objects/js-temporal-objects.cc index eb125a3707..6ac2d6387c 100644 --- a/src/objects/js-temporal-objects.cc +++ b/src/objects/js-temporal-objects.cc @@ -10822,7 +10822,6 @@ MaybeHandle JSTemporalCalendar::WeekOfYear( ASSIGN_RETURN_ON_EXCEPTION( isolate, temporal_date, ToTemporalDate(isolate, temporal_date_like, - isolate->factory()->NewJSObjectWithNullProto(), "Temporal.Calendar.prototype.weekOfYear"), Smi); // a. Let value be ! 
ToISOWeekOfYear(temporalDate.[[ISOYear]], diff --git a/test/test262/test262.status b/test/test262/test262.status index 6beda510b2..ac2dfe5bc6 100644 --- a/test/test262/test262.status +++ b/test/test262/test262.status @@ -445,7 +445,6 @@ 'built-ins/Temporal/Duration/prototype/total/timezone-getpossibleinstantsfor-iterable': [FAIL], # https://bugs.chromium.org/p/v8/issues/detail?id=11544 - 'built-ins/Temporal/Calendar/prototype/weekOfYear/calendar-datefromfields-called-with-options-undefined': [FAIL], 'built-ins/Temporal/Duration/prototype/total/balance-negative-result': [FAIL], 'intl402/Temporal/Calendar/prototype/dateFromFields/infinity-throws-rangeerror': [FAIL], 'intl402/Temporal/Calendar/prototype/monthDayFromFields/infinity-throws-rangeerror': [FAIL], From d468f6e0c7602e607ce6095d80ba54bf5edadaca Mon Sep 17 00:00:00 2001 From: Jakob Linke Date: Mon, 12 Sep 2022 07:34:09 +0200 Subject: [PATCH 0040/1772] [maglev] Move deopt helpers to masm Bug: v8:7700 Change-Id: I9554ee1a569cea6a04694c7e0a5b84a45196080c Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3876370 Reviewed-by: Victor Gomes Commit-Queue: Jakob Linke Cr-Commit-Position: refs/heads/main@{#83125} --- src/maglev/maglev-assembler-inl.h | 34 ++++++++++ src/maglev/maglev-assembler.h | 8 +++ src/maglev/maglev-ir.cc | 103 +++++++++--------------------- 3 files changed, 72 insertions(+), 73 deletions(-) diff --git a/src/maglev/maglev-assembler-inl.h b/src/maglev/maglev-assembler-inl.h index e2fd34a5a9..f9fefb53f9 100644 --- a/src/maglev/maglev-assembler-inl.h +++ b/src/maglev/maglev-assembler-inl.h @@ -259,6 +259,40 @@ inline void MaglevAssembler::JumpToDeferredIf(Condition cond, bind(&deferred_code->return_label); } +// --- +// Deopt +// --- + +inline void MaglevAssembler::RegisterEagerDeopt(EagerDeoptInfo* deopt_info, + DeoptimizeReason reason) { + if (deopt_info->reason != DeoptimizeReason::kUnknown) { + DCHECK_EQ(deopt_info->reason, reason); + } + if (deopt_info->deopt_entry_label.is_unused()) { + code_gen_state()->PushEagerDeopt(deopt_info); + deopt_info->reason = reason; + } +} + +template +inline void MaglevAssembler::EmitEagerDeopt(NodeT* node, + DeoptimizeReason reason) { + static_assert(NodeT::kProperties.can_eager_deopt()); + RegisterEagerDeopt(node->eager_deopt_info(), reason); + RecordComment("-- Jump to eager deopt"); + jmp(&node->eager_deopt_info()->deopt_entry_label); +} + +template +inline void MaglevAssembler::EmitEagerDeoptIf(Condition cond, + DeoptimizeReason reason, + NodeT* node) { + static_assert(NodeT::kProperties.can_eager_deopt()); + RegisterEagerDeopt(node->eager_deopt_info(), reason); + RecordComment("-- Jump to eager deopt"); + j(cond, &node->eager_deopt_info()->deopt_entry_label); +} + } // namespace maglev } // namespace internal } // namespace v8 diff --git a/src/maglev/maglev-assembler.h b/src/maglev/maglev-assembler.h index 74696db9e5..ec85919ef0 100644 --- a/src/maglev/maglev-assembler.h +++ b/src/maglev/maglev-assembler.h @@ -74,6 +74,14 @@ class MaglevAssembler : public MacroAssembler { inline void JumpToDeferredIf(Condition cond, Function&& deferred_code_gen, Args&&... 
args); + inline void RegisterEagerDeopt(EagerDeoptInfo* deopt_info, + DeoptimizeReason reason); + template + inline void EmitEagerDeopt(NodeT* node, DeoptimizeReason reason); + template + inline void EmitEagerDeoptIf(Condition cond, DeoptimizeReason reason, + NodeT* node); + compiler::NativeContextRef native_context() const { return code_gen_state()->broker()->target_native_context(); } diff --git a/src/maglev/maglev-ir.cc b/src/maglev/maglev-ir.cc index af4bc70f34..a72313dc56 100644 --- a/src/maglev/maglev-ir.cc +++ b/src/maglev/maglev-ir.cc @@ -265,49 +265,6 @@ void ToBoolean(MaglevAssembler* masm, Register value, ZoneLabelRef is_true, } } -// --- -// Deopt -// --- - -void RegisterEagerDeopt(MaglevAssembler* masm, EagerDeoptInfo* deopt_info, - DeoptimizeReason reason) { - if (deopt_info->reason != DeoptimizeReason::kUnknown) { - DCHECK_EQ(deopt_info->reason, reason); - } - if (deopt_info->deopt_entry_label.is_unused()) { - masm->code_gen_state()->PushEagerDeopt(deopt_info); - deopt_info->reason = reason; - } -} - -void EmitEagerDeopt(MaglevAssembler* masm, EagerDeoptInfo* deopt_info, - DeoptimizeReason reason) { - RegisterEagerDeopt(masm, deopt_info, reason); - __ RecordComment("-- Jump to eager deopt"); - __ jmp(&deopt_info->deopt_entry_label); -} - -template -void EmitEagerDeopt(MaglevAssembler* masm, NodeT* node, - DeoptimizeReason reason) { - static_assert(NodeT::kProperties.can_eager_deopt()); - EmitEagerDeopt(masm, node->eager_deopt_info(), reason); -} - -void EmitEagerDeoptIf(Condition cond, MaglevAssembler* masm, - DeoptimizeReason reason, EagerDeoptInfo* deopt_info) { - RegisterEagerDeopt(masm, deopt_info, reason); - __ RecordComment("-- Jump to eager deopt"); - __ j(cond, &deopt_info->deopt_entry_label); -} - -template -void EmitEagerDeoptIf(Condition cond, MaglevAssembler* masm, - DeoptimizeReason reason, NodeT* node) { - static_assert(NodeT::kProperties.can_eager_deopt()); - EmitEagerDeoptIf(cond, masm, reason, node->eager_deopt_info()); -} - // --- // Print // --- @@ -1225,10 +1182,10 @@ void CheckMaps::GenerateCode(MaglevAssembler* masm, __ AssertNotSmi(object); } else { Condition is_smi = __ CheckSmi(object); - EmitEagerDeoptIf(is_smi, masm, DeoptimizeReason::kWrongMap, this); + __ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kWrongMap, this); } __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map().object()); - EmitEagerDeoptIf(not_equal, masm, DeoptimizeReason::kWrongMap, this); + __ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kWrongMap, this); } void CheckMaps::PrintParams(std::ostream& os, MaglevGraphLabeller* graph_labeller) const { @@ -1241,8 +1198,8 @@ void CheckSmi::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { Register object = ToRegister(receiver_input()); Condition is_smi = __ CheckSmi(object); - EmitEagerDeoptIf(NegateCondition(is_smi), masm, DeoptimizeReason::kNotASmi, - this); + __ EmitEagerDeoptIf(NegateCondition(is_smi), DeoptimizeReason::kNotASmi, + this); } void CheckSmi::PrintParams(std::ostream& os, MaglevGraphLabeller* graph_labeller) const {} @@ -1266,7 +1223,7 @@ void CheckNumber::GenerateCode(MaglevAssembler* masm, __ cmpw(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset), Immediate(BIGINT_TYPE)); } - EmitEagerDeoptIf(not_equal, masm, DeoptimizeReason::kNotANumber, this); + __ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kNotANumber, this); __ bind(&done); } @@ -1277,7 +1234,7 @@ void CheckHeapObject::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { Register object = ToRegister(receiver_input()); 
Condition is_smi = __ CheckSmi(object); - EmitEagerDeoptIf(is_smi, masm, DeoptimizeReason::kSmi, this); + __ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kSmi, this); } void CheckHeapObject::PrintParams(std::ostream& os, MaglevGraphLabeller* graph_labeller) const {} @@ -1291,11 +1248,11 @@ void CheckSymbol::GenerateCode(MaglevAssembler* masm, __ AssertNotSmi(object); } else { Condition is_smi = __ CheckSmi(object); - EmitEagerDeoptIf(is_smi, masm, DeoptimizeReason::kNotASymbol, this); + __ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kNotASymbol, this); } __ LoadMap(kScratchRegister, object); __ CmpInstanceType(kScratchRegister, SYMBOL_TYPE); - EmitEagerDeoptIf(not_equal, masm, DeoptimizeReason::kNotASymbol, this); + __ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kNotASymbol, this); } void CheckSymbol::PrintParams(std::ostream& os, MaglevGraphLabeller* graph_labeller) const {} @@ -1310,12 +1267,12 @@ void CheckString::GenerateCode(MaglevAssembler* masm, __ AssertNotSmi(object); } else { Condition is_smi = __ CheckSmi(object); - EmitEagerDeoptIf(is_smi, masm, DeoptimizeReason::kNotAString, this); + __ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kNotAString, this); } __ LoadMap(kScratchRegister, object); __ CmpInstanceTypeRange(kScratchRegister, kScratchRegister, FIRST_STRING_TYPE, LAST_STRING_TYPE); - EmitEagerDeoptIf(above, masm, DeoptimizeReason::kNotAString, this); + __ EmitEagerDeoptIf(above, DeoptimizeReason::kNotAString, this); } void CheckString::PrintParams(std::ostream& os, MaglevGraphLabeller* graph_labeller) const {} @@ -1332,7 +1289,7 @@ void CheckMapsWithMigration::GenerateCode(MaglevAssembler* masm, __ AssertNotSmi(object); } else { Condition is_smi = __ CheckSmi(object); - EmitEagerDeoptIf(is_smi, masm, DeoptimizeReason::kWrongMap, this); + __ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kWrongMap, this); } __ Cmp(FieldOperand(object, HeapObject::kMapOffset), map().object()); @@ -1340,7 +1297,7 @@ void CheckMapsWithMigration::GenerateCode(MaglevAssembler* masm, not_equal, [](MaglevAssembler* masm, Label* return_label, Register object, CheckMapsWithMigration* node, EagerDeoptInfo* deopt_info) { - RegisterEagerDeopt(masm, deopt_info, DeoptimizeReason::kWrongMap); + __ RegisterEagerDeopt(deopt_info, DeoptimizeReason::kWrongMap); // Reload the map to avoid needing to save it on a temporary in the fast // path. 
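// Taken together, these hunks converge on one pattern; a sketch with a
// hypothetical CheckFoo node (the real call sites are the Check* nodes in
// the surrounding hunks): the deopt helpers now hang off MaglevAssembler,
// so code generation reads as plain `__`-prefixed assembler calls instead
// of free functions that take masm explicitly.
//
//   void CheckFoo::GenerateCode(MaglevAssembler* masm,
//                               const ProcessingState& state) {
//     Register object = ToRegister(receiver_input());
//     Condition is_smi = __ CheckSmi(object);
//     // Registers the node's eager-deopt info once, then jumps on cond.
//     __ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kSmi, this);
//   }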
@@ -1410,7 +1367,7 @@ void CheckedInternalizedString::GenerateCode(MaglevAssembler* masm, __ AssertNotSmi(object); } else { Condition is_smi = __ CheckSmi(object); - EmitEagerDeoptIf(is_smi, masm, DeoptimizeReason::kWrongMap, this); + __ EmitEagerDeoptIf(is_smi, DeoptimizeReason::kWrongMap, this); } __ LoadMap(map_tmp, object); @@ -1906,7 +1863,7 @@ void Int32AddWithOverflow::GenerateCode(MaglevAssembler* masm, Register left = ToRegister(left_input()); Register right = ToRegister(right_input()); __ addl(left, right); - EmitEagerDeoptIf(overflow, masm, DeoptimizeReason::kOverflow, this); + __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this); } void Int32SubtractWithOverflow::AllocateVreg( @@ -1921,7 +1878,7 @@ void Int32SubtractWithOverflow::GenerateCode(MaglevAssembler* masm, Register left = ToRegister(left_input()); Register right = ToRegister(right_input()); __ subl(left, right); - EmitEagerDeoptIf(overflow, masm, DeoptimizeReason::kOverflow, this); + __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this); } void Int32MultiplyWithOverflow::AllocateVreg( @@ -1942,7 +1899,7 @@ void Int32MultiplyWithOverflow::GenerateCode(MaglevAssembler* masm, __ movl(saved_left, result); // TODO(leszeks): peephole optimise multiplication by a constant. __ imull(result, right); - EmitEagerDeoptIf(overflow, masm, DeoptimizeReason::kOverflow, this); + __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this); // If the result is zero, check if either lhs or rhs is negative. Label end; @@ -1955,7 +1912,7 @@ void Int32MultiplyWithOverflow::GenerateCode(MaglevAssembler* masm, // so deopt. // TODO(leszeks): Consider splitting these deopts to have distinct deopt // reasons. Otherwise, the reason has to match the above. - EmitEagerDeoptIf(less, masm, DeoptimizeReason::kOverflow, this); + __ EmitEagerDeoptIf(less, DeoptimizeReason::kOverflow, this); } __ bind(&end); } @@ -2001,13 +1958,13 @@ void Int32DivideWithOverflow::GenerateCode(MaglevAssembler* masm, // better. Right now all eager deopts in a node have to be the same -- // we should allow a node to emit multiple eager deopts with different // reasons. - EmitEagerDeoptIf(equal, masm, DeoptimizeReason::kNotInt32, node); + __ EmitEagerDeoptIf(equal, DeoptimizeReason::kNotInt32, node); // Check if {left} is zero, as that would produce minus zero. Left is in // rax already. __ cmpl(rax, Immediate(0)); // TODO(leszeks): Better DeoptimizeReason = kMinusZero. - EmitEagerDeoptIf(equal, masm, DeoptimizeReason::kNotInt32, node); + __ EmitEagerDeoptIf(equal, DeoptimizeReason::kNotInt32, node); // Check if {left} is kMinInt and {right} is -1, in which case we'd have // to return -kMinInt, which is not representable as Int32. @@ -2015,9 +1972,9 @@ void Int32DivideWithOverflow::GenerateCode(MaglevAssembler* masm, __ j(not_equal, return_label); __ cmpl(right, Immediate(-1)); __ j(not_equal, return_label); - // TODO(leszeks): Better DeoptimizeReason = kOverflow. - EmitEagerDeopt(masm, node->eager_deopt_info(), - DeoptimizeReason::kNotInt32); + // TODO(leszeks): Better DeoptimizeReason = kOverflow, but + // eager_deopt_info is already configured as kNotInt32. + __ EmitEagerDeopt(node, DeoptimizeReason::kNotInt32); }, right, this); @@ -2026,7 +1983,7 @@ void Int32DivideWithOverflow::GenerateCode(MaglevAssembler* masm, // Check that the remainder is zero. 
__ cmpl(rdx, Immediate(0)); - EmitEagerDeoptIf(not_equal, masm, DeoptimizeReason::kNotInt32, this); + __ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kNotInt32, this); DCHECK_EQ(ToRegister(result()), rax); } @@ -2119,7 +2076,7 @@ void Int32ShiftRightLogical::GenerateCode(MaglevAssembler* masm, // TODO(jgruber): Properly track signed/unsigned representations and // allocated a heap number if the result is outside smi range. __ testl(left, Immediate((1 << 31) | (1 << 30))); - EmitEagerDeoptIf(not_equal, masm, DeoptimizeReason::kOverflow, this); + __ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kOverflow, this); } namespace { @@ -2298,8 +2255,8 @@ void CheckedSmiUntag::GenerateCode(MaglevAssembler* masm, // of the `sarl` for cases where the deopt uses the value from a different // register. Condition is_smi = __ CheckSmi(value); - EmitEagerDeoptIf(NegateCondition(is_smi), masm, DeoptimizeReason::kNotASmi, - this); + __ EmitEagerDeoptIf(NegateCondition(is_smi), DeoptimizeReason::kNotASmi, + this); __ SmiToInt32(value); } @@ -2312,7 +2269,7 @@ void CheckedSmiTag::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { Register reg = ToRegister(input()); __ addl(reg, reg); - EmitEagerDeoptIf(overflow, masm, DeoptimizeReason::kOverflow, this); + __ EmitEagerDeoptIf(overflow, DeoptimizeReason::kOverflow, this); } void Int32Constant::AllocateVreg(MaglevVregAllocationState* vreg_state) { @@ -2375,7 +2332,7 @@ void CheckedFloat64Unbox::GenerateCode(MaglevAssembler* masm, // Check if HeapNumber, deopt otherwise. __ CompareRoot(FieldOperand(value, HeapObject::kMapOffset), RootIndex::kHeapNumberMap); - EmitEagerDeoptIf(not_equal, masm, DeoptimizeReason::kNotANumber, this); + __ EmitEagerDeoptIf(not_equal, DeoptimizeReason::kNotANumber, this); __ Movsd(ToDoubleRegister(result()), FieldOperand(value, HeapNumber::kValueOffset)); __ bind(&done); @@ -3184,7 +3141,7 @@ void Return::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { void Deopt::AllocateVreg(MaglevVregAllocationState* vreg_state) {} void Deopt::GenerateCode(MaglevAssembler* masm, const ProcessingState& state) { - EmitEagerDeopt(masm, this, reason()); + __ EmitEagerDeopt(this, reason()); } void Deopt::PrintParams(std::ostream& os, MaglevGraphLabeller* graph_labeller) const { @@ -3314,7 +3271,7 @@ void AttemptOnStackReplacement(MaglevAssembler* masm, Label* return_label, __ bind(&deopt); if (V8_LIKELY(FLAG_turbofan)) { - EmitEagerDeopt(masm, node, DeoptimizeReason::kPrepareForOnStackReplacement); + __ EmitEagerDeopt(node, DeoptimizeReason::kPrepareForOnStackReplacement); } else { // Fall through. With TF disabled we cannot OSR and thus it doesn't make // sense to start the process. We do still perform all remaining From e41b78bc4e42c127f88b2413fe536c30b7454774 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dominik=20Inf=C3=BChr?= Date: Fri, 9 Sep 2022 21:51:43 +0200 Subject: [PATCH 0041/1772] [heap] Use page iterability of page new space pages MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit During verification all LABs are iterable. For PagedNewSpace we can therefore use the property that all new space pages are iterable. 
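Concretely, VerifyImpl() gains an explicit stop address instead of
special-casing top(); a rough sketch of the two callers as they appear in
the diff below, where kNullAddress means "never stop early":

  // Semispace: everything past the allocation top is free space.
  VerifyImpl(isolate, Page::FromAllocationAreaAddress(current), current,
             top());
  // Paged new space: all LABs are iterable, so walk each page to its end.
  VerifyImpl(isolate, first_page, first_page->area_start(), kNullAddress);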
Bug: v8:12612 Change-Id: I71ec079fde3c0b719ccf91b431b0b29a8a9c5a2e Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3888019 Reviewed-by: Omer Katz Commit-Queue: Dominik Inführ Cr-Commit-Position: refs/heads/main@{#83126} --- src/heap/new-spaces.cc | 21 ++++++++------------- src/heap/new-spaces.h | 3 ++- 2 files changed, 10 insertions(+), 14 deletions(-) diff --git a/src/heap/new-spaces.cc b/src/heap/new-spaces.cc index 719d05e26b..9aa413c95b 100644 --- a/src/heap/new-spaces.cc +++ b/src/heap/new-spaces.cc @@ -481,7 +481,8 @@ void NewSpace::VerifyTop() const { // We do not use the SemiSpaceObjectIterator because verification doesn't assume // that it works (it depends on the invariants we are checking). void NewSpace::VerifyImpl(Isolate* isolate, const Page* current_page, - Address current_address) const { + Address current_address, + Address stop_iteration_at_address) const { DCHECK(current_page->ContainsLimit(current_address)); size_t external_space_bytes[kNumTypes]; @@ -495,16 +496,7 @@ void NewSpace::VerifyImpl(Isolate* isolate, const Page* current_page, PtrComprCageBase cage_base(isolate); VerifyPointersVisitor visitor(heap()); const Page* page = current_page; - while (true) { - if (current_address == top()) { - if (v8_flags.minor_mc) { - // Jump over the current allocation area. - current_address = limit(); - } else { - // Early bailout since everything after top() should be free space. - break; - } - } + while (current_address != stop_iteration_at_address) { if (!Page::IsAlignedToPageSize(current_address)) { // The allocation pointer should not be in the middle of an object. CHECK_IMPLIES(!v8_flags.minor_mc, @@ -777,7 +769,7 @@ void SemiSpaceNewSpace::Verify(Isolate* isolate) const { Address current = to_space_.first_page()->area_start(); CHECK_EQ(current, to_space_.space_start()); - VerifyImpl(isolate, Page::FromAllocationAreaAddress(current), current); + VerifyImpl(isolate, Page::FromAllocationAreaAddress(current), current, top()); // Check semi-spaces. CHECK_EQ(from_space_.id(), kFromSpace); @@ -1051,7 +1043,10 @@ PagedNewSpace::~PagedNewSpace() { void PagedNewSpace::Verify(Isolate* isolate) const { const Page* first_page = paged_space_.first_page(); - if (first_page) VerifyImpl(isolate, first_page, first_page->area_start()); + if (first_page) { + // No bailout needed since all pages are iterable. + VerifyImpl(isolate, first_page, first_page->area_start(), kNullAddress); + } // Check paged-spaces. VerifyPointersVisitor visitor(heap()); diff --git a/src/heap/new-spaces.h b/src/heap/new-spaces.h index e94cb57a45..ebfca7edc0 100644 --- a/src/heap/new-spaces.h +++ b/src/heap/new-spaces.h @@ -298,7 +298,8 @@ class NewSpace : NON_EXPORTED_BASE(public SpaceWithLinearArea) { // |address|. |address| should be a valid limit on |page| (see // BasicMemoryChunk::ContainsLimit). void VerifyImpl(Isolate* isolate, const Page* current_page, - Address current_address) const; + Address current_address, + Address stop_iteration_at_address) const; #endif #ifdef V8_ENABLE_INNER_POINTER_RESOLUTION_OSB From 67d2acf3f65d5a63514bd6e2aa1679c1116545e3 Mon Sep 17 00:00:00 2001 From: Leszek Swirski Date: Mon, 12 Sep 2022 12:14:17 +0200 Subject: [PATCH 0042/1772] [maglev] Fix baseline flushing test --no-maglev flag Make the flush-baseline-code tests use --no-maglev in addition to --no-turbofan. 
Bug: v8:7700 Change-Id: I12145735e7a88f156d30e15621a9fe12e18abecf Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3890990 Commit-Queue: Jakob Linke Auto-Submit: Leszek Swirski Commit-Queue: Leszek Swirski Reviewed-by: Jakob Linke Cr-Commit-Position: refs/heads/main@{#83127} --- test/mjsunit/baseline/flush-baseline-code.js | 2 +- test/mjsunit/baseline/flush-only-baseline-code.js | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/test/mjsunit/baseline/flush-baseline-code.js b/test/mjsunit/baseline/flush-baseline-code.js index 8d25e609c4..db939682a6 100644 --- a/test/mjsunit/baseline/flush-baseline-code.js +++ b/test/mjsunit/baseline/flush-baseline-code.js @@ -5,7 +5,7 @@ // Flags: --expose-gc --stress-flush-code --allow-natives-syntax // Flags: --baseline-batch-compilation-threshold=0 --sparkplug // Flags: --no-always-sparkplug --lazy-feedback-allocation -// Flags: --flush-baseline-code --flush-bytecode --no-turbofan +// Flags: --flush-baseline-code --flush-bytecode --no-turbofan --no-maglev // Flags: --no-stress-concurrent-inlining // Flags: --no-concurrent-sparkplug diff --git a/test/mjsunit/baseline/flush-only-baseline-code.js b/test/mjsunit/baseline/flush-only-baseline-code.js index 185e941d34..53cb059ce7 100644 --- a/test/mjsunit/baseline/flush-only-baseline-code.js +++ b/test/mjsunit/baseline/flush-only-baseline-code.js @@ -5,7 +5,7 @@ // Flags: --expose-gc --stress-flush-code --allow-natives-syntax // Flags: --baseline-batch-compilation-threshold=0 --sparkplug // Flags: --no-always-sparkplug --lazy-feedback-allocation -// Flags: --flush-baseline-code --no-flush-bytecode --no-turbofan +// Flags: --flush-baseline-code --no-flush-bytecode --no-turbofan --no-maglev // Flags: --no-stress-concurrent-inlining // Flags: --no-concurrent-sparkplug From 1f529f2b924e7331c407d67c0570e96a233e6b38 Mon Sep 17 00:00:00 2001 From: Matthias Liedtke Date: Mon, 12 Sep 2022 12:14:38 +0200 Subject: [PATCH 0043/1772] [heap] Fix v8 DEPS roll by removing semicolon MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixed: chromium:1362431 Bug: chromium:1362431 Change-Id: Iaef432459dc39aa8f6bef5b74687af172d065574 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3890991 Reviewed-by: Dominik Inführ Commit-Queue: Nico Hartmann Auto-Submit: Matthias Liedtke Reviewed-by: Nico Hartmann Cr-Commit-Position: refs/heads/main@{#83128} --- src/heap/object-start-bitmap.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/heap/object-start-bitmap.h b/src/heap/object-start-bitmap.h index 5abb22dd2d..307d834cc2 100644 --- a/src/heap/object-start-bitmap.h +++ b/src/heap/object-start-bitmap.h @@ -71,7 +71,7 @@ class V8_EXPORT_PRIVATE ObjectStartBitmap { inline uint32_t load(size_t cell_index) const; PtrComprCageBase cage_base() const { return cage_base_; } - Address offset() const { return offset_; }; + Address offset() const { return offset_; } static constexpr size_t kBitsPerCell = sizeof(uint32_t) * CHAR_BIT; static constexpr size_t kCellMask = kBitsPerCell - 1; From c1e067e993c9d59f3b7875b9ee5eb1a90e6363d3 Mon Sep 17 00:00:00 2001 From: Leszek Swirski Date: Mon, 12 Sep 2022 12:19:39 +0200 Subject: [PATCH 0044/1772] [maglev] Clear register state in exception handlers Exception handlers were allowing register state to leak through, which had knock-on effects of Phi allocation inserting gap moves in an illegal location (specifically, at the end of the block, thinking that it's allocating a control node since it's not 
allocating a body node). Fix the register leak by clearing register state, and add some invariant guards in the areas where the failure appeared. Bug: v8:7700 Change-Id: I15c1fba1a250e295f0147a4e51a6c8c5481e8c7e Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3890989 Reviewed-by: Jakob Linke Commit-Queue: Jakob Linke Auto-Submit: Leszek Swirski Cr-Commit-Position: refs/heads/main@{#83129} --- src/maglev/maglev-ir.h | 7 ++++++ src/maglev/maglev-regalloc.cc | 42 +++++++++++++++++++++++++++++------ src/maglev/maglev-regalloc.h | 1 + 3 files changed, 43 insertions(+), 7 deletions(-) diff --git a/src/maglev/maglev-ir.h b/src/maglev/maglev-ir.h index d9e73750d3..4f30f9845e 100644 --- a/src/maglev/maglev-ir.h +++ b/src/maglev/maglev-ir.h @@ -298,6 +298,9 @@ constexpr bool IsConstantNode(Opcode opcode) { constexpr bool IsGapMoveNode(Opcode opcode) { return kFirstGapMoveNodeOpcode <= opcode && opcode <= kLastGapMoveNodeOpcode; } +constexpr bool IsControlNode(Opcode opcode) { + return kFirstControlNodeOpcode <= opcode && opcode <= kLastControlNodeOpcode; +} constexpr bool IsBranchControlNode(Opcode opcode) { return kFirstBranchControlNodeOpcode <= opcode && opcode <= kLastBranchControlNodeOpcode; @@ -1007,6 +1010,10 @@ constexpr bool NodeBase::Is() const { return IsValueNode(opcode()); } template <> +constexpr bool NodeBase::Is() const { + return IsControlNode(opcode()); +} +template <> constexpr bool NodeBase::Is() const { return IsBranchControlNode(opcode()); } diff --git a/src/maglev/maglev-regalloc.cc b/src/maglev/maglev-regalloc.cc index 404e57f307..c3558b031c 100644 --- a/src/maglev/maglev-regalloc.cc +++ b/src/maglev/maglev-regalloc.cc @@ -311,10 +311,16 @@ void StraightForwardRegisterAllocator::AllocateRegisters() { for (block_it_ = graph_->begin(); block_it_ != graph_->end(); ++block_it_) { BasicBlock* block = *block_it_; + current_node_ = nullptr; // Restore mergepoint state. - if (block->has_state() && !block->state()->is_exception_handler()) { - InitializeRegisterValues(block->state()->register_state()); + if (block->has_state()) { + if (block->state()->is_exception_handler()) { + // Exceptions start from a blank state of register values. + ClearRegisterValues(); + } else { + InitializeRegisterValues(block->state()->register_state()); + } } else if (block->is_empty_block()) { InitializeRegisterValues(block->empty_block_register_state()); } @@ -543,6 +549,11 @@ Register GetNodeResultRegister(Node* node) { #endif // DEBUG void StraightForwardRegisterAllocator::AllocateNode(Node* node) { + // We shouldn't be visiting any gap moves during allocation, we should only + // have inserted gap moves in past visits. + DCHECK(!node->Is()); + DCHECK(!node->Is()); + current_node_ = node; if (FLAG_trace_maglev_regalloc) { printing_visitor_->os() @@ -562,7 +573,6 @@ void StraightForwardRegisterAllocator::AllocateNode(Node* node) { AllocateNodeResult(node->Cast()); } - current_node_ = node; if (FLAG_trace_maglev_regalloc) { printing_visitor_->os() << "Updating uses...\n"; } @@ -570,12 +580,25 @@ void StraightForwardRegisterAllocator::AllocateNode(Node* node) { // Update uses only after allocating the node result. This order is necessary // to avoid emitting input-clobbering gap moves during node result allocation. 
if (node->properties().can_eager_deopt()) { + if (FLAG_trace_maglev_regalloc) { + printing_visitor_->os() << "Using eager deopt nodes...\n"; + } UpdateUse(*node->eager_deopt_info()); } - for (Input& input : *node) UpdateUse(&input); + for (Input& input : *node) { + if (FLAG_trace_maglev_regalloc) { + printing_visitor_->os() + << "Using input " << PrintNodeLabel(graph_labeller(), input.node()) + << "...\n"; + } + UpdateUse(&input); + } // Lazy deopts are semantically after the node, so update them last. if (node->properties().can_lazy_deopt()) { + if (FLAG_trace_maglev_regalloc) { + printing_visitor_->os() << "Using lazy deopt nodes...\n"; + } UpdateUse(*node->lazy_deopt_info()); } @@ -933,6 +956,7 @@ void StraightForwardRegisterAllocator::AddMoveBeforeCurrentNode( graph_labeller()->RegisterNode(gap_move); } if (*node_it_ == nullptr) { + DCHECK(current_node_->Is()); // We're at the control node, so append instead. (*block_it_)->nodes().Add(gap_move); node_it_ = (*block_it_)->nodes().end(); @@ -1519,15 +1543,19 @@ void StraightForwardRegisterAllocator::ForEachMergePointRegisterState( }); } -void StraightForwardRegisterAllocator::InitializeRegisterValues( - MergePointRegisterState& target_state) { - // First clear the register state. +void StraightForwardRegisterAllocator::ClearRegisterValues() { ClearRegisterState(general_registers_); ClearRegisterState(double_registers_); // All registers should be free by now. DCHECK_EQ(general_registers_.unblocked_free(), kAllocatableGeneralRegisters); DCHECK_EQ(double_registers_.unblocked_free(), kAllocatableDoubleRegisters); +} + +void StraightForwardRegisterAllocator::InitializeRegisterValues( + MergePointRegisterState& target_state) { + // First clear the register state. + ClearRegisterValues(); // Then fill it in with target information. auto fill = [&](auto& registers, auto reg, RegisterState& state) { diff --git a/src/maglev/maglev-regalloc.h b/src/maglev/maglev-regalloc.h index 26b39baef7..081383dd82 100644 --- a/src/maglev/maglev-regalloc.h +++ b/src/maglev/maglev-regalloc.h @@ -203,6 +203,7 @@ class StraightForwardRegisterAllocator { void ForEachMergePointRegisterState( MergePointRegisterState& merge_point_state, Function&& f); + void ClearRegisterValues(); void InitializeRegisterValues(MergePointRegisterState& target_state); #ifdef DEBUG bool IsInRegister(MergePointRegisterState& target_state, ValueNode* incoming); From 0e2dbaac6b3d355b04460634742752dfeb2071d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Samuel=20Gro=C3=9F?= Date: Mon, 12 Sep 2022 07:20:02 +0000 Subject: [PATCH 0045/1772] Reland "[sandbox] Fold V8_SANDBOXED_EXTERNAL_POINTERS into V8_ENABLE_SANDBOX" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a reland of commit 49c59678301fafcd7f70221cdd7936253a229093 The non-deterministic snapshot issue has been fixed by using the correct field size for CodeDataContainers in serializer.cc. Original change's description: > [sandbox] Fold V8_SANDBOXED_EXTERNAL_POINTERS into V8_ENABLE_SANDBOX > > Now that all external pointers have been sandboxed, > V8_SANDBOXED_EXTERNAL_POINTERS is no longer needed. This change also > shrinks external pointer slots to 32 bits when the sandbox is enabled. 
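(For intuition, a deliberately simplified model of what "32-bit external
pointer slots" buys — hypothetical names, not V8's actual table layout:
with the sandbox enabled, an on-heap field stores a 32-bit handle that
indexes a pointer table living outside the sandbox, rather than a raw
address that sandboxed memory could forge.)

  #include <cstdint>
  #include <vector>

  using ExternalPointerHandle = uint32_t;  // the 32-bit on-heap slot value
  using Address = uintptr_t;

  // Hypothetical stand-in for the out-of-sandbox pointer table.
  struct ExternalPointerTable {
    std::vector<Address> entries_;
    Address Get(ExternalPointerHandle handle) const {
      return entries_[handle];  // the real table also validates a type tag
    }
  };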
> > Bug: v8:10391 > Change-Id: Iccbef27ac107b988cb23fe9ef66da6fe0bae087a > Cq-Include-Trybots: luci.v8.try:v8_linux64_heap_sandbox_dbg_ng,v8_linux_arm64_sim_heap_sandbox_dbg_ng > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3869269 > Reviewed-by: Leszek Swirski > Reviewed-by: Manos Koukoutos > Reviewed-by: Nico Hartmann > Reviewed-by: Igor Sheludko > Commit-Queue: Samuel Groß > Cr-Commit-Position: refs/heads/main@{#83083} Bug: v8:10391 Change-Id: I29870404406902d99ba6016c570cc0c4d05c6c85 Cq-Include-Trybots: luci.v8.try:v8_linux64_heap_sandbox_dbg_ng,v8_linux_arm64_sim_heap_sandbox_dbg_ng Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3887899 Reviewed-by: Igor Sheludko Reviewed-by: Leszek Swirski Reviewed-by: Nico Hartmann Reviewed-by: Manos Koukoutos Commit-Queue: Samuel Groß Cr-Commit-Position: refs/heads/main@{#83130} --- BUILD.gn | 12 ---- include/v8-initialization.h | 6 +- include/v8-internal.h | 19 ++---- src/api/api.cc | 11 ---- src/codegen/tnode.h | 2 +- src/common/globals.h | 8 +-- src/objects/slots-inl.h | 4 +- src/snapshot/serializer.cc | 2 +- src/torque/torque-parser.cc | 2 - src/wasm/wasm-objects.tq | 6 +- test/cctest/test-api.cc | 5 +- test/cctest/test-strings.cc | 16 ++--- tools/v8heapconst.py | 122 ++++++++++++++++++------------------ 13 files changed, 84 insertions(+), 131 deletions(-) diff --git a/BUILD.gn b/BUILD.gn index 4b8965626b..43667839c5 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -307,10 +307,6 @@ declare_args() { # Sets -DV8_ENABLE_SANDBOX. v8_enable_sandbox = "" - # Enable sandboxing for all external pointers. Requires v8_enable_sandbox. - # Sets -DV8_SANDBOXED_EXTERNAL_POINTERS. - v8_enable_sandboxed_external_pointers = false - # Enable all available sandbox features. Implies v8_enable_sandbox. v8_enable_sandbox_future = false @@ -534,7 +530,6 @@ if (v8_enable_sandbox == "") { # Enable all available sandbox features if sandbox future is enabled. if (v8_enable_sandbox_future) { - v8_enable_sandboxed_external_pointers = true v8_enable_sandbox = true } @@ -569,9 +564,6 @@ assert(!v8_enable_sandbox || v8_enable_pointer_compression_shared_cage, assert(!v8_enable_sandbox || v8_enable_external_code_space, "The sandbox requires the external code space") -assert(!v8_enable_sandboxed_external_pointers || v8_enable_sandbox, - "Sandboxed external pointers require the sandbox") - assert(!v8_expose_memory_corruption_api || v8_enable_sandbox, "The Memory Corruption API requires the sandbox") @@ -749,7 +741,6 @@ external_v8_defines = [ "V8_31BIT_SMIS_ON_64BIT_ARCH", "V8_COMPRESS_ZONES", "V8_ENABLE_SANDBOX", - "V8_SANDBOXED_EXTERNAL_POINTERS", "V8_DEPRECATION_WARNINGS", "V8_IMMINENT_DEPRECATION_WARNINGS", "V8_NO_ARGUMENTS_ADAPTOR", @@ -780,9 +771,6 @@ if (v8_enable_zone_compression) { if (v8_enable_sandbox) { enabled_external_v8_defines += [ "V8_ENABLE_SANDBOX" ] } -if (v8_enable_sandboxed_external_pointers) { - enabled_external_v8_defines += [ "V8_SANDBOXED_EXTERNAL_POINTERS" ] -} if (v8_deprecation_warnings) { enabled_external_v8_defines += [ "V8_DEPRECATION_WARNINGS" ] } diff --git a/include/v8-initialization.h b/include/v8-initialization.h index 7bbec662a7..d3e35d6ec5 100644 --- a/include/v8-initialization.h +++ b/include/v8-initialization.h @@ -100,9 +100,6 @@ class V8_EXPORT V8 { const int kBuildConfiguration = (internal::PointerCompressionIsEnabled() ? kPointerCompression : 0) | (internal::SmiValuesAre31Bits() ? k31BitSmis : 0) | - (internal::SandboxedExternalPointersAreEnabled() - ? 
kSandboxedExternalPointers - : 0) | (internal::SandboxIsEnabled() ? kSandbox : 0); return Initialize(kBuildConfiguration); } @@ -273,8 +270,7 @@ class V8_EXPORT V8 { enum BuildConfigurationFeatures { kPointerCompression = 1 << 0, k31BitSmis = 1 << 1, - kSandboxedExternalPointers = 1 << 2, - kSandbox = 1 << 3, + kSandbox = 1 << 2, }; /** diff --git a/include/v8-internal.h b/include/v8-internal.h index 818c720cb4..ed6aff1426 100644 --- a/include/v8-internal.h +++ b/include/v8-internal.h @@ -166,14 +166,6 @@ constexpr bool SandboxIsEnabled() { #endif } -constexpr bool SandboxedExternalPointersAreEnabled() { -#ifdef V8_SANDBOXED_EXTERNAL_POINTERS - return true; -#else - return false; -#endif -} - // SandboxedPointers are guaranteed to point into the sandbox. This is achieved // for example by storing them as offset rather than as raw pointers. using SandboxedPointer_t = Address; @@ -272,7 +264,7 @@ using ExternalPointerHandle = uint32_t; // ExternalPointers point to objects located outside the sandbox. When // sandboxed external pointers are enabled, these are stored on heap as // ExternalPointerHandles, otherwise they are simply raw pointers. -#ifdef V8_SANDBOXED_EXTERNAL_POINTERS +#ifdef V8_ENABLE_SANDBOX using ExternalPointer_t = ExternalPointerHandle; #else using ExternalPointer_t = Address; @@ -399,9 +391,8 @@ constexpr uint64_t kAllExternalPointerTypeTags[] = { // When the sandbox is enabled, external pointers marked as "sandboxed" above // use the external pointer table (i.e. are sandboxed). This allows a gradual -// rollout of external pointer sandboxing. If V8_SANDBOXED_EXTERNAL_POINTERS is -// defined, all external pointers are sandboxed. If the sandbox is off, no -// external pointers are sandboxed. +// rollout of external pointer sandboxing. If the sandbox is off, no external +// pointers are sandboxed. // // Sandboxed external pointer tags are available when compressing pointers even // when the sandbox is off. Some tags (e.g. kWaiterQueueNodeTag) are used @@ -409,9 +400,7 @@ constexpr uint64_t kAllExternalPointerTypeTags[] = { // alignment requirements. #define sandboxed(X) (X << kExternalPointerTagShift) | kExternalPointerMarkBit #define unsandboxed(X) kUnsandboxedExternalPointerTag -#if defined(V8_SANDBOXED_EXTERNAL_POINTERS) -#define EXTERNAL_POINTER_TAG_ENUM(Name, State, Bits) Name = sandboxed(Bits), -#elif defined(V8_COMPRESS_POINTERS) +#if defined(V8_COMPRESS_POINTERS) #define EXTERNAL_POINTER_TAG_ENUM(Name, State, Bits) Name = State(Bits), #else #define EXTERNAL_POINTER_TAG_ENUM(Name, State, Bits) Name = unsandboxed(Bits), diff --git a/src/api/api.cc b/src/api/api.cc index b172a1d07e..96e8b69e29 100644 --- a/src/api/api.cc +++ b/src/api/api.cc @@ -6162,17 +6162,6 @@ bool v8::V8::Initialize(const int build_config) { kEmbedderSmiValueSize, internal::kSmiValueSize); } - const bool kEmbedderSandboxedExternalPointers = - (build_config & kSandboxedExternalPointers) != 0; - if (kEmbedderSandboxedExternalPointers != - V8_SANDBOXED_EXTERNAL_POINTERS_BOOL) { - FATAL( - "Embedder-vs-V8 build configuration mismatch. On embedder side " - "sandboxed external pointers is %s while on V8 side it's %s.", - kEmbedderSandboxedExternalPointers ? "ENABLED" : "DISABLED", - V8_SANDBOXED_EXTERNAL_POINTERS_BOOL ? 
"ENABLED" : "DISABLED"); - } - const bool kEmbedderSandbox = (build_config & kSandbox) != 0; if (kEmbedderSandbox != V8_ENABLE_SANDBOX_BOOL) { FATAL( diff --git a/src/codegen/tnode.h b/src/codegen/tnode.h index ecd2974b95..1094e7faaf 100644 --- a/src/codegen/tnode.h +++ b/src/codegen/tnode.h @@ -88,7 +88,7 @@ struct ExternalPointerHandleT : Uint32T { static constexpr MachineType kMachineType = MachineType::Uint32(); }; -#ifdef V8_SANDBOXED_EXTERNAL_POINTERS +#ifdef V8_ENABLE_SANDBOX struct ExternalPointerT : Uint32T { static constexpr MachineType kMachineType = MachineType::Uint32(); }; diff --git a/src/common/globals.h b/src/common/globals.h index bd3fd56e87..467a5197ec 100644 --- a/src/common/globals.h +++ b/src/common/globals.h @@ -124,12 +124,6 @@ namespace internal { #define V8_CAN_CREATE_SHARED_HEAP_BOOL false #endif -#ifdef V8_SANDBOXED_EXTERNAL_POINTERS -#define V8_SANDBOXED_EXTERNAL_POINTERS_BOOL true -#else -#define V8_SANDBOXED_EXTERNAL_POINTERS_BOOL false -#endif - #ifdef V8_ENABLE_SANDBOX #define V8_ENABLE_SANDBOX_BOOL true #else @@ -511,7 +505,7 @@ static_assert(kPointerSize == (1 << kPointerSizeLog2)); // This type defines raw storage type for external (or off-V8 heap) pointers // stored on V8 heap. constexpr int kExternalPointerSlotSize = sizeof(ExternalPointer_t); -#ifdef V8_SANDBOXED_EXTERNAL_POINTERS +#ifdef V8_ENABLE_SANDBOX static_assert(kExternalPointerSlotSize == kTaggedSize); #else static_assert(kExternalPointerSlotSize == kSystemPointerSize); diff --git a/src/objects/slots-inl.h b/src/objects/slots-inl.h index 021293b402..989a553f81 100644 --- a/src/objects/slots-inl.h +++ b/src/objects/slots-inl.h @@ -221,7 +221,7 @@ void ExternalPointerSlot::store(Isolate* isolate, Address value, ExternalPointerSlot::RawContent ExternalPointerSlot::GetAndClearContentForSerialization( const DisallowGarbageCollection& no_gc) { -#ifdef V8_SANDBOXED_EXTERNAL_POINTERS +#ifdef V8_ENABLE_SANDBOX ExternalPointerHandle content = Relaxed_LoadHandle(); Relaxed_StoreHandle(kNullExternalPointerHandle); #else @@ -234,7 +234,7 @@ ExternalPointerSlot::GetAndClearContentForSerialization( void ExternalPointerSlot::RestoreContentAfterSerialization( ExternalPointerSlot::RawContent content, const DisallowGarbageCollection& no_gc) { -#ifdef V8_SANDBOXED_EXTERNAL_POINTERS +#ifdef V8_ENABLE_SANDBOX return Relaxed_StoreHandle(content); #else return WriteMaybeUnalignedValue
(address(), content); diff --git a/src/snapshot/serializer.cc b/src/snapshot/serializer.cc index af814ed415..4410790f19 100644 --- a/src/snapshot/serializer.cc +++ b/src/snapshot/serializer.cc @@ -1238,7 +1238,7 @@ void Serializer::ObjectSerializer::OutputRawData(Address up_to) { // snapshot deterministic. CHECK_EQ(CodeDataContainer::kCodeCageBaseUpper32BitsOffset + kTaggedSize, CodeDataContainer::kCodeEntryPointOffset); - static byte field_value[kTaggedSize + kExternalPointerSlotSize] = {0}; + static byte field_value[kTaggedSize + kSystemPointerSize] = {0}; OutputRawWithCustomField( sink_, object_start, base, bytes_to_output, CodeDataContainer::kCodeCageBaseUpper32BitsOffset, diff --git a/src/torque/torque-parser.cc b/src/torque/torque-parser.cc index 92481b2395..596cc0740d 100644 --- a/src/torque/torque-parser.cc +++ b/src/torque/torque-parser.cc @@ -68,8 +68,6 @@ class BuildFlags : public ContextualClass { build_flags_["V8_ENABLE_WEBASSEMBLY"] = false; #endif build_flags_["V8_ENABLE_SANDBOX"] = V8_ENABLE_SANDBOX_BOOL; - build_flags_["V8_SANDBOXED_EXTERNAL_POINTERS"] = - V8_SANDBOXED_EXTERNAL_POINTERS_BOOL; build_flags_["DEBUG"] = DEBUG_BOOL; } static bool GetFlag(const std::string& name, const char* production) { diff --git a/src/wasm/wasm-objects.tq b/src/wasm/wasm-objects.tq index 9807983b1d..55a7e7458d 100644 --- a/src/wasm/wasm-objects.tq +++ b/src/wasm/wasm-objects.tq @@ -14,9 +14,9 @@ extern class WasmInstanceObject extends JSObject; // Represents the context of a function that is defined through the JS or C // APIs. Corresponds to the WasmInstanceObject passed to a Wasm function // reference. -// TODO(manoskouk): If V8_SANDBOXED_EXTERNAL_POINTERS, we cannot encode the -// isolate_root as a sandboxed pointer, because that would require having access -// to the isolate root in the first place. +// TODO(manoskouk): If V8_ENABLE_SANDBOX, we cannot encode the isolate_root as +// a sandboxed pointer, because that would require having access to the isolate +// root in the first place. extern class WasmApiFunctionRef extends HeapObject { isolate_root: RawPtr; native_context: NativeContext; diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc index 0fafc70114..7c1337490a 100644 --- a/test/cctest/test-api.cc +++ b/test/cctest/test-api.cc @@ -687,9 +687,8 @@ TEST(MakingExternalStringConditions) { CHECK(local_string->CanMakeExternal()); // Tiny strings are not in-place externalizable when pointer compression is - // enabled, but they are if sandboxed external pointers are enabled. - CHECK_EQ(V8_SANDBOXED_EXTERNAL_POINTERS_BOOL || - i::kTaggedSize == i::kSystemPointerSize, + // enabled, but they are if the sandbox is enabled. + CHECK_EQ(V8_ENABLE_SANDBOX_BOOL || i::kTaggedSize == i::kSystemPointerSize, tiny_local_string->CanMakeExternal()); } diff --git a/test/cctest/test-strings.cc b/test/cctest/test-strings.cc index 4cc8ca0791..bfabd3ae38 100644 --- a/test/cctest/test-strings.cc +++ b/test/cctest/test-strings.cc @@ -2066,11 +2066,11 @@ TEST(CheckCachedDataInternalExternalUncachedString) { // that we indeed cached it. Handle external_string = Handle::cast(string); - // If sandboxed external pointers are enabled, string objects will always be - // cacheable because they are smaller. - CHECK(V8_SANDBOXED_EXTERNAL_POINTERS_BOOL || external_string->is_uncached()); + // If the sandbox is enabled, string objects will always be cacheable because + // they are smaller. 
+ CHECK(V8_ENABLE_SANDBOX_BOOL || external_string->is_uncached()); CHECK(external_string->resource()->IsCacheable()); - if (!V8_SANDBOXED_EXTERNAL_POINTERS_BOOL) { + if (!V8_ENABLE_SANDBOX_BOOL) { CHECK_NOT_NULL(external_string->resource()->cached_data()); CHECK_EQ(external_string->resource()->cached_data(), external_string->resource()->data()); @@ -2109,11 +2109,11 @@ TEST(CheckCachedDataInternalExternalUncachedStringTwoByte) { // that we indeed cached it. Handle external_string = Handle::cast(string); - // If sandboxed external pointers are enabled, string objects will always be - // cacheable because they are smaller. - CHECK(V8_SANDBOXED_EXTERNAL_POINTERS_BOOL || external_string->is_uncached()); + // If the sandbox is enabled, string objects will always be cacheable because + // they are smaller. + CHECK(V8_ENABLE_SANDBOX_BOOL || external_string->is_uncached()); CHECK(external_string->resource()->IsCacheable()); - if (!V8_SANDBOXED_EXTERNAL_POINTERS_BOOL) { + if (!V8_ENABLE_SANDBOX_BOOL) { CHECK_NOT_NULL(external_string->resource()->cached_data()); CHECK_EQ(external_string->resource()->cached_data(), external_string->resource()->data()); diff --git a/tools/v8heapconst.py b/tools/v8heapconst.py index 1812c48885..34bcad23fe 100644 --- a/tools/v8heapconst.py +++ b/tools/v8heapconst.py @@ -515,67 +515,67 @@ KNOWN_OBJECTS = { ("read_only_space", 0x04b49): "NativeScopeInfo", ("read_only_space", 0x04b61): "HashSeed", ("old_space", 0x04235): "ArgumentsIteratorAccessor", - ("old_space", 0x04255): "ArrayLengthAccessor", - ("old_space", 0x04275): "BoundFunctionLengthAccessor", - ("old_space", 0x04295): "BoundFunctionNameAccessor", - ("old_space", 0x042b5): "ErrorStackAccessor", - ("old_space", 0x042d5): "FunctionArgumentsAccessor", - ("old_space", 0x042f5): "FunctionCallerAccessor", - ("old_space", 0x04315): "FunctionNameAccessor", - ("old_space", 0x04335): "FunctionLengthAccessor", - ("old_space", 0x04355): "FunctionPrototypeAccessor", - ("old_space", 0x04375): "SharedArrayLengthAccessor", - ("old_space", 0x04395): "StringLengthAccessor", - ("old_space", 0x043b5): "ValueUnavailableAccessor", - ("old_space", 0x043d5): "WrappedFunctionLengthAccessor", - ("old_space", 0x043f5): "WrappedFunctionNameAccessor", - ("old_space", 0x04415): "InvalidPrototypeValidityCell", - ("old_space", 0x0441d): "EmptyScript", - ("old_space", 0x04461): "ManyClosuresCell", - ("old_space", 0x0446d): "ArrayConstructorProtector", - ("old_space", 0x04481): "NoElementsProtector", - ("old_space", 0x04495): "MegaDOMProtector", - ("old_space", 0x044a9): "IsConcatSpreadableProtector", - ("old_space", 0x044bd): "ArraySpeciesProtector", - ("old_space", 0x044d1): "TypedArraySpeciesProtector", - ("old_space", 0x044e5): "PromiseSpeciesProtector", - ("old_space", 0x044f9): "RegExpSpeciesProtector", - ("old_space", 0x0450d): "StringLengthProtector", - ("old_space", 0x04521): "ArrayIteratorProtector", - ("old_space", 0x04535): "ArrayBufferDetachingProtector", - ("old_space", 0x04549): "PromiseHookProtector", - ("old_space", 0x0455d): "PromiseResolveProtector", - ("old_space", 0x04571): "MapIteratorProtector", - ("old_space", 0x04585): "PromiseThenProtector", - ("old_space", 0x04599): "SetIteratorProtector", - ("old_space", 0x045ad): "StringIteratorProtector", - ("old_space", 0x045c1): "StringSplitCache", - ("old_space", 0x049c9): "RegExpMultipleCache", - ("old_space", 0x04dd1): "BuiltinsConstantsTable", - ("old_space", 0x05225): "AsyncFunctionAwaitRejectSharedFun", - ("old_space", 0x05249): "AsyncFunctionAwaitResolveSharedFun", - 
("old_space", 0x0526d): "AsyncGeneratorAwaitRejectSharedFun", - ("old_space", 0x05291): "AsyncGeneratorAwaitResolveSharedFun", - ("old_space", 0x052b5): "AsyncGeneratorYieldResolveSharedFun", - ("old_space", 0x052d9): "AsyncGeneratorReturnResolveSharedFun", - ("old_space", 0x052fd): "AsyncGeneratorReturnClosedRejectSharedFun", - ("old_space", 0x05321): "AsyncGeneratorReturnClosedResolveSharedFun", - ("old_space", 0x05345): "AsyncIteratorValueUnwrapSharedFun", - ("old_space", 0x05369): "PromiseAllResolveElementSharedFun", - ("old_space", 0x0538d): "PromiseAllSettledResolveElementSharedFun", - ("old_space", 0x053b1): "PromiseAllSettledRejectElementSharedFun", - ("old_space", 0x053d5): "PromiseAnyRejectElementSharedFun", - ("old_space", 0x053f9): "PromiseCapabilityDefaultRejectSharedFun", - ("old_space", 0x0541d): "PromiseCapabilityDefaultResolveSharedFun", - ("old_space", 0x05441): "PromiseCatchFinallySharedFun", - ("old_space", 0x05465): "PromiseGetCapabilitiesExecutorSharedFun", - ("old_space", 0x05489): "PromiseThenFinallySharedFun", - ("old_space", 0x054ad): "PromiseThrowerFinallySharedFun", - ("old_space", 0x054d1): "PromiseValueThunkFinallySharedFun", - ("old_space", 0x054f5): "ProxyRevokeSharedFun", - ("old_space", 0x05519): "ShadowRealmImportValueFulfilledSFI", - ("old_space", 0x0553d): "SourceTextModuleExecuteAsyncModuleFulfilledSFI", - ("old_space", 0x05561): "SourceTextModuleExecuteAsyncModuleRejectedSFI", + ("old_space", 0x0424d): "ArrayLengthAccessor", + ("old_space", 0x04265): "BoundFunctionLengthAccessor", + ("old_space", 0x0427d): "BoundFunctionNameAccessor", + ("old_space", 0x04295): "ErrorStackAccessor", + ("old_space", 0x042ad): "FunctionArgumentsAccessor", + ("old_space", 0x042c5): "FunctionCallerAccessor", + ("old_space", 0x042dd): "FunctionNameAccessor", + ("old_space", 0x042f5): "FunctionLengthAccessor", + ("old_space", 0x0430d): "FunctionPrototypeAccessor", + ("old_space", 0x04325): "SharedArrayLengthAccessor", + ("old_space", 0x0433d): "StringLengthAccessor", + ("old_space", 0x04355): "ValueUnavailableAccessor", + ("old_space", 0x0436d): "WrappedFunctionLengthAccessor", + ("old_space", 0x04385): "WrappedFunctionNameAccessor", + ("old_space", 0x0439d): "InvalidPrototypeValidityCell", + ("old_space", 0x043a5): "EmptyScript", + ("old_space", 0x043e9): "ManyClosuresCell", + ("old_space", 0x043f5): "ArrayConstructorProtector", + ("old_space", 0x04409): "NoElementsProtector", + ("old_space", 0x0441d): "MegaDOMProtector", + ("old_space", 0x04431): "IsConcatSpreadableProtector", + ("old_space", 0x04445): "ArraySpeciesProtector", + ("old_space", 0x04459): "TypedArraySpeciesProtector", + ("old_space", 0x0446d): "PromiseSpeciesProtector", + ("old_space", 0x04481): "RegExpSpeciesProtector", + ("old_space", 0x04495): "StringLengthProtector", + ("old_space", 0x044a9): "ArrayIteratorProtector", + ("old_space", 0x044bd): "ArrayBufferDetachingProtector", + ("old_space", 0x044d1): "PromiseHookProtector", + ("old_space", 0x044e5): "PromiseResolveProtector", + ("old_space", 0x044f9): "MapIteratorProtector", + ("old_space", 0x0450d): "PromiseThenProtector", + ("old_space", 0x04521): "SetIteratorProtector", + ("old_space", 0x04535): "StringIteratorProtector", + ("old_space", 0x04549): "StringSplitCache", + ("old_space", 0x04951): "RegExpMultipleCache", + ("old_space", 0x04d59): "BuiltinsConstantsTable", + ("old_space", 0x051ad): "AsyncFunctionAwaitRejectSharedFun", + ("old_space", 0x051d1): "AsyncFunctionAwaitResolveSharedFun", + ("old_space", 0x051f5): 
"AsyncGeneratorAwaitRejectSharedFun", + ("old_space", 0x05219): "AsyncGeneratorAwaitResolveSharedFun", + ("old_space", 0x0523d): "AsyncGeneratorYieldResolveSharedFun", + ("old_space", 0x05261): "AsyncGeneratorReturnResolveSharedFun", + ("old_space", 0x05285): "AsyncGeneratorReturnClosedRejectSharedFun", + ("old_space", 0x052a9): "AsyncGeneratorReturnClosedResolveSharedFun", + ("old_space", 0x052cd): "AsyncIteratorValueUnwrapSharedFun", + ("old_space", 0x052f1): "PromiseAllResolveElementSharedFun", + ("old_space", 0x05315): "PromiseAllSettledResolveElementSharedFun", + ("old_space", 0x05339): "PromiseAllSettledRejectElementSharedFun", + ("old_space", 0x0535d): "PromiseAnyRejectElementSharedFun", + ("old_space", 0x05381): "PromiseCapabilityDefaultRejectSharedFun", + ("old_space", 0x053a5): "PromiseCapabilityDefaultResolveSharedFun", + ("old_space", 0x053c9): "PromiseCatchFinallySharedFun", + ("old_space", 0x053ed): "PromiseGetCapabilitiesExecutorSharedFun", + ("old_space", 0x05411): "PromiseThenFinallySharedFun", + ("old_space", 0x05435): "PromiseThrowerFinallySharedFun", + ("old_space", 0x05459): "PromiseValueThunkFinallySharedFun", + ("old_space", 0x0547d): "ProxyRevokeSharedFun", + ("old_space", 0x054a1): "ShadowRealmImportValueFulfilledSFI", + ("old_space", 0x054c5): "SourceTextModuleExecuteAsyncModuleFulfilledSFI", + ("old_space", 0x054e9): "SourceTextModuleExecuteAsyncModuleRejectedSFI", } # Lower 32 bits of first page addresses for various heap spaces. From 053c172d6ccc3c20ad4893ba7e0e0682ce9453bd Mon Sep 17 00:00:00 2001 From: v8-ci-autoroll-builder Date: Mon, 12 Sep 2022 04:19:55 -0700 Subject: [PATCH 0046/1772] Update V8 DEPS (trusted) Rolling v8/base/trace_event/common: https://chromium.googlesource.com/chromium/src/base/trace_event/common/+log/640fc6d..521ac34 Rolling v8/build: https://chromium.googlesource.com/chromium/src/build/+log/24bb610..6180903 Rolling v8/buildtools: https://chromium.googlesource.com/chromium/src/buildtools/+log/46ab4c3..a7f5ad0 Rolling v8/buildtools/linux64: git_revision:00b741b1568d56cf4e117dcb9f70cd42653b4c78..git_revision:b4851eb2062f76a880c07f7fa0d12913beb6d79e Rolling v8/buildtools/third_party/libc++/trunk: https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libcxx/+log/85a3363..60f9078 Rolling v8/buildtools/third_party/libc++abi/trunk: https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libcxxabi/+log/6285577..5c3e02e Rolling v8/buildtools/third_party/libunwind/trunk: https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libunwind/+log/42aa6de..60a480e Rolling v8/third_party/catapult: https://chromium.googlesource.com/catapult/+log/7ee0711..861067d Rolling v8/third_party/depot_tools: https://chromium.googlesource.com/chromium/tools/depot_tools/+log/a089281..2d25dbd Rolling v8/third_party/fuchsia-sdk/sdk: version:9.20220902.1.1..version:9.20220912.0.1 Rolling v8/third_party/zlib: https://chromium.googlesource.com/chromium/src/third_party/zlib/+log/9f4113d..05e137d Rolling v8/tools/clang: https://chromium.googlesource.com/chromium/src/tools/clang/+log/0a22859..2a5ebae Rolling v8/tools/luci-go: git_revision:3226112a79a7c2de84c3186191e24dd61680a77d..git_revision:c93fd3c5ebdc3999eea86a7623dbd1ed4b40bc78 Rolling v8/tools/luci-go: git_revision:3226112a79a7c2de84c3186191e24dd61680a77d..git_revision:c93fd3c5ebdc3999eea86a7623dbd1ed4b40bc78 R=v8-waterfall-sheriff@grotations.appspotmail.com,mtv-sf-v8-sheriff@grotations.appspotmail.com Change-Id: Ife89abccaa2696ade97bb5640010c4f5bdc7009c Reviewed-on: 
https://chromium-review.googlesource.com/c/v8/v8/+/3891191 Commit-Queue: v8-ci-autoroll-builder Bot-Commit: v8-ci-autoroll-builder Cr-Commit-Position: refs/heads/main@{#83131} --- DEPS | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/DEPS b/DEPS index efd9e57ad1..8bda762ce8 100644 --- a/DEPS +++ b/DEPS @@ -46,15 +46,15 @@ vars = { 'reclient_version': 're_client_version:0.69.0.458df98-gomaip', # GN CIPD package version. - 'gn_version': 'git_revision:00b741b1568d56cf4e117dcb9f70cd42653b4c78', + 'gn_version': 'git_revision:b4851eb2062f76a880c07f7fa0d12913beb6d79e', # luci-go CIPD package version. - 'luci_go': 'git_revision:3226112a79a7c2de84c3186191e24dd61680a77d', + 'luci_go': 'git_revision:c93fd3c5ebdc3999eea86a7623dbd1ed4b40bc78', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling Fuchsia sdk # and whatever else without interference from each other. - 'fuchsia_version': 'version:9.20220902.1.1', + 'fuchsia_version': 'version:9.20220912.0.1', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -92,11 +92,11 @@ vars = { deps = { 'base/trace_event/common': - Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '640fc6dc86d5e75e6c7e8006cb45fb46c91014e0', + Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '521ac34ebd795939c7e16b37d9d3ddb40e8ed556', 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + '24bb6108a40a44bda3a7fa88d906c0774034df08', + Var('chromium_url') + '/chromium/src/build.git' + '@' + '6180903f18b88b0263a59a547ab083c3baf1362d', 'buildtools': - Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '46ab4c32d461f34456161fac6cd58d203c5083e9', + Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + 'a7f5ad05c477e997b063b250eae6529ecc460a9f', 'buildtools/clang_format/script': Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + '8b525d2747f2584fc35d8c7e612e66f377858df7', 'buildtools/linux64': { @@ -120,11 +120,11 @@ deps = { 'condition': 'host_os == "mac"', }, 'buildtools/third_party/libc++/trunk': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '85a3363f04e1e0e7b85d62d5d9a419e039755262', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '60f90783c34aeab2c49682c6d4ce5520c8cb56b3', 'buildtools/third_party/libc++abi/trunk': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '6285577a9df73170c1496b78542a2c18fa2352fd', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '5c3e02e92ae8bbc1bf1001bd9ef0d76e044ddb86', 'buildtools/third_party/libunwind/trunk': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + '42aa6de5544ec1ccc27da640a044bd3f474ee75a', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + '60a480ee1819266cf8054548454f99838583cd76', 'buildtools/win': { 'packages': [ { @@ -198,7 +198,7 @@ deps = { 'dep_type': 'cipd', }, 'third_party/catapult': { - 'url': Var('chromium_url') + '/catapult.git' + '@' + '7ee071132a536a6616589cc2411674d1b459b4da', + 'url': Var('chromium_url') + '/catapult.git' + '@' + '861067db62eda94b3c144afd46fae5903e9e11f0', 'condition': 'checkout_android', }, 'third_party/colorama/src': { @@ -206,7 +206,7 @@ deps = { 'condition': 'checkout_android', }, 'third_party/depot_tools': - 
Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'a089281a82f9481e246dcc1292145d0e8635f51c', + Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '2d25dbd149b460cc1fa96acbcb1797a12b3c0771', 'third_party/fuchsia-sdk/sdk': { 'packages': [ { @@ -249,9 +249,9 @@ deps = { 'condition': 'checkout_android', }, 'third_party/zlib': - Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '9f4113d3bae3285a4511fd7c827baf64b4f9eb4b', + Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '05e137d33c6a11a93cefe6553f4f983edf9b2de4', 'tools/clang': - Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '0a2285903bf27182c56d8a1cc8b0e0d8a1ce8c31', + Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '2a5ebae0f797d7ad1f27d7f20bd926ce76c29411', 'tools/luci-go': { 'packages': [ { From 9c95863d55505a6893b34fa185f926d079691b24 Mon Sep 17 00:00:00 2001 From: Michael Achenbach Date: Mon, 12 Sep 2022 13:23:44 +0200 Subject: [PATCH 0047/1772] [test] Better finish terminating workers A call to cancel_join_thread() is removed as it is suspected to leave the done_queue with garbled data on process join. Bug: v8:13113 Change-Id: I85a736cee98d1c2a315efdd468cde216ad848c99 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3891251 Reviewed-by: Liviu Rau Commit-Queue: Michael Achenbach Cr-Commit-Position: refs/heads/main@{#83132} --- tools/testrunner/local/pool.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/tools/testrunner/local/pool.py b/tools/testrunner/local/pool.py index 5761c73669..bf04e1f055 100644 --- a/tools/testrunner/local/pool.py +++ b/tools/testrunner/local/pool.py @@ -93,11 +93,6 @@ def Worker(fn, work_queue, done_queue, except Exception as e: logging.exception('Unhandled error during worker execution.') done_queue.put(ExceptionResult(e)) - # When we reach here on normal tear down, all items have been pulled from - # the done_queue before and this should have no effect. On fast abort, it's - # possible that a fast worker left items on the done_queue in memory, which - # will never be pulled. This call purges those to avoid a deadlock. - done_queue.cancel_join_thread() except KeyboardInterrupt: assert False, 'Unreachable' From c8d1ca8a2c445d4031852398c50c2d1b095727f1 Mon Sep 17 00:00:00 2001 From: Seth Brenith Date: Fri, 9 Sep 2022 10:53:47 -0700 Subject: [PATCH 0048/1772] Fix crash in background merging of deserialized scripts BackgroundMergeTask::CompleteMergeInForeground contained an incorrect assumption that some SharedFunctionInfos would have bytecode arrays. Bug: v8:12808, chromium:1360024 Change-Id: I42ca22fc3a4412aea5e5a433e63c685eaf2af242 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3888198 Reviewed-by: Leszek Swirski Commit-Queue: Seth Brenith Cr-Commit-Position: refs/heads/main@{#83133} --- src/codegen/compiler.cc | 10 +++++++--- test/unittests/api/deserialize-unittest.cc | 15 +++++++++++++++ 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/src/codegen/compiler.cc b/src/codegen/compiler.cc index 8d4c972c3c..a0636afc78 100644 --- a/src/codegen/compiler.cc +++ b/src/codegen/compiler.cc @@ -2099,11 +2099,15 @@ Handle BackgroundMergeTask::CompleteMergeInForeground( // pools is required. 
if (forwarder.HasAnythingToForward()) { for (Handle new_sfi : used_new_sfis_) { - forwarder.AddBytecodeArray(new_sfi->GetBytecodeArray(isolate)); + if (new_sfi->HasBytecodeArray(isolate)) { + forwarder.AddBytecodeArray(new_sfi->GetBytecodeArray(isolate)); + } } for (const auto& new_compiled_data : new_compiled_data_for_cached_sfis_) { - forwarder.AddBytecodeArray( - new_compiled_data.cached_sfi->GetBytecodeArray(isolate)); + if (new_compiled_data.cached_sfi->HasBytecodeArray(isolate)) { + forwarder.AddBytecodeArray( + new_compiled_data.cached_sfi->GetBytecodeArray(isolate)); + } } forwarder.IterateAndForwardPointers(); } diff --git a/test/unittests/api/deserialize-unittest.cc b/test/unittests/api/deserialize-unittest.cc index 7bb8b216dc..a1237b6bc3 100644 --- a/test/unittests/api/deserialize-unittest.cc +++ b/test/unittests/api/deserialize-unittest.cc @@ -617,4 +617,19 @@ TEST_F(MergeDeserializedCodeTest, MainThreadReMerge) { true); // lazy_should_be_compiled } +TEST_F(MergeDeserializedCodeTest, Regress1360024) { + // This test case triggers a re-merge on the main thread, similar to + // MainThreadReMerge. However, it does not retain the lazy function's SFI at + // any step, which causes the merge to use the SFI from the newly deserialized + // script for that function. This exercises a bug in the original + // implementation where the re-merging on the main thread would crash if the + // merge algorithm had selected any uncompiled SFIs from the new script. + TestOffThreadMerge(kToplevelAndEager, // retained_before_background_merge + kToplevelAndEager, // aged_before_background_merge + true, // run_code_after_background_merge + kToplevelAndEager, // retained_after_background_merge + kToplevelSfiFlag, // aged_after_background_merge + true); // lazy_should_be_compiled +} + } // namespace v8 From 187fba742d4f4d2a13460ed4f2205f4473fa6da8 Mon Sep 17 00:00:00 2001 From: Leszek Swirski Date: Mon, 12 Sep 2022 15:11:44 +0200 Subject: [PATCH 0049/1772] [maglev] Distinguish receiver and lookup_start_object GetNamedPropertyFromSuper needs both the receiver and the lookup_start_object (the home object prototype), as it does lookups on the latter but calls accessors with the former as the receiver. 
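To make the distinction concrete, here is a minimal standalone sketch (hypothetical types, not Maglev's actual classes): the lookup walks the prototype chain starting at lookup_start_object, while any accessor found along the way still runs with the original receiver as its 'this' value.

#include <functional>
#include <map>
#include <string>

struct Obj {
  // Named accessors; each getter receives the receiver it should act on.
  std::map<std::string, std::function<int(Obj*)>> getters;
  Obj* prototype = nullptr;
};

int LoadNamedFromSuper(Obj* receiver, Obj* lookup_start_object,
                       const std::string& name) {
  // The lookup starts at the home object's prototype, not at the receiver...
  for (Obj* o = lookup_start_object; o != nullptr; o = o->prototype) {
    auto it = o->getters.find(name);
    // ...but a getter found there is still called with the original
    // receiver as its 'this'.
    if (it != o->getters.end()) return it->second(receiver);
  }
  return 0;  // Property not found.
}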
Bug: v8:7700 Change-Id: Ib8b930d06eb8bed090ad1839a05514f0dffc321f Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3891253 Commit-Queue: Leszek Swirski Commit-Queue: Igor Sheludko Auto-Submit: Leszek Swirski Reviewed-by: Igor Sheludko Cr-Commit-Position: refs/heads/main@{#83134} --- src/maglev/maglev-graph-builder.cc | 32 +++++++++++++++++------------- src/maglev/maglev-graph-builder.h | 10 +++++++--- 2 files changed, 25 insertions(+), 17 deletions(-) diff --git a/src/maglev/maglev-graph-builder.cc b/src/maglev/maglev-graph-builder.cc index 23519f7c42..6d64c91bce 100644 --- a/src/maglev/maglev-graph-builder.cc +++ b/src/maglev/maglev-graph-builder.cc @@ -987,14 +987,15 @@ void MaglevGraphBuilder::BuildMapCheck(ValueNode* object, object, known_info, NodeType::kHeapObjectWithKnownMap); } -bool MaglevGraphBuilder::TryBuildMonomorphicLoad(ValueNode* object, +bool MaglevGraphBuilder::TryBuildMonomorphicLoad(ValueNode* receiver, + ValueNode* lookup_start_object, const compiler::MapRef& map, MaybeObjectHandle handler) { if (handler.is_null()) return false; if (handler->IsSmi()) { - return TryBuildMonomorphicLoadFromSmiHandler(object, map, - handler->ToSmi().value()); + return TryBuildMonomorphicLoadFromSmiHandler(receiver, lookup_start_object, + map, handler->ToSmi().value()); } HeapObject ho_handler; if (!handler->GetHeapObject(&ho_handler)) return false; @@ -1007,26 +1008,27 @@ bool MaglevGraphBuilder::TryBuildMonomorphicLoad(ValueNode* object, return false; } else { return TryBuildMonomorphicLoadFromLoadHandler( - object, map, LoadHandler::cast(ho_handler)); + receiver, lookup_start_object, map, LoadHandler::cast(ho_handler)); } } bool MaglevGraphBuilder::TryBuildMonomorphicLoadFromSmiHandler( - ValueNode* object, const compiler::MapRef& map, int32_t handler) { + ValueNode* receiver, ValueNode* lookup_start_object, + const compiler::MapRef& map, int32_t handler) { // Smi handler, emit a map check and LoadField. LoadHandler::Kind kind = LoadHandler::KindBits::decode(handler); if (kind != LoadHandler::Kind::kField) return false; if (LoadHandler::IsWasmStructBits::decode(handler)) return false; - BuildMapCheck(object, map); + BuildMapCheck(lookup_start_object, map); ValueNode* load_source; if (LoadHandler::IsInobjectBits::decode(handler)) { - load_source = object; + load_source = lookup_start_object; } else { // The field is in the property array, first load it from there. load_source = AddNewNode( - {object}, JSReceiver::kPropertiesOrHashOffset); + {lookup_start_object}, JSReceiver::kPropertiesOrHashOffset); } int field_index = LoadHandler::FieldIndexBits::decode(handler); if (LoadHandler::IsDoubleBits::decode(handler)) { @@ -1051,7 +1053,8 @@ bool MaglevGraphBuilder::TryBuildMonomorphicLoadFromSmiHandler( } bool MaglevGraphBuilder::TryBuildMonomorphicLoadFromLoadHandler( - ValueNode* object, const compiler::MapRef& map, LoadHandler handler) { + ValueNode* receiver, ValueNode* lookup_start_object, + const compiler::MapRef& map, LoadHandler handler) { Object maybe_smi_handler = handler.smi_handler(local_isolate_); if (!maybe_smi_handler.IsSmi()) return false; int smi_handler = Smi::cast(maybe_smi_handler).value(); @@ -1069,11 +1072,11 @@ bool MaglevGraphBuilder::TryBuildMonomorphicLoadFromLoadHandler( // Check for string maps before checking if we need to do an access check. // Primitive strings always get the prototype from the native context // they're operated on, so they don't need the access check. 
- BuildCheckString(object); + BuildCheckString(lookup_start_object); } else if (do_access_check_on_lookup_start_object) { return false; } else { - BuildMapCheck(object, map); + BuildMapCheck(lookup_start_object, map); } Object validity_cell = handler.validity_cell(local_isolate_); @@ -1120,7 +1123,7 @@ bool MaglevGraphBuilder::TryBuildMonomorphicLoadFromLoadHandler( Call* call = CreateNewNode(Call::kFixedInputCount + 1, ConvertReceiverMode::kNotNullOrUndefined, GetConstant(getter_ref), GetContext()); - call->set_arg(0, object); + call->set_arg(0, receiver); SetAccumulator(AddNode(call)); break; } @@ -1158,7 +1161,7 @@ void MaglevGraphBuilder::VisitGetNamedProperty() { MaybeObjectHandle handler = FeedbackNexusForSlot(slot).FindHandlerForMap(map.object()); - if (TryBuildMonomorphicLoad(object, map, handler)) return; + if (TryBuildMonomorphicLoad(object, object, map, handler)) return; } break; default: @@ -1206,7 +1209,8 @@ void MaglevGraphBuilder::VisitGetNamedPropertyFromSuper() { MaybeObjectHandle handler = FeedbackNexusForSlot(slot).FindHandlerForMap(map.object()); - if (TryBuildMonomorphicLoad(lookup_start_object, map, handler)) return; + if (TryBuildMonomorphicLoad(receiver, lookup_start_object, map, handler)) + return; } break; default: diff --git a/src/maglev/maglev-graph-builder.h b/src/maglev/maglev-graph-builder.h index 9fff3b7275..c6aab2587c 100644 --- a/src/maglev/maglev-graph-builder.h +++ b/src/maglev/maglev-graph-builder.h @@ -956,12 +956,16 @@ class MaglevGraphBuilder { void BuildCheckSymbol(ValueNode* object); void BuildMapCheck(ValueNode* object, const compiler::MapRef& map); - bool TryBuildMonomorphicLoad(ValueNode* object, const compiler::MapRef& map, + bool TryBuildMonomorphicLoad(ValueNode* receiver, + ValueNode* lookup_start_object, + const compiler::MapRef& map, MaybeObjectHandle handler); - bool TryBuildMonomorphicLoadFromSmiHandler(ValueNode* object, + bool TryBuildMonomorphicLoadFromSmiHandler(ValueNode* receiver, + ValueNode* lookup_start_object, const compiler::MapRef& map, int32_t handler); - bool TryBuildMonomorphicLoadFromLoadHandler(ValueNode* object, + bool TryBuildMonomorphicLoadFromLoadHandler(ValueNode* receiver, + ValueNode* lookup_start_object, const compiler::MapRef& map, LoadHandler handler); From 9f454ee1185bf321d02dd96e1df22b22bfde3054 Mon Sep 17 00:00:00 2001 From: Camillo Bruni Date: Mon, 12 Sep 2022 14:53:24 +0200 Subject: [PATCH 0050/1772] [tools] Skip over group entries in RCS input file MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit callstats.html creates grouped entries on the fly. Thus we can safely ignore already added group entries from the input file. Change-Id: I5a17fc895c4d36bfd7b79fcdb6d4644498998f86 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3890977 Commit-Queue: Camillo Bruni Reviewed-by: Dominik Inführ Cr-Commit-Position: refs/heads/main@{#83135} --- tools/callstats.html | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/tools/callstats.html b/tools/callstats.html index f9506868cf..d67c287959 100644 --- a/tools/callstats.html +++ b/tools/callstats.html @@ -1966,6 +1966,11 @@ code is governed by a BSD-style license that can be found in the LICENSE file. }; } add(entry) { + // Ignore accidentally added Group entries. 
+ if (entry.name.startsWith(GroupedEntry.prefix)) { + console.warn("Skipping accidentally added Group entry:", entry, this); + return; + } let existingEntry = this.entryDict.get(entry.name); if (existingEntry !== undefined) { // Duplicate entries happen when multiple runs are combined into a @@ -1976,11 +1981,6 @@ code is governed by a BSD-style license that can be found in the LICENSE file. if (group.addTimeAndCount(entry)) return; } } else { - // Ignore accidentally added Group entries. - if (entry.name.startsWith(GroupedEntry.prefix)) { - console.warn("Skipping accidentally added Group entry:", entry, this); - return; - } entry.page = this; this.entryDict.set(entry.name, entry); for (let group of this.groups) { @@ -2054,12 +2054,22 @@ code is governed by a BSD-style license that can be found in the LICENSE file. // or the new object style. if (Array.isArray(data)) { for (let i = 0; i < data.length; i++) { - page.add(Entry.fromLegacyJSON(i, data[data.length - i - 1])); + const entryData = data[data.length - i - 1]; + const metricName = entryData[0]; + if (metricName.startsWith(GroupedEntry.prefix)) { + console.warn(`Ignoring input group-entry "${metricName}".`); + continue; + }; + page.add(Entry.fromLegacyJSON(i, entryData)); } } else { let position = 0; - for (let metric_name in data) { - page.add(Entry.fromJSON(position, metric_name, data[metric_name])); + for (let metricName in data) { + if (metricName.startsWith(GroupedEntry.prefix)) { + console.warn(`Ignoring input group-entry "${metricName}".`); + continue; + } + page.add(Entry.fromJSON(position, metricName, data[metricName])); position++; } } From a3d42f5669a96ff4e92a713ab40d4b6ef522f4ec Mon Sep 17 00:00:00 2001 From: Manos Koukoutos Date: Mon, 12 Sep 2022 13:06:21 +0200 Subject: [PATCH 0051/1772] [wasm-gc] Element printing of remaining array types Bug: v8:7748 Change-Id: Ic8b140c2dbf24171fe75b4feea04101f8c22e4dc Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3890992 Reviewed-by: Jakob Kummerow Commit-Queue: Manos Koukoutos Cr-Commit-Position: refs/heads/main@{#83136} --- src/diagnostics/objects-printer.cc | 36 +++++++++++++++++++++++++----- 1 file changed, 31 insertions(+), 5 deletions(-) diff --git a/src/diagnostics/objects-printer.cc b/src/diagnostics/objects-printer.cc index 41a42473dc..4e8e0f829a 100644 --- a/src/diagnostics/objects-printer.cc +++ b/src/diagnostics/objects-printer.cc @@ -1973,13 +1973,39 @@ void WasmArray::WasmArrayPrint(std::ostream& os) { PrintTypedArrayElements(os, reinterpret_cast(data_ptr), len, true); break; - case wasm::kS128: case wasm::kRef: - case wasm::kRefNull: - case wasm::kRtt: - os << "\n Printing elements of this type is unimplemented, sorry"; - // TODO(7748): Implement. 
+ case wasm::kRefNull: { + os << "\n - elements:"; + constexpr uint32_t kWasmArrayMaximumPrintedElements = 5; + for (uint32_t i = 0; + i < std::min(this->length(), kWasmArrayMaximumPrintedElements); + i++) { + os << "\n " << static_cast(i) << " - " + << Brief(TaggedField::load(*this, this->element_offset(i))); + } + if (this->length() > kWasmArrayMaximumPrintedElements) os << "\n ..."; break; + } + case wasm::kS128: { + os << "\n - elements:"; + constexpr uint32_t kWasmArrayMaximumPrintedElements = 5; + for (uint32_t i = 0; + i < std::min(this->length(), kWasmArrayMaximumPrintedElements); + i++) { + os << "\n " << static_cast(i) << " - 0x" << std::hex; +#ifdef V8_TARGET_BIG_ENDIAN + for (int j = 0; j < kSimd128Size; j++) { +#else + for (int j = kSimd128Size - 1; j >= 0; j--) { +#endif + os << reinterpret_cast(this->ElementAddress(i))[j]; + } + os << std::dec; + } + if (this->length() > kWasmArrayMaximumPrintedElements) os << "\n ..."; + break; + } + case wasm::kRtt: case wasm::kBottom: case wasm::kVoid: UNREACHABLE(); From f550ba8db3af199a74fd0e9037c7549be3890a98 Mon Sep 17 00:00:00 2001 From: Manos Koukoutos Date: Mon, 12 Sep 2022 13:07:47 +0200 Subject: [PATCH 0052/1772] [wasm][test] Add missing unrolling test Bug: v8:12166 Change-Id: Ib1d9ac90a2b9c03915c496f1d23586ab8a94aef7 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3891209 Commit-Queue: Manos Koukoutos Reviewed-by: Jakob Kummerow Cr-Commit-Position: refs/heads/main@{#83137} --- test/mjsunit/wasm/loop-unrolling.js | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/test/mjsunit/wasm/loop-unrolling.js b/test/mjsunit/wasm/loop-unrolling.js index 9a358246f7..db9849ab47 100644 --- a/test/mjsunit/wasm/loop-unrolling.js +++ b/test/mjsunit/wasm/loop-unrolling.js @@ -38,7 +38,28 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js"); (function MultiBlockResultTest() { print(arguments.callee.name); let builder = new WasmModuleBuilder(); - // TODO(manoskouk): Rewrite this test. + let sig = builder.addType(kSig_ii_ii); + + // f(a, b) = a + b + b + b - a*b*b*b + builder.addFunction("main", kSig_i_ii) + .addLocals(kWasmI32, 2) + .addBody([ + kExprLocalGet, 0, kExprLocalGet, 0, + kExprLoop, sig, + kExprLocalSet, 2, // Temporarily store the second value. + kExprLocalGet, 1, kExprI32Add, + // multiply the second value by 2 + kExprLocalGet, 2, kExprLocalGet, 1, kExprI32Mul, + // Increment counter, then loop if <= 3. + kExprLocalGet, 3, kExprI32Const, 1, kExprI32Add, kExprLocalSet, 3, + kExprLocalGet, 3, kExprI32Const, 3, kExprI32LtS, + kExprBrIf, 0, + kExprEnd, + kExprI32Sub]) + .exportFunc(); + + let instance = builder.instantiate(); + assertEquals(10 + 5 + 5 + 5 - (10 * 5 * 5 * 5), instance.exports.main(10, 5)) })(); // Test the interaction between tail calls and loop unrolling. 
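A quick sanity check of the expected value in the new test: each loop iteration rewrites the two stack slots (x, y) as (x + b, y * b), and the counter makes the loop body run exactly three times, so main(10, 5) computes (10 + 5 + 5 + 5) - (10 * 5 * 5 * 5) = 25 - 1250 = -1225, which is precisely what the assertEquals expression recomputes.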
From 29aed83f3334d20a98359dfcd454c5f58b4f11c4 Mon Sep 17 00:00:00 2001 From: Frank Tang Date: Sat, 10 Sep 2022 16:28:51 -0700 Subject: [PATCH 0053/1772] [test262] Roll test262 https://chromium.googlesource.com/external/github.com/tc39/test262/+log/8dcc0e19..7461973 Bug: v8:7834 Change-Id: I2dc32c22a01f0a6729e326864812f4230ad6ac54 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3880731 Reviewed-by: Shu-yu Guo Commit-Queue: Adam Klein Cr-Commit-Position: refs/heads/main@{#83138} --- DEPS | 2 +- test/test262/test262.status | 7 +++++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/DEPS b/DEPS index 8bda762ce8..0e0fff3c4b 100644 --- a/DEPS +++ b/DEPS @@ -150,7 +150,7 @@ deps = { 'test/mozilla/data': Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be', 'test/test262/data': - Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '8dcc0e1955b1753271ed0812d1a2a15a23de069b', + Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '746197355c1705b7d4463fc75c29433c0ce2fd0d', 'third_party/android_ndk': { 'url': Var('chromium_url') + '/android_ndk.git' + '@' + '8388a2be5421311dc75c5f937aae13d821a27f3d', 'condition': 'checkout_android', diff --git a/test/test262/test262.status b/test/test262/test262.status index ac2dfe5bc6..9c71d131f5 100644 --- a/test/test262/test262.status +++ b/test/test262/test262.status @@ -225,6 +225,10 @@ 'language/expressions/async-generator/generator-created-after-decl-inst': [FAIL], 'language/statements/async-generator/generator-created-after-decl-inst': [FAIL], + # https://bugs.chromium.org/p/v8/issues/detail?id=13275 + 'language/statements/async-generator/yield-star-promise-not-unwrapped': [FAIL], + 'language/statements/async-generator/yield-star-return-then-getter-ticks': [FAIL], + # https://bugs.chromium.org/p/v8/issues/detail?id=9875 'language/expressions/coalesce/tco-pos-undefined': [FAIL], 'language/expressions/coalesce/tco-pos-null': [FAIL], @@ -583,6 +587,9 @@ 'staging/Intl402/Temporal/old/yearmonth-toLocaleString': [FAIL], 'staging/Intl402/Temporal/old/zoneddatetime-toLocaleString': [FAIL], + # https://github.com/tc39/proposal-intl-numberformat-v3/pull/107 + 'intl402/NumberFormat/test-option-useGrouping': [FAIL], + # https://bugs.chromium.org/p/v8/issues/detail?id=11660 'intl402/DurationFormat/prototype/prototype_attributes': [FAIL], 'intl402/DurationFormat/prototype/constructor/prop-desc': [FAIL], From 3868e2ceb47ad520722888496f3cef12f1950e72 Mon Sep 17 00:00:00 2001 From: Shu-yu Guo Date: Fri, 9 Sep 2022 17:09:38 -0700 Subject: [PATCH 0054/1772] [strings] Use current isolate when externalizing shared strings v8::String::MakeExternal is currently incorrectly using the shared isolate of the shared string, which will race when setting VM state. In general the shared Isolate shouldn't be used for anything, it's an implementation detail to hold the shared heap space. 
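The pattern behind the fix can be sketched in a few lines (hypothetical types, not V8's real Isolate API): objects in the shared heap must be attributed to the isolate of the thread doing the work, never to the shared isolate itself, since several threads would otherwise race on its state.

struct Isolate {
  static thread_local Isolate* current;
  static Isolate* Current() { return current; }
};
thread_local Isolate* Isolate::current = nullptr;

struct HeapObject {
  bool in_shared_heap = false;
  Isolate* owning_isolate = nullptr;
  bool IsShared() const { return in_shared_heap; }
};

// Pick the isolate whose state (e.g. VM state) is updated while
// externalizing a string.
Isolate* IsolateForExternalization(const HeapObject& obj) {
  // Shared strings: use the calling thread's isolate. Private strings:
  // the owning isolate is safe because only its thread touches them.
  return obj.IsShared() ? Isolate::Current() : obj.owning_isolate;
}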
Bug: v8:12007, v8:13276 Fixed: v8:13276 Change-Id: I21ec57645ed4740a4c19c51b8fa1e2928a07a0f4 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3888384 Reviewed-by: Adam Klein Auto-Submit: Shu-yu Guo Commit-Queue: Shu-yu Guo Cr-Commit-Position: refs/heads/main@{#83139} --- src/api/api.cc | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/src/api/api.cc b/src/api/api.cc index 96e8b69e29..608da3369f 100644 --- a/src/api/api.cc +++ b/src/api/api.cc @@ -7055,9 +7055,16 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) { return false; } - // It is safe to call GetIsolateFromWritableHeapObject because - // SupportsExternalization already checked that the object is writable. - i::Isolate* i_isolate = i::GetIsolateFromWritableObject(obj); + // TODO(v8:12007): Consider adding + // MakeExternal(Isolate*, ExternalStringResource*). + i::Isolate* i_isolate; + if (obj.IsShared()) { + i_isolate = i::Isolate::Current(); + } else { + // It is safe to call GetIsolateFromWritableHeapObject because + // SupportsExternalization already checked that the object is writable. + i_isolate = i::GetIsolateFromWritableObject(obj); + } ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); CHECK(resource && resource->data()); @@ -7081,9 +7088,16 @@ bool v8::String::MakeExternal( return false; } - // It is safe to call GetIsolateFromWritableHeapObject because - // SupportsExternalization already checked that the object is writable. - i::Isolate* i_isolate = i::GetIsolateFromWritableObject(obj); + // TODO(v8:12007): Consider adding + // MakeExternal(Isolate*, ExternalOneByteStringResource*). + i::Isolate* i_isolate; + if (obj.IsShared()) { + i_isolate = i::Isolate::Current(); + } else { + // It is safe to call GetIsolateFromWritableHeapObject because + // SupportsExternalization already checked that the object is writable. + i_isolate = i::GetIsolateFromWritableObject(obj); + } ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); CHECK(resource && resource->data()); From e678d10e0cf43ab523673244679a03df44d97cd1 Mon Sep 17 00:00:00 2001 From: v8-ci-autoroll-builder Date: Mon, 12 Sep 2022 20:09:46 -0700 Subject: [PATCH 0055/1772] Update V8 DEPS (trusted) Rolling v8/build: https://chromium.googlesource.com/chromium/src/build/+log/6180903..7fcb69a Rolling v8/buildtools: https://chromium.googlesource.com/chromium/src/buildtools/+log/a7f5ad0..4276428 Rolling v8/third_party/catapult: https://chromium.googlesource.com/catapult/+log/861067d..4864449 Rolling v8/third_party/fuchsia-sdk/sdk: version:9.20220912.0.1..version:9.20220912.3.1 R=v8-waterfall-sheriff@grotations.appspotmail.com,mtv-sf-v8-sheriff@grotations.appspotmail.com Change-Id: I93a73b86e70bcc8c1aa9a4ae61c6aa15ec37cdc1 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3893410 Commit-Queue: v8-ci-autoroll-builder Bot-Commit: v8-ci-autoroll-builder Cr-Commit-Position: refs/heads/main@{#83140} --- DEPS | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/DEPS b/DEPS index 0e0fff3c4b..02fa1342f4 100644 --- a/DEPS +++ b/DEPS @@ -54,7 +54,7 @@ vars = { # Three lines of non-changing comments so that # the commit queue can handle CLs rolling Fuchsia sdk # and whatever else without interference from each other. 
- 'fuchsia_version': 'version:9.20220912.0.1', + 'fuchsia_version': 'version:9.20220912.3.1', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -94,9 +94,9 @@ deps = { 'base/trace_event/common': Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '521ac34ebd795939c7e16b37d9d3ddb40e8ed556', 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + '6180903f18b88b0263a59a547ab083c3baf1362d', + Var('chromium_url') + '/chromium/src/build.git' + '@' + '7fcb69a42d71a2ab52b833bdc5f0e83536c31ef4', 'buildtools': - Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + 'a7f5ad05c477e997b063b250eae6529ecc460a9f', + Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '42764285a09b521f7764ceff8f8dbefa8dd26cb6', 'buildtools/clang_format/script': Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + '8b525d2747f2584fc35d8c7e612e66f377858df7', 'buildtools/linux64': { @@ -198,7 +198,7 @@ deps = { 'dep_type': 'cipd', }, 'third_party/catapult': { - 'url': Var('chromium_url') + '/catapult.git' + '@' + '861067db62eda94b3c144afd46fae5903e9e11f0', + 'url': Var('chromium_url') + '/catapult.git' + '@' + '486444967e3ba7da8e2a97b5a4f39f58125b2ab1', 'condition': 'checkout_android', }, 'third_party/colorama/src': { From 0381aead8451746dfff14d3de82b443c67ecf62c Mon Sep 17 00:00:00 2001 From: Frank Tang Date: Mon, 12 Sep 2022 16:12:23 -0700 Subject: [PATCH 0056/1772] [Temporal] Sync BalanceISODate to PR 2178 Only change the implementation in BalanceISODate from https://github.com/tc39/proposal-temporal/pull/2178/files#diff-113bc23f7ddc769c78deac4268f2400a0a8ca75258f4a6a8af8219cf430a0788 Changes of other AOs in that PR are not in this CL. Note: Split from cl/3864358 Bug: v8:11544 Change-Id: I8c8514642cdb522975b23bcc9c2bb9eb56cb2839 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3892177 Reviewed-by: Adam Klein Commit-Queue: Frank Tang Cr-Commit-Position: refs/heads/main@{#83141} --- src/objects/js-temporal-objects.cc | 101 +++++++++-------------------- test/test262/test262.status | 6 -- 2 files changed, 32 insertions(+), 75 deletions(-) diff --git a/src/objects/js-temporal-objects.cc b/src/objects/js-temporal-objects.cc index 6ac2d6387c..a098eb454f 100644 --- a/src/objects/js-temporal-objects.cc +++ b/src/objects/js-temporal-objects.cc @@ -4964,80 +4964,43 @@ Handle UnitToString(Isolate* isolate, Unit unit) { } } +// #sec-temporal-create-iso-date-record +DateRecordCommon CreateISODateRecord(Isolate* isolate, + const DateRecordCommon& date) { + // 1. Assert: IsValidISODate(year, month, day) is true. + DCHECK(IsValidISODate(isolate, date)); + // 2. Return the Record { [[Year]]: year, [[Month]]: month, [[Day]]: day }. + return date; +} + // #sec-temporal-balanceisodate DateRecordCommon BalanceISODate(Isolate* isolate, const DateRecordCommon& date) { TEMPORAL_ENTER_FUNC(); + // 1. Let epochDays be MakeDay(𝔽(year), 𝔽(month - 1), 𝔽(day)). + double epoch_days = MakeDay(date.year, date.month - 1, date.day); + // 2. Assert: epochDays is finite. + DCHECK(std::isfinite(epoch_days)); + // 3. Let ms be MakeDate(epochDays, +0𝔽). + double ms = MakeDate(epoch_days, 0); + // 4. Return CreateISODateRecord(ℝ(YearFromTime(ms)), ℝ(MonthFromTime(ms)) + + // 1, ℝ(DateFromTime(ms))). + int year = 0; + int month = 0; + int day = 0; + int wday = 0; + int hour = 0; + int minute = 0; + int second = 0; + int millisecond = 0; - DateRecordCommon result = date; - // 1. 
Assert: year, month, and day are integers. - // 2. Let balancedYearMonth be ! BalanceISOYearMonth(year, month). - // 3. Set month to balancedYearMonth.[[Month]]. - // 4. Set year to balancedYearMonth.[[Year]]. - BalanceISOYearMonth(isolate, &(result.year), &(result.month)); - // 5. NOTE: To deal with negative numbers of days whose absolute value is - // greater than the number of days in a year, the following section subtracts - // years and adds days until the number of days is greater than −366 or −365. - // 6. If month > 2, then - // a. Let testYear be year. - // 7. Else, - // a. Let testYear be year − 1. - int32_t test_year = (date.month > 2) ? date.year : date.year - 1; - // 8. Repeat, while day < −1 × ! ISODaysInYear(testYear), - int32_t iso_days_in_year; - while (result.day < -(iso_days_in_year = ISODaysInYear(isolate, test_year))) { - // a. Set day to day + ! ISODaysInYear(testYear). - result.day += iso_days_in_year; - // b. Set year to year − 1. - (result.year)--; - // c. Set testYear to testYear − 1. - test_year--; - } - // 9. NOTE: To deal with numbers of days greater than the number of days in a - // year, the following section adds years and subtracts days until the number - // of days is less than 366 or 365. - // 10. Let testYear be year + 1. - test_year = (result.year) + 1; - // 11. Repeat, while day > ! ISODaysInYear(testYear), - while (result.day > (iso_days_in_year = ISODaysInYear(isolate, test_year))) { - // a. Set day to day − ! ISODaysInYear(testYear). - result.day -= iso_days_in_year; - // b. Set year to year + 1. - result.year++; - // c. Set testYear to testYear + 1. - test_year++; - } - // 12. NOTE: To deal with negative numbers of days whose absolute value is - // greater than the number of days in the current month, the following section - // subtracts months and adds days until the number of days is greater than 0. - // 13. Repeat, while day < 1, - while (result.day < 1) { - // a. Set balancedYearMonth to ! BalanceISOYearMonth(year, month − 1). - // b. Set year to balancedYearMonth.[[Year]]. - // c. Set month to balancedYearMonth.[[Month]]. - result.month -= 1; - BalanceISOYearMonth(isolate, &(result.year), &(result.month)); - // d. Set day to day + ! ISODaysInMonth(year, month). - result.day += ISODaysInMonth(isolate, result.year, result.month); - } - // 14. NOTE: To deal with numbers of days greater than the number of days in - // the current month, the following section adds months and subtracts days - // until the number of days is less than the number of days in the month. - // 15. Repeat, while day > ! ISODaysInMonth(year, month), - int32_t iso_days_in_month; - while (result.day > (iso_days_in_month = ISODaysInMonth(isolate, result.year, - result.month))) { - // a. Set day to day − ! ISODaysInMonth(year, month). - result.day -= iso_days_in_month; - // b. Set balancedYearMonth to ! BalanceISOYearMonth(year, month + 1). - // c. Set year to balancedYearMonth.[[Year]]. - // d. Set month to balancedYearMonth.[[Month]]. - result.month += 1; - BalanceISOYearMonth(isolate, &(result.year), &(result.month)); - } - // 16. Return the new Record { [[Year]]: year, [[Month]]: month, [[Day]]: day - // }. 
-  return result;
+  DCHECK(std::isfinite(ms));
+  DCHECK_LT(ms, static_cast<double>(std::numeric_limits<int64_t>::max()));
+  DCHECK_GT(ms, static_cast<double>(std::numeric_limits<int64_t>::min()));
+  isolate->date_cache()->BreakDownTime(ms, &year, &month, &day, &wday, &hour,
+                                       &minute, &second, &millisecond);
+
+  return CreateISODateRecord(isolate, {year, month + 1, day});
 }

 // #sec-temporal-adddatetime
diff --git a/test/test262/test262.status b/test/test262/test262.status
index 9c71d131f5..881d55aaa8 100644
--- a/test/test262/test262.status
+++ b/test/test262/test262.status
@@ -444,10 +444,6 @@
   'built-ins/RegExp/prototype/Symbol.replace/get-flags-err': [FAIL],
   'built-ins/RegExp/prototype/Symbol.replace/get-unicode-error': [FAIL],

-  # PlainTime RelativeTime
-  # https://github.com/tc39/proposal-temporal/pull/1862
-  'built-ins/Temporal/Duration/prototype/total/timezone-getpossibleinstantsfor-iterable': [FAIL],
-
   # https://bugs.chromium.org/p/v8/issues/detail?id=11544
   'built-ins/Temporal/Duration/prototype/total/balance-negative-result': [FAIL],
   'intl402/Temporal/Calendar/prototype/dateFromFields/infinity-throws-rangeerror': [FAIL],
@@ -530,7 +526,6 @@
   'built-ins/Temporal/Duration/prototype/subtract/relativeto-propertybag-calendar-number': [FAIL],
   'built-ins/Temporal/Duration/prototype/total/relativeto-propertybag-calendar-number': [FAIL],

-  'built-ins/Temporal/Duration/prototype/add/relativeto-year': [FAIL],
   'intl402/Temporal/Calendar/prototype/dateFromFields/order-of-operations': [FAIL],
   'intl402/Temporal/Calendar/prototype/monthDayFromFields/order-of-operations': [FAIL],
   'intl402/Temporal/Calendar/prototype/yearMonthFromFields/order-of-operations': [FAIL],
@@ -554,7 +549,6 @@
   'staging/Temporal/Regex/old/timezone': [FAIL],
   'staging/Temporal/TimeZone/old/getNextTransition': [FAIL],
   'staging/Temporal/TimeZone/old/subminute-offset': [FAIL],
-  'staging/Temporal/UserCalendar/old/trivial-protocol-implementation': [FAIL],
   'staging/Temporal/ZonedDateTime/old/construction-and-properties': [FAIL],
   'staging/Temporal/ZonedDateTime/old/dst-math': [FAIL],
   'staging/Temporal/ZonedDateTime/old/dst-properties': [FAIL],

From 66b788de9fe1213fe2be8b9399d5092cf1f85836 Mon Sep 17 00:00:00 2001
From: Leszek Swirski
Date: Mon, 12 Sep 2022 16:50:02 +0200
Subject: [PATCH 0057/1772] [maglev] Fix catch prediction lookup

Predicate updates were missed when implementing TF-compatible exception
handling.

Bug: v8:7700
Change-Id: I6b50f67d15e4a98879d651be196d4032bfc46100
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3891258
Commit-Queue: Jakob Linke
Auto-Submit: Leszek Swirski
Reviewed-by: Jakob Linke
Cr-Commit-Position: refs/heads/main@{#83142}
---
 src/execution/isolate.cc | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/execution/isolate.cc b/src/execution/isolate.cc
index 80e4c23260..6061ae5868 100644
--- a/src/execution/isolate.cc
+++ b/src/execution/isolate.cc
@@ -2288,7 +2288,7 @@ Object Isolate::UnwindAndFindHandler() {
 namespace {
 HandlerTable::CatchPrediction PredictException(JavaScriptFrame* frame) {
   HandlerTable::CatchPrediction prediction;
-  if (frame->is_turbofan()) {
+  if (frame->is_optimized()) {
     if (frame->LookupExceptionHandlerInTable(nullptr, nullptr) > 0) {
       // This optimized frame will catch.
Its handler table does not include
       // exception prediction, and we need to use the corresponding handler
@@ -2368,6 +2368,7 @@ Isolate::CatchType Isolate::PredictExceptionCatcher() {
     case StackFrame::INTERPRETED:
     case StackFrame::BASELINE:
     case StackFrame::TURBOFAN:
+    case StackFrame::MAGLEV:
     case StackFrame::BUILTIN: {
       JavaScriptFrame* js_frame = JavaScriptFrame::cast(frame);
       Isolate::CatchType prediction = ToCatchType(PredictException(js_frame));

From 9438113d9a28b187c94adfd5d7720bc1acebb35f Mon Sep 17 00:00:00 2001
From: Leszek Swirski
Date: Mon, 12 Sep 2022 15:48:10 +0200
Subject: [PATCH 0058/1772] [maglev] Add control node class for terminal nodes

Allow distinguishing control nodes that do and don't allow continued
execution.

Bug: v8:7700
Change-Id: Ifa13b64821484584929bd62a0d8585aee160c19e
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3891255
Commit-Queue: Jakob Linke
Auto-Submit: Leszek Swirski
Reviewed-by: Jakob Linke
Cr-Commit-Position: refs/heads/main@{#83143}
---
 src/maglev/maglev-ir.h        | 62 +++++++++++++++++++++++++++--------
 src/maglev/maglev-regalloc.cc |  6 ++--
 2 files changed, 52 insertions(+), 16 deletions(-)

diff --git a/src/maglev/maglev-ir.h b/src/maglev/maglev-ir.h
index 4f30f9845e..8b63df2363 100644
--- a/src/maglev/maglev-ir.h
+++ b/src/maglev/maglev-ir.h
@@ -224,10 +224,13 @@ class CompactInterpreterFrameState;
   V(JumpToInlined)                 \
   V(JumpFromInlined)

+#define TERMINAL_CONTROL_NODE_LIST(V) \
+  V(Abort)                            \
+  V(Return)                           \
+  V(Deopt)
+
 #define CONTROL_NODE_LIST(V)       \
-  V(Abort)                         \
-  V(Return)                        \
-  V(Deopt)                         \
+  TERMINAL_CONTROL_NODE_LIST(V)    \
   CONDITIONAL_CONTROL_NODE_LIST(V) \
   UNCONDITIONAL_CONTROL_NODE_LIST(V)

@@ -282,6 +285,11 @@ static constexpr Opcode kLastUnconditionalControlNodeOpcode =
 static constexpr Opcode kFirstUnconditionalControlNodeOpcode =
     std::min({UNCONDITIONAL_CONTROL_NODE_LIST(V) kLastOpcode});

+static constexpr Opcode kLastTerminalControlNodeOpcode =
+    std::max({TERMINAL_CONTROL_NODE_LIST(V) kFirstOpcode});
+static constexpr Opcode kFirstTerminalControlNodeOpcode =
+    std::min({TERMINAL_CONTROL_NODE_LIST(V) kLastOpcode});
+
 static constexpr Opcode kFirstControlNodeOpcode =
     std::min({CONTROL_NODE_LIST(V) kLastOpcode});
 static constexpr Opcode kLastControlNodeOpcode =
@@ -313,6 +321,10 @@ constexpr bool IsUnconditionalControlNode(Opcode opcode) {
   return kFirstUnconditionalControlNodeOpcode <= opcode &&
          opcode <= kLastUnconditionalControlNodeOpcode;
 }
+constexpr bool IsTerminalControlNode(Opcode opcode) {
+  return kFirstTerminalControlNodeOpcode <= opcode &&
+         opcode <= kLastTerminalControlNodeOpcode;
+}

 // Forward-declare NodeBase sub-hierarchies.
 class Node;
@@ -320,6 +332,7 @@ class ControlNode;
 class ConditionalControlNode;
 class BranchControlNode;
 class UnconditionalControlNode;
+class TerminalControlNode;
 class ValueNode;

 enum class ValueRepresentation : uint8_t { kTagged, kInt32, kFloat64 };
@@ -1025,6 +1038,10 @@ template <>
 constexpr bool NodeBase::Is<UnconditionalControlNode>() const {
   return IsUnconditionalControlNode(opcode());
 }
+template <>
+constexpr bool NodeBase::Is<TerminalControlNode>() const {
+  return IsTerminalControlNode(opcode());
+}

 // The Node class hierarchy contains all non-control nodes.
class Node : public NodeBase { @@ -3518,10 +3535,9 @@ class ControlNode : public NodeBase { return next_post_dominating_hole_; } void set_next_post_dominating_hole(ControlNode* node) { - DCHECK_IMPLIES(node != nullptr, - node->Is() || node->Is() || - node->Is() || node->Is() || - node->Is()); + DCHECK_IMPLIES(node != nullptr, node->Is() || + node->Is() || + node->Is()); next_post_dominating_hole_ = node; } @@ -3599,6 +3615,26 @@ class BranchControlNode : public ConditionalControlNode { BasicBlockRef if_false_; }; +class TerminalControlNode : public ControlNode { + protected: + explicit TerminalControlNode(uint64_t bitfield) : ControlNode(bitfield) {} +}; + +template +class TerminalControlNodeT : public TerminalControlNode { + static_assert(IsTerminalControlNode(opcode_of)); + + public: + // Shadowing for static knowledge. + constexpr Opcode opcode() const { return NodeBase::opcode_of; } + + protected: + explicit TerminalControlNodeT(uint64_t bitfield) + : TerminalControlNode(bitfield) { + DCHECK_EQ(NodeBase::opcode(), opcode_of); + } +}; + template class BranchControlNodeT : public BranchControlNode { static_assert(IsBranchControlNode(opcode_of)); @@ -3679,10 +3715,10 @@ class JumpFromInlined : public UnconditionalControlNodeT { DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS() }; -class Abort : public ControlNode { +class Abort : public TerminalControlNode { public: explicit Abort(uint64_t bitfield, AbortReason reason) - : ControlNode(bitfield), reason_(reason) { + : TerminalControlNode(bitfield), reason_(reason) { DCHECK_EQ(NodeBase::opcode(), opcode_of); } @@ -3694,9 +3730,9 @@ class Abort : public ControlNode { const AbortReason reason_; }; -class Return : public ControlNode { +class Return : public TerminalControlNode { public: - explicit Return(uint64_t bitfield) : ControlNode(bitfield) { + explicit Return(uint64_t bitfield) : TerminalControlNode(bitfield) { DCHECK_EQ(NodeBase::opcode(), opcode_of); } @@ -3705,10 +3741,10 @@ class Return : public ControlNode { DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS() }; -class Deopt : public ControlNode { +class Deopt : public TerminalControlNode { public: explicit Deopt(uint64_t bitfield, DeoptimizeReason reason) - : ControlNode(bitfield), reason_(reason) { + : TerminalControlNode(bitfield), reason_(reason) { DCHECK_EQ(NodeBase::opcode(), opcode_of); } diff --git a/src/maglev/maglev-regalloc.cc b/src/maglev/maglev-regalloc.cc index c3558b031c..71b8cc9ad9 100644 --- a/src/maglev/maglev-regalloc.cc +++ b/src/maglev/maglev-regalloc.cc @@ -20,6 +20,7 @@ #include "src/maglev/maglev-graph.h" #include "src/maglev/maglev-interpreter-frame-state.h" #include "src/maglev/maglev-ir-inl.h" +#include "src/maglev/maglev-ir.h" #include "src/maglev/maglev-regalloc-data.h" namespace v8 { @@ -81,11 +82,10 @@ ControlNode* HighestPostDominatingHole(ControlNode* first, // Walk the highest branch to find where it goes. if (first->id() > second->id()) std::swap(first, second); - // If the first branch returns or jumps back, we've found highest + // If the first branch terminates or jumps back, we've found highest // reachable control-node of the longest branch (the second control // node). 
-  if (first->Is<Abort>() || first->Is<Return>() || first->Is<Deopt>() ||
-      first->Is<JumpLoop>()) {
+  if (first->Is<TerminalControlNode>() || first->Is<JumpLoop>()) {
     return second;
   }

From dd6fa2d1c7f667f821c61af4c4d4f755544977c6 Mon Sep 17 00:00:00 2001
From: Leszek Swirski
Date: Mon, 12 Sep 2022 14:38:21 +0200
Subject: [PATCH 0059/1772] [maglev] Fix lifetime extension of generator values

Loop used-value lifetime extension extends the lifetime of anything
used inside of a loop but defined outside of it, to make sure that it
is considered 'live' for the entire body of the loop (this is so that
we don't e.g. clobber its stack slot with stack slot reuse).

The implementation works on the principle that a) basic blocks are
topologically sorted by forward control flow, and b) loops are
reducible. This means that basic blocks between a loop header and the
jump to that loop header are inside the loop, and nodes whose id
precedes the loop header's id must be before the loop.

Generator resumes break this reducibility by jumping into the middle of
loops. In principle this is not a problem for the above lifetime
extension, it just means that the loop's used nodes will
overapproximate and include these generator nodes.

However, there was an implicit additional assumption that the node must
be loadable by the loop end, to extend its lifetime. This fails for the
generator resume case, because it's possible that the node didn't make
it into any loop merge state, e.g. because the resume would immediately
deopt or return, e.g.

     Start
    /     \
   /       GeneratorResume
   |            |
   v            |
.->Loop header  |
|      |        |
|    Branch     |
|    |    |     |
|    |  Suspend |
|    |      |   |
|    |   Resume <-'
|    |      |
|    |   Return
|    v
`--JumpLoop

Here the Resume will get the accumulator from the generator and the
Return will use it, which will be seen as an out-of-loop use of the
generator, but the generator was never reachable from the "real" loop
body.

At the end of the day, since there are no actual uses of the generator
value in the loop body, the lifetime extension does no harm; all that
fails is a DCHECK that the values loop lifetime extension extends are
actually loadable. So, we can relax this DCHECK for this specific
generator edge case, by checking whether the JumpLoop is reachable from
the generator resume.
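For a concrete picture, a minimal JavaScript sketch of this shape
(illustrative only; the names are invented and this CL adds no such
test): a generator is resumed into the middle of a loop, and the resume
path returns immediately, so the resumed value is used outside the loop
body and never flows back to the JumpLoop.

  function* gen() {
    let acc = 0;
    while (true) {
      // Suspend inside the loop; next() resumes into the loop body.
      const x = yield acc;
      if (x === undefined) {
        // Resume path that leaves the function immediately: the
        // resumed value is used here, outside the loop, and never
        // reaches the JumpLoop.
        return acc;
      }
      acc += x;
    }
  }

  const it = gen();
  it.next();  // runs to the first yield inside the loop
  it.next();  // resumes with undefined and returns without looping

Whether Maglev compiles this exact source into the graph above depends
on bytecode details; the sketch only shows the control flow being
described.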
Bug: v8:7700
Change-Id: Iec4db2aee5b8812de61c3afb9004c8be3982baa2
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3890975
Auto-Submit: Leszek Swirski
Reviewed-by: Jakob Linke
Commit-Queue: Jakob Linke
Cr-Commit-Position: refs/heads/main@{#83144}
---
 src/maglev/maglev-regalloc.cc | 74 ++++++++++++++++++++++++++++++++++-
 1 file changed, 73 insertions(+), 1 deletion(-)

diff --git a/src/maglev/maglev-regalloc.cc b/src/maglev/maglev-regalloc.cc
index 71b8cc9ad9..8a1a2ab15e 100644
--- a/src/maglev/maglev-regalloc.cc
+++ b/src/maglev/maglev-regalloc.cc
@@ -799,6 +799,71 @@ void StraightForwardRegisterAllocator::InitializeConditionalBranchTarget(
       target);
 }

+#ifdef DEBUG
+namespace {
+
+bool IsReachable(BasicBlock* source_block, BasicBlock* target_block,
+                 std::set<BasicBlock*>& visited) {
+  if (source_block == target_block) return true;
+  if (!visited.insert(source_block).second) return false;
+
+  ControlNode* control_node = source_block->control_node();
+  if (UnconditionalControlNode* unconditional =
+          control_node->TryCast<UnconditionalControlNode>()) {
+    return IsReachable(unconditional->target(), target_block, visited);
+  }
+  if (BranchControlNode* branch =
+          control_node->TryCast<BranchControlNode>()) {
+    return IsReachable(branch->if_true(), target_block, visited) ||
+           IsReachable(branch->if_false(), target_block, visited);
+  }
+  if (Switch* switch_node = control_node->TryCast<Switch>()) {
+    const BasicBlockRef* targets = switch_node->targets();
+    for (int i = 0; i < switch_node->size(); i++) {
+      if (IsReachable(targets[i].block_ptr(), target_block, visited)) {
+        return true;
+      }
+    }
+    if (switch_node->has_fallthrough()) {
+      if (IsReachable(switch_node->fallthrough(), target_block, visited)) {
+        return true;
+      }
+    }
+    return false;
+  }
+  return false;
+}
+
+// Complex predicate for a JumpLoop lifetime extension DCHECK, see comments
+// in AllocateControlNode.
+bool IsValueFromGeneratorResumeThatDoesNotReachJumpLoop(
+    Graph* graph, ValueNode* input_node, BasicBlock* jump_loop_block) {
+  // The given node _must_ be created in the generator resume block. This is
+  // always the third block -- the first is initial values, the second is the
+  // test for an undefined generator, and the third is the generator resume
+  // machinery.
+  DCHECK_GE(graph->num_blocks(), 3);
+  BasicBlock* generator_block = *(graph->begin() + 2);
+  DCHECK_EQ(generator_block->control_node()->opcode(), Opcode::kSwitch);
+
+  bool found_node = false;
+  for (Node* node : generator_block->nodes()) {
+    if (node == input_node) {
+      found_node = true;
+      break;
+    }
+  }
+  DCHECK(found_node);
+
+  std::set<BasicBlock*> visited;
+  bool jump_loop_block_is_reachable_from_generator_block =
+      IsReachable(generator_block, jump_loop_block, visited);
+  DCHECK(!jump_loop_block_is_reachable_from_generator_block);
+
+  return true;
+}
+}  // namespace
+#endif
+
 void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
                                                            BasicBlock* block) {
   current_node_ = node;
@@ -853,7 +918,14 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
     // extended lifetime nodes are dead.
     if (auto jump_loop = node->TryCast<JumpLoop>()) {
       for (Input& input : jump_loop->used_nodes()) {
-        DCHECK(input.node()->has_register() || input.node()->is_loadable());
+        // Since the value is used by the loop, it must be live somewhere
+        // (either in a register or loadable). The exception is when this value
+        // is created in a generator resume, and the use of it cannot reach the
+        // JumpLoop (e.g. because it returns or deopts on resume).
+ DCHECK_IMPLIES( + !input.node()->has_register() && !input.node()->is_loadable(), + IsValueFromGeneratorResumeThatDoesNotReachJumpLoop( + graph_, input.node(), block)); UpdateUse(&input); } } From 58f38e5228fc1faca375d2183129f3fe42509360 Mon Sep 17 00:00:00 2001 From: Greg Thompson Date: Sat, 10 Sep 2022 16:09:53 +0200 Subject: [PATCH 0060/1772] [fuchsia] Remove v8.cmx, as it is no longer used Bug: v8:12589 Change-Id: Idf341625f8fadf4a0145887c0ec6642b5e6bfd88 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3885882 Reviewed-by: Alexander Schulze Commit-Queue: Greg Thompson Cr-Commit-Position: refs/heads/main@{#83145} --- gni/OWNERS | 1 - gni/v8.cmx | 52 ---------------------------------------------------- 2 files changed, 53 deletions(-) delete mode 100644 gni/v8.cmx diff --git a/gni/OWNERS b/gni/OWNERS index 5f16e60686..c20b8de5a2 100644 --- a/gni/OWNERS +++ b/gni/OWNERS @@ -1,6 +1,5 @@ file:../INFRA_OWNERS per-file v8.cml=victorgomes@chromium.org -per-file v8.cmx=victorgomes@chromium.org per-file release_branch_toggle.gni=v8-ci-autoroll-builder@chops-service-accounts.iam.gserviceaccount.com per-file release_branch_toggle.gni=vahl@chromium.org diff --git a/gni/v8.cmx b/gni/v8.cmx deleted file mode 100644 index 45fd74a09f..0000000000 --- a/gni/v8.cmx +++ /dev/null @@ -1,52 +0,0 @@ -{ - "facets": { - "fuchsia.test": { - "system-services": [ - "fuchsia.kernel.VmexResource" - ] - } - }, - "sandbox": { - "dev": [ - "null", - "zero" - ], - "features": [ - "deprecated-ambient-replace-as-executable", - "isolated-cache-storage", - "isolated-persistent-storage", - "isolated-temp", - "root-ssl-certificates", - "vulkan" - ], - "services": [ - "fuchsia.accessibility.semantics.SemanticsManager", - "fuchsia.camera3.DeviceWatcher", - "fuchsia.device.NameProvider", - "fuchsia.fonts.Provider", - "fuchsia.intl.PropertyProvider", - "fuchsia.kernel.VmexResource", - "fuchsia.logger.Log", - "fuchsia.logger.LogSink", - "fuchsia.media.Audio", - "fuchsia.media.SessionAudioConsumerFactory", - "fuchsia.media.drm.Widevine", - "fuchsia.mediacodec.CodecFactory", - "fuchsia.memorypressure.Provider", - "fuchsia.net.NameLookup", - "fuchsia.net.interfaces.State", - "fuchsia.posix.socket.Provider", - "fuchsia.process.Launcher", - "fuchsia.sys.Environment", - "fuchsia.sys.Launcher", - "fuchsia.sys.Loader", - "fuchsia.sysmem.Allocator", - "fuchsia.ui.input.ImeService", - "fuchsia.ui.input.ImeVisibilityService", - "fuchsia.ui.scenic.Scenic", - "fuchsia.ui.policy.Presenter", - "fuchsia.vulkan.loader.Loader", - "fuchsia.web.ContextProvider" - ] - } -} From 002ac4168cb9c61cd91ec6b9293225863100d3a2 Mon Sep 17 00:00:00 2001 From: Fabrice de Gans Date: Mon, 12 Sep 2022 14:13:11 -0700 Subject: [PATCH 0061/1772] [code-health] Fix remaining flake8 issue in v8 Bug: v8:8594 Change-Id: I398678bb92105dc99882e4a253d0c6235628952f Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3892178 Commit-Queue: Michael Lippautz Reviewed-by: Michael Lippautz Auto-Submit: Fabrice de Gans Cr-Commit-Position: refs/heads/main@{#83146} --- tools/dev/update-compile-commands.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/dev/update-compile-commands.py b/tools/dev/update-compile-commands.py index 1e469b0810..26e6be2c25 100755 --- a/tools/dev/update-compile-commands.py +++ b/tools/dev/update-compile-commands.py @@ -52,7 +52,7 @@ def PrepareBuildDir(arch, mode): build_ninja = os.path.join(build_dir, "build.ninja") if not os.path.exists(build_ninja): code = _Call("gn gen %s" % build_dir) - if code != 0: 
raise Error("gn gen failed") + if code != 0: raise Exception("gn gen failed") else: _Call("ninja -C %s build.ninja" % build_dir) return build_dir From 36559d91ca480a61c5ef6d64ea414ccbfee481ac Mon Sep 17 00:00:00 2001 From: Shu-yu Guo Date: Fri, 9 Sep 2022 14:52:34 -0700 Subject: [PATCH 0062/1772] [rab/gsab] Fix length-tracking handling in TA#subarray MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The normative change in https://github.com/tc39/proposal-resizablearraybuffer/pull/93 changed the behavior of TypedArray.prototype.subarray(begin, end) such that if the receiver is a length-tracking TA and end is undefined, the result TypedArray is also length-tracking. This change reached consensus in the March 2022 TC39. Bug: v8:11111 Change-Id: If1a84cc3134f3ce8046196d6cc36683b6996dec0 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3888382 Commit-Queue: Marja Hölttä Auto-Submit: Shu-yu Guo Reviewed-by: Marja Hölttä Cr-Commit-Position: refs/heads/main@{#83147} --- src/builtins/typed-array-createtypedarray.tq | 4 +- src/builtins/typed-array-subarray.tq | 64 ++++++++++++------- .../typedarray-growablesharedarraybuffer.js | 13 ++-- .../typedarray-resizablearraybuffer.js | 17 +++-- 4 files changed, 60 insertions(+), 38 deletions(-) diff --git a/src/builtins/typed-array-createtypedarray.tq b/src/builtins/typed-array-createtypedarray.tq index acee937f75..91dd4404a7 100644 --- a/src/builtins/typed-array-createtypedarray.tq +++ b/src/builtins/typed-array-createtypedarray.tq @@ -478,12 +478,12 @@ transitioning macro TypedArraySpeciesCreateByLength(implicit context: Context)( transitioning macro TypedArraySpeciesCreateByBuffer(implicit context: Context)( methodName: constexpr string, exemplar: JSTypedArray, buffer: JSArrayBuffer, - beginByteOffset: uintptr, newLength: uintptr): JSTypedArray { + beginByteOffset: uintptr, newLength: NumberOrUndefined): JSTypedArray { const numArgs: constexpr int31 = 3; // TODO(v8:4153): pass length further as uintptr. const typedArray: JSTypedArray = TypedArraySpeciesCreate( methodName, numArgs, exemplar, buffer, Convert(beginByteOffset), - Convert(newLength)); + newLength); return typedArray; } diff --git a/src/builtins/typed-array-subarray.tq b/src/builtins/typed-array-subarray.tq index 1d815e84ea..d9506321b8 100644 --- a/src/builtins/typed-array-subarray.tq +++ b/src/builtins/typed-array-subarray.tq @@ -10,16 +10,18 @@ transitioning javascript builtin TypedArrayPrototypeSubArray( const methodName: constexpr string = '%TypedArray%.prototype.subarray'; // 1. Let O be the this value. - // 3. If O does not have a [[TypedArrayName]] internal slot, throw a - // TypeError exception. + // 2. Perform ? RequireInternalSlot(O, [[TypedArrayName]]). const source = Cast(receiver) otherwise ThrowTypeError( MessageTemplate::kIncompatibleMethodReceiver, methodName); - // 5. Let buffer be O.[[ViewedArrayBuffer]]. + // 3. Assert: O has a [[ViewedArrayBuffer]] internal slot. + // 4. Let buffer be O.[[ViewedArrayBuffer]]. const buffer = typed_array::GetTypedArrayBuffer(source); - // 6. Let srcLength be O.[[ArrayLength]]. + // 5. Let getSrcBufferByteLength be + // MakeIdempotentArrayBufferByteLengthGetter(SeqCst). + // 6. Let srcLength be IntegerIndexedObjectLength(O, getSrcBufferByteLength). let srcLength: uintptr; try { srcLength = LoadJSTypedArrayLengthAndCheckDetached(source) @@ -29,41 +31,57 @@ transitioning javascript builtin TypedArrayPrototypeSubArray( srcLength = 0; } - // 8. Let relativeBegin be ? ToInteger(begin). - // 9. 
If relativeBegin < 0, let beginIndex be max((srcLength + - // relativeBegin), 0); else let beginIndex be min(relativeBegin, - // srcLength). + // 8. Let relativeBegin be ? ToIntegerOrInfinity(begin). + // 9. If relativeBegin is -∞, let beginIndex be 0. + // 10. Else if relativeBegin < 0, let beginIndex be max(srcLength + + // relativeBegin, 0). + // 11. Else, let beginIndex be min(relativeBegin, srcLength). const arg0 = arguments[0]; const begin: uintptr = arg0 != Undefined ? ConvertAndClampRelativeIndex(arg0, srcLength) : 0; - // 10. If end is undefined, let relativeEnd be srcLength; - // else, let relativeEnd be ? ToInteger(end). - // 11. If relativeEnd < 0, let endIndex be max((srcLength + relativeEnd), - // 0); else let endIndex be min(relativeEnd, srcLength). + // 12. If O.[[ArrayLength]] is auto and end is undefined, then const arg1 = arguments[1]; - const end: uintptr = arg1 != Undefined ? - ConvertAndClampRelativeIndex(arg1, srcLength) : - srcLength; + const endIsDefined = arg1 != Undefined; - // 12. Let newLength be max(endIndex - beginIndex, 0). - const newLength: uintptr = Unsigned(IntPtrMax(Signed(end - begin), 0)); + let newLength: NumberOrUndefined; + if (IsLengthTrackingJSArrayBufferView(source) && !endIsDefined) { + // a. Let newLength be undefined. + newLength = Undefined; + } else { + // 13. Else, + // a. If end is undefined, let relativeEnd be srcLength; else let + // relativeEnd be ? ToIntegerOrInfinity(end). + // b. If relativeEnd is -∞, let endIndex be 0. + // c. Else if relativeEnd < 0, let endIndex be max(srcLength + + // relativeEnd, 0). + // d. Else, let endIndex be min(relativeEnd, srcLength). + const end: uintptr = endIsDefined ? + ConvertAndClampRelativeIndex(arg1, srcLength) : + srcLength; - // 13. Let constructorName be the String value of O.[[TypedArrayName]]. - // 14. Let elementSize be the Number value of the Element Size value + // e. Let newLength be max(endIndex - beginIndex, 0). + newLength = Convert(Unsigned(IntPtrMax(Signed(end - begin), 0))); + } + + // 14. Let constructorName be the String value of O.[[TypedArrayName]]. + // 15. Let elementSize be the Number value of the Element Size value // specified in Table 52 for constructorName. const elementsInfo = typed_array::GetTypedArrayElementsInfo(source); - // 15. Let srcByteOffset be O.[[ByteOffset]]. + // 16. Let srcByteOffset be O.[[ByteOffset]]. const srcByteOffset: uintptr = source.byte_offset; - // 16. Let beginByteOffset be srcByteOffset + beginIndex × elementSize. + // 17. Let beginByteOffset be srcByteOffset + beginIndex × elementSize. const beginByteOffset = srcByteOffset + elementsInfo.CalculateByteLength(begin) otherwise ThrowRangeError(MessageTemplate::kInvalidArrayBufferLength); - // 17. Let argumentsList be « buffer, beginByteOffset, newLength ». - // 18. Return ? TypedArraySpeciesCreate(O, argumentsList). + // 18. If newLength is undefined, then + // a. Let argumentsList be « buffer, 𝔽(beginByteOffset) ». + // 19. Else, + // a. Let argumentsList be « buffer, 𝔽(beginByteOffset), 𝔽(newLength) ». + // 20. Return ? TypedArraySpeciesCreate(O, argumentsList). 
return TypedArraySpeciesCreateByBuffer( methodName, source, buffer, beginByteOffset, newLength); } diff --git a/test/mjsunit/typedarray-growablesharedarraybuffer.js b/test/mjsunit/typedarray-growablesharedarraybuffer.js index 1e5679521a..6627e9ee3b 100644 --- a/test/mjsunit/typedarray-growablesharedarraybuffer.js +++ b/test/mjsunit/typedarray-growablesharedarraybuffer.js @@ -3437,11 +3437,11 @@ Reverse(ArrayReverseHelper); assertEquals(4, fixedLengthSubFull.length); assertEquals(2, fixedLengthWithOffsetSubFull.length); - // TODO(v8:11111): Are subarrays of length-tracking TAs also - // length-tracking? See - // https://github.com/tc39/proposal-resizablearraybuffer/issues/91 - assertEquals(4, lengthTrackingSubFull.length); - assertEquals(2, lengthTrackingWithOffsetSubFull.length); + // Subarrays of length-tracking TAs that don't pass an explicit end argument + // are also length-tracking. + assertEquals(lengthTracking.length, lengthTrackingSubFull.length); + assertEquals(lengthTrackingWithOffset.length, + lengthTrackingWithOffsetSubFull.length); } })(); @@ -3479,7 +3479,8 @@ Reverse(ArrayReverseHelper); const evil = { valueOf: () => { gsab.grow(6 * ctor.BYTES_PER_ELEMENT); return 0;}}; - assertEquals([0, 2, 4, 6], ToNumbers(lengthTracking.subarray(evil))); + assertEquals([0, 2, 4, 6], ToNumbers( + lengthTracking.subarray(evil, lengthTracking.length))); } })(); diff --git a/test/mjsunit/typedarray-resizablearraybuffer.js b/test/mjsunit/typedarray-resizablearraybuffer.js index 38ef983666..d72260f561 100644 --- a/test/mjsunit/typedarray-resizablearraybuffer.js +++ b/test/mjsunit/typedarray-resizablearraybuffer.js @@ -6682,11 +6682,11 @@ Reverse(ArrayReverseHelper, false); assertEquals(4, fixedLengthSubFull.length); assertEquals(2, fixedLengthWithOffsetSubFull.length); - // TODO(v8:11111): Are subarrays of length-tracking TAs also - // length-tracking? See - // https://github.com/tc39/proposal-resizablearraybuffer/issues/91 - assertEquals(4, lengthTrackingSubFull.length); - assertEquals(2, lengthTrackingWithOffsetSubFull.length); + // Subarrays of length-tracking TAs that don't pass an explicit end argument + // are also length-tracking. 
+ assertEquals(lengthTracking.length, lengthTrackingSubFull.length); + assertEquals(lengthTrackingWithOffset.length, + lengthTrackingWithOffsetSubFull.length); } })(); @@ -6779,7 +6779,9 @@ Reverse(ArrayReverseHelper, false); rab.resize(2 * ctor.BYTES_PER_ELEMENT); return 0; }}; - assertThrows(() => { lengthTracking.subarray(evil); }); + assertThrows(() => { + lengthTracking.subarray(evil, lengthTracking.length); + }); } // Like the previous test, but now we construct a smaller subarray and it @@ -6874,7 +6876,8 @@ Reverse(ArrayReverseHelper, false); const evil = { valueOf: () => { rab.resize(6 * ctor.BYTES_PER_ELEMENT); return 0;}}; - assertEquals([0, 2, 4, 6], ToNumbers(lengthTracking.subarray(evil))); + assertEquals([0, 2, 4, 6], ToNumbers( + lengthTracking.subarray(evil, lengthTracking.length))); } })(); From a26ca5ed147dde47f02d70c5b38d8befc1c93cb3 Mon Sep 17 00:00:00 2001 From: Liu Yu Date: Thu, 8 Sep 2022 19:39:11 +0800 Subject: [PATCH 0063/1772] [mips32] Delete mips32 from v8 Bug: v8:13206 Change-Id: Ifb5daeff2a1e91fd098bc5abe9f81339575636bf Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3837160 Reviewed-by: Hannes Payer Reviewed-by: Jakob Linke Reviewed-by: Michael Achenbach Auto-Submit: Liu Yu Commit-Queue: Liu Yu Cr-Commit-Position: refs/heads/main@{#83148} --- BUILD.gn | 102 +- LICENSE | 3 +- gni/snapshot_toolchain.gni | 9 +- gni/v8.gni | 3 +- include/v8-unwinder-state.h | 8 +- include/v8config.h | 22 +- src/base/compiler-specific.h | 8 +- src/base/cpu.cc | 54 +- src/base/numbers/strtod.cc | 2 +- src/base/platform/yield-processor.h | 5 - src/baseline/baseline-assembler-inl.h | 2 - src/baseline/baseline-compiler.cc | 2 - .../mips/baseline-assembler-mips-inl.h | 573 - .../mips/baseline-compiler-mips-inl.h | 78 - src/builtins/builtins-internal-gen.cc | 4 +- .../builtins-sharedarraybuffer-gen.cc | 29 +- src/builtins/builtins.cc | 4 +- src/builtins/mips/builtins-mips.cc | 4263 ------- src/codegen/assembler-arch.h | 2 - src/codegen/assembler-inl.h | 2 - src/codegen/assembler.h | 3 +- src/codegen/constants-arch.h | 2 - src/codegen/cpu-features.h | 2 +- src/codegen/external-reference.cc | 2 - src/codegen/interface-descriptors-inl.h | 12 +- src/codegen/interface-descriptors.cc | 2 +- src/codegen/macro-assembler.h | 3 - src/codegen/mips/assembler-mips-inl.h | 355 - src/codegen/mips/assembler-mips.cc | 3854 ------ src/codegen/mips/assembler-mips.h | 1923 --- src/codegen/mips/constants-mips.cc | 144 - src/codegen/mips/constants-mips.h | 1918 --- src/codegen/mips/cpu-mips.cc | 45 - .../mips/interface-descriptors-mips-inl.h | 314 - src/codegen/mips/macro-assembler-mips.cc | 5655 --------- src/codegen/mips/macro-assembler-mips.h | 1211 -- src/codegen/mips/register-mips.h | 299 - src/codegen/mips/reglist-mips.h | 48 - src/codegen/register-arch.h | 2 - src/codegen/reglist.h | 2 - src/codegen/reloc-info.cc | 9 +- src/codegen/reloc-info.h | 2 +- src/common/globals.h | 9 +- src/compiler/backend/instruction-codes.h | 2 - src/compiler/backend/instruction-selector.cc | 5 +- .../backend/mips/code-generator-mips.cc | 4443 ------- .../backend/mips/instruction-codes-mips.h | 400 - .../mips/instruction-scheduler-mips.cc | 1804 --- .../backend/mips/instruction-selector-mips.cc | 2573 ---- src/compiler/c-linkage.cc | 9 - src/deoptimizer/mips/deoptimizer-mips.cc | 34 - src/diagnostics/mips/disasm-mips.cc | 2736 ---- src/diagnostics/mips/unwinder-mips.cc | 14 - src/diagnostics/perf-jit.h | 3 - src/execution/clobber-registers.cc | 5 - src/execution/frame-constants.h | 2 - 
src/execution/mips/frame-constants-mips.cc | 32 - src/execution/mips/frame-constants-mips.h | 84 - src/execution/mips/simulator-mips.cc | 7304 ----------- src/execution/mips/simulator-mips.h | 719 -- src/execution/simulator-base.h | 1 - src/execution/simulator.h | 2 - src/flags/flag-definitions.h | 4 +- src/heap/base/asm/mips/push_registers_asm.cc | 48 - src/interpreter/interpreter-assembler.cc | 3 +- src/libsampler/sampler.cc | 4 - src/logging/log.cc | 2 - src/objects/code.cc | 9 +- src/profiler/tick-sample.cc | 2 +- .../mips/regexp-macro-assembler-mips.cc | 1359 -- src/regexp/mips/regexp-macro-assembler-mips.h | 231 - src/regexp/regexp-macro-assembler-arch.h | 2 - src/regexp/regexp.cc | 3 - src/runtime/runtime-atomics.cc | 7 +- src/snapshot/deserializer.h | 8 +- src/snapshot/embedded/embedded-data.cc | 9 +- .../platform-embedded-file-writer-generic.cc | 3 +- src/utils/memcopy.h | 20 +- src/wasm/baseline/liftoff-assembler.h | 2 - src/wasm/baseline/liftoff-compiler.cc | 4 +- .../baseline/mips/liftoff-assembler-mips.h | 3237 ----- src/wasm/jump-table-assembler.cc | 2 +- src/wasm/wasm-linkage.h | 9 - src/wasm/wasm-serialization.cc | 6 +- test/cctest/BUILD.gn | 15 +- test/cctest/cctest.h | 2 +- test/cctest/cctest.status | 30 +- test/cctest/compiler/test-run-machops.cc | 3 +- test/cctest/test-api.cc | 6 +- test/cctest/test-assembler-mips.cc | 10454 ---------------- test/cctest/test-disasm-mips.cc | 1814 --- test/cctest/test-lockers.cc | 6 +- test/cctest/test-macro-assembler-mips.cc | 1372 -- test/debugger/debugger.status | 2 +- test/message/message.status | 4 +- test/mjsunit/mjsunit.status | 70 +- test/mozilla/mozilla.status | 34 +- test/test262/test262.status | 6 +- test/unittests/BUILD.gn | 5 - .../turbo-assembler-mips-unittest.cc | 66 - .../instruction-selector-mips-unittest.cc | 1426 --- test/unittests/regexp/regexp-unittest.cc | 2 - test/unittests/unittests.status | 6 - .../wasm/liftoff-register-unittests.cc | 2 - test/wasm-spec-tests/wasm-spec-tests.status | 12 +- test/webkit/webkit.status | 4 - tools/cppgc/gen_cmake.py | 2 +- tools/dev/gen-tags.py | 3 +- tools/dev/gm.py | 10 +- tools/disasm.py | 1 - tools/generate-header-include-checks.py | 4 +- tools/profiling/ll_prof.py | 1 - tools/run_perf.py | 2 - tools/testrunner/build_config.py | 11 +- tools/testrunner/local/statusfile.py | 6 +- tools/testrunner/local/utils.py | 5 +- tools/toolchain/BUILD.gn | 44 - 117 files changed, 149 insertions(+), 61418 deletions(-) delete mode 100644 src/baseline/mips/baseline-assembler-mips-inl.h delete mode 100644 src/baseline/mips/baseline-compiler-mips-inl.h delete mode 100644 src/builtins/mips/builtins-mips.cc delete mode 100644 src/codegen/mips/assembler-mips-inl.h delete mode 100644 src/codegen/mips/assembler-mips.cc delete mode 100644 src/codegen/mips/assembler-mips.h delete mode 100644 src/codegen/mips/constants-mips.cc delete mode 100644 src/codegen/mips/constants-mips.h delete mode 100644 src/codegen/mips/cpu-mips.cc delete mode 100644 src/codegen/mips/interface-descriptors-mips-inl.h delete mode 100644 src/codegen/mips/macro-assembler-mips.cc delete mode 100644 src/codegen/mips/macro-assembler-mips.h delete mode 100644 src/codegen/mips/register-mips.h delete mode 100644 src/codegen/mips/reglist-mips.h delete mode 100644 src/compiler/backend/mips/code-generator-mips.cc delete mode 100644 src/compiler/backend/mips/instruction-codes-mips.h delete mode 100644 src/compiler/backend/mips/instruction-scheduler-mips.cc delete mode 100644 src/compiler/backend/mips/instruction-selector-mips.cc delete mode 
100644 src/deoptimizer/mips/deoptimizer-mips.cc delete mode 100644 src/diagnostics/mips/disasm-mips.cc delete mode 100644 src/diagnostics/mips/unwinder-mips.cc delete mode 100644 src/execution/mips/frame-constants-mips.cc delete mode 100644 src/execution/mips/frame-constants-mips.h delete mode 100644 src/execution/mips/simulator-mips.cc delete mode 100644 src/execution/mips/simulator-mips.h delete mode 100644 src/heap/base/asm/mips/push_registers_asm.cc delete mode 100644 src/regexp/mips/regexp-macro-assembler-mips.cc delete mode 100644 src/regexp/mips/regexp-macro-assembler-mips.h delete mode 100644 src/wasm/baseline/mips/liftoff-assembler-mips.h delete mode 100644 test/cctest/test-assembler-mips.cc delete mode 100644 test/cctest/test-disasm-mips.cc delete mode 100644 test/cctest/test-macro-assembler-mips.cc delete mode 100644 test/unittests/assembler/turbo-assembler-mips-unittest.cc delete mode 100644 test/unittests/compiler/mips/instruction-selector-mips-unittest.cc diff --git a/BUILD.gn b/BUILD.gn index 43667839c5..bbe5ec3c00 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -1119,49 +1119,11 @@ config("toolchain") { } } - # Mips64el/mipsel simulators. - if (target_is_simulator && - (v8_current_cpu == "mipsel" || v8_current_cpu == "mips64el")) { + # Mips64el simulators. + if (target_is_simulator && v8_current_cpu == "mips64el") { defines += [ "_MIPS_TARGET_SIMULATOR" ] } - if (v8_current_cpu == "mipsel" || v8_current_cpu == "mips") { - defines += [ "V8_TARGET_ARCH_MIPS" ] - if (v8_can_use_fpu_instructions) { - defines += [ "CAN_USE_FPU_INSTRUCTIONS" ] - } - if (v8_use_mips_abi_hardfloat) { - defines += [ - "__mips_hard_float=1", - "CAN_USE_FPU_INSTRUCTIONS", - ] - } else { - defines += [ "__mips_soft_float=1" ] - } - if (mips_arch_variant == "r6") { - defines += [ - "_MIPS_ARCH_MIPS32R6", - "FPU_MODE_FP64", - ] - if (mips_use_msa) { - defines += [ "_MIPS_MSA" ] - } - } else if (mips_arch_variant == "r2") { - defines += [ "_MIPS_ARCH_MIPS32R2" ] - if (mips_fpu_mode == "fp64") { - defines += [ "FPU_MODE_FP64" ] - } else if (mips_fpu_mode == "fpxx") { - defines += [ "FPU_MODE_FPXX" ] - } else if (mips_fpu_mode == "fp32") { - defines += [ "FPU_MODE_FP32" ] - } - } else if (mips_arch_variant == "r1") { - defines += [ "FPU_MODE_FP32" ] - } - - # TODO(infra): Add support for mips_arch_variant rx and loongson. - } - if (v8_current_cpu == "mips64el" || v8_current_cpu == "mips64") { defines += [ "V8_TARGET_ARCH_MIPS64" ] if (v8_can_use_fpu_instructions) { @@ -1335,6 +1297,7 @@ config("toolchain") { if (is_clang) { cflags += [ "-Wmissing-field-initializers", + "-Wunreachable-code", # Google3 enables this warning, so we should also enable it to find issue # earlier. See https://reviews.llvm.org/D56731 for details about this @@ -1345,11 +1308,6 @@ config("toolchain") { "-Wno-shadow", ] - if (v8_current_cpu != "mips" && v8_current_cpu != "mipsel") { - # We exclude MIPS because the IsMipsArchVariant macro causes trouble. 
- cflags += [ "-Wunreachable-code" ] - } - if (v8_current_cpu == "x64" || v8_current_cpu == "arm64" || v8_current_cpu == "mips64el" || v8_current_cpu == "riscv64") { cflags += [ "-Wshorten-64-to-32" ] @@ -1609,8 +1567,7 @@ if (is_android && enable_java_templates) { if (v8_use_external_startup_data) { deps = [ "//v8" ] renaming_sources = [ "$root_out_dir/snapshot_blob.bin" ] - if (current_cpu == "arm" || current_cpu == "x86" || - current_cpu == "mipsel") { + if (current_cpu == "arm" || current_cpu == "x86") { renaming_destinations = [ "snapshot_blob_32.bin" ] } else { renaming_destinations = [ "snapshot_blob_64.bin" ] @@ -2340,8 +2297,7 @@ action("v8_dump_build_config") { "v8_enable_cet_shadow_stack=$v8_enable_cet_shadow_stack", ] - if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel" || - v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") { + if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") { args += [ "mips_arch_variant=\"$mips_arch_variant\"", "mips_use_msa=$mips_use_msa", @@ -2514,11 +2470,6 @@ v8_source_set("v8_initializers") { ### gcmole(arch:arm64) ### "src/builtins/arm64/builtins-arm64.cc", ] - } else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") { - sources += [ - ### gcmole(arch:mipsel) ### - "src/builtins/mips/builtins-mips.cc", - ] } else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") { sources += [ ### gcmole(arch:mips64el) ### @@ -3898,22 +3849,6 @@ v8_header_set("v8_internal_headers") { if (is_win) { sources += [ "src/diagnostics/unwinding-info-win64.h" ] } - } else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") { - sources += [ ### gcmole(arch:mipsel) ### - "src/baseline/mips/baseline-assembler-mips-inl.h", - "src/baseline/mips/baseline-compiler-mips-inl.h", - "src/codegen/mips/assembler-mips-inl.h", - "src/codegen/mips/assembler-mips.h", - "src/codegen/mips/constants-mips.h", - "src/codegen/mips/macro-assembler-mips.h", - "src/codegen/mips/register-mips.h", - "src/codegen/mips/reglist-mips.h", - "src/compiler/backend/mips/instruction-codes-mips.h", - "src/execution/mips/frame-constants-mips.h", - "src/execution/mips/simulator-mips.h", - "src/regexp/mips/regexp-macro-assembler-mips.h", - "src/wasm/baseline/mips/liftoff-assembler-mips.h", - ] } else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") { sources += [ ### gcmole(arch:mips64el) ### "src/baseline/mips64/baseline-assembler-mips64-inl.h", @@ -5004,23 +4939,6 @@ v8_source_set("v8_base_without_compiler") { if (is_win) { sources += [ "src/diagnostics/unwinding-info-win64.cc" ] } - } else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") { - sources += [ ### gcmole(arch:mipsel) ### - "src/codegen/mips/assembler-mips.cc", - "src/codegen/mips/constants-mips.cc", - "src/codegen/mips/cpu-mips.cc", - "src/codegen/mips/interface-descriptors-mips-inl.h", - "src/codegen/mips/macro-assembler-mips.cc", - "src/compiler/backend/mips/code-generator-mips.cc", - "src/compiler/backend/mips/instruction-scheduler-mips.cc", - "src/compiler/backend/mips/instruction-selector-mips.cc", - "src/deoptimizer/mips/deoptimizer-mips.cc", - "src/diagnostics/mips/disasm-mips.cc", - "src/diagnostics/mips/unwinder-mips.cc", - "src/execution/mips/frame-constants-mips.cc", - "src/execution/mips/simulator-mips.cc", - "src/regexp/mips/regexp-macro-assembler-mips.cc", - ] } else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") { sources += [ ### gcmole(arch:mips64el) ### "src/codegen/mips64/assembler-mips64.cc", @@ -5246,8 +5164,7 @@ 
v8_source_set("v8_base_without_compiler") { # Platforms that don't have CAS support need to link atomic library # to implement atomic memory access - if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel" || - v8_current_cpu == "mips64" || v8_current_cpu == "mips64el" || + if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el" || v8_current_cpu == "ppc" || v8_current_cpu == "ppc64" || v8_current_cpu == "s390" || v8_current_cpu == "s390x" || v8_current_cpu == "riscv64" || v8_current_cpu == "riscv32") { @@ -5637,7 +5554,7 @@ v8_component("v8_libbase") { data_deps += [ "//build/win:runtime_libs" ] } - if (v8_current_cpu == "mips" || v8_current_cpu == "mips64") { + if (v8_current_cpu == "mips64") { # Add runtime libs for mips. data += [ "tools/mips_toolchain/sysroot/usr/lib/", @@ -5645,8 +5562,7 @@ v8_component("v8_libbase") { ] } - if (is_ubsan && (v8_current_cpu == "x86" || v8_current_cpu == "arm" || - v8_current_cpu == "mips")) { + if (is_ubsan && (v8_current_cpu == "x86" || v8_current_cpu == "arm")) { # Special UBSan 32-bit requirement. sources += [ "src/base/ubsan.cc" ] } @@ -5826,8 +5742,6 @@ v8_source_set("v8_heap_base") { sources += [ "src/heap/base/asm/ppc/push_registers_asm.cc" ] } else if (current_cpu == "s390x") { sources += [ "src/heap/base/asm/s390/push_registers_asm.cc" ] - } else if (current_cpu == "mipsel") { - sources += [ "src/heap/base/asm/mips/push_registers_asm.cc" ] } else if (current_cpu == "mips64el") { sources += [ "src/heap/base/asm/mips64/push_registers_asm.cc" ] } else if (current_cpu == "loong64") { diff --git a/LICENSE b/LICENSE index 53d9c47e33..f665c480d2 100644 --- a/LICENSE +++ b/LICENSE @@ -15,8 +15,7 @@ are: - Strongtalk assembler, the basis of the files assembler-arm-inl.h, assembler-arm.cc, assembler-arm.h, assembler-ia32-inl.h, assembler-ia32.cc, assembler-ia32.h, assembler-x64-inl.h, - assembler-x64.cc, assembler-x64.h, assembler-mips-inl.h, - assembler-mips.cc, assembler-mips.h, assembler.cc and assembler.h. + assembler-x64.cc, assembler-x64.h, assembler.cc and assembler.h. This code is copyrighted by Sun Microsystems Inc. and released under a 3-clause BSD license. diff --git a/gni/snapshot_toolchain.gni b/gni/snapshot_toolchain.gni index 637dcd39c4..5f5e53a30d 100644 --- a/gni/snapshot_toolchain.gni +++ b/gni/snapshot_toolchain.gni @@ -64,8 +64,7 @@ if (v8_snapshot_toolchain == "") { current_cpu == "arm") { # Trying to compile 32-bit arm on arm64. Good luck! v8_snapshot_toolchain = current_toolchain - } else if (host_cpu == "x64" && - (v8_current_cpu == "mips" || v8_current_cpu == "mips64")) { + } else if (host_cpu == "x64" && v8_current_cpu == "mips64") { # We don't support snapshot generation for big-endian targets, # therefore snapshots will need to be built using native mksnapshot # in combination with qemu @@ -96,8 +95,7 @@ if (v8_snapshot_toolchain == "") { } else { _cpus = "x64_v8_${v8_current_cpu}" } - } else if (v8_current_cpu == "arm" || v8_current_cpu == "mipsel" || - v8_current_cpu == "riscv32") { + } else if (v8_current_cpu == "arm" || v8_current_cpu == "riscv32") { _cpus = "x86_v8_${v8_current_cpu}" } else { # This branch should not be reached; leave _cpus blank so the assert @@ -122,7 +120,6 @@ assert(v8_snapshot_toolchain != "", # avoid building v8_libbase on the host more than once. On mips with big endian, # the snapshot toolchain is the target toolchain and, hence, can't be used. 
v8_generator_toolchain = v8_snapshot_toolchain -if (host_cpu == "x64" && - (v8_current_cpu == "mips" || v8_current_cpu == "mips64")) { +if (host_cpu == "x64" && v8_current_cpu == "mips64") { v8_generator_toolchain = "//build/toolchain/linux:clang_x64" } diff --git a/gni/v8.gni b/gni/v8.gni index 3dd9fde0fa..3f093597fa 100644 --- a/gni/v8.gni +++ b/gni/v8.gni @@ -199,8 +199,7 @@ if ((is_posix || is_fuchsia) && } # On MIPS gcc_target_rpath and ldso_path might be needed for all builds. -if (target_cpu == "mipsel" || target_cpu == "mips64el" || - target_cpu == "mips" || target_cpu == "mips64") { +if (target_cpu == "mips64el" || target_cpu == "mips64") { v8_add_configs += [ "//build/config/gcc:rpath_for_built_shared_libraries" ] } diff --git a/include/v8-unwinder-state.h b/include/v8-unwinder-state.h index 4154905d13..18bb410d2b 100644 --- a/include/v8-unwinder-state.h +++ b/include/v8-unwinder-state.h @@ -17,10 +17,10 @@ struct CalleeSavedRegisters { void* arm_r9; void* arm_r10; }; -#elif V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 || \ - V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC || \ - V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_S390 || \ - V8_TARGET_ARCH_LOONG64 || V8_TARGET_ARCH_RISCV32 +#elif V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 || \ + V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || \ + V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_LOONG64 || \ + V8_TARGET_ARCH_RISCV32 struct CalleeSavedRegisters {}; #else #error Target architecture was not detected as supported by v8 diff --git a/include/v8config.h b/include/v8config.h index 71d932006b..6753eb083b 100644 --- a/include/v8config.h +++ b/include/v8config.h @@ -653,9 +653,6 @@ V8 shared library set USING_V8_SHARED. #elif defined(__mips64) #define V8_HOST_ARCH_MIPS64 1 #define V8_HOST_ARCH_64_BIT 1 -#elif defined(__MIPSEB__) || defined(__MIPSEL__) -#define V8_HOST_ARCH_MIPS 1 -#define V8_HOST_ARCH_32_BIT 1 #elif defined(__loongarch64) #define V8_HOST_ARCH_LOONG64 1 #define V8_HOST_ARCH_64_BIT 1 @@ -691,10 +688,10 @@ V8 shared library set USING_V8_SHARED. // The macros may be set externally. If not, detect in the same way as the host // architecture, that is, target the native environment as presented by the // compiler. -#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && \ - !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64 && \ - !V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_S390 && \ - !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64 && \ +#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && \ + !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_PPC && \ + !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_S390 && \ + !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64 && \ !V8_TARGET_ARCH_RISCV32 #if defined(_M_X64) || defined(__x86_64__) #define V8_TARGET_ARCH_X64 1 @@ -706,8 +703,6 @@ V8 shared library set USING_V8_SHARED. #define V8_TARGET_ARCH_ARM 1 #elif defined(__mips64) #define V8_TARGET_ARCH_MIPS64 1 -#elif defined(__MIPSEB__) || defined(__MIPSEL__) -#define V8_TARGET_ARCH_MIPS 1 #elif defined(_ARCH_PPC64) #define V8_TARGET_ARCH_PPC64 1 #elif defined(_ARCH_PPC) @@ -785,9 +780,6 @@ V8 shared library set USING_V8_SHARED. 
#if (V8_TARGET_ARCH_ARM64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_ARM64)) #error Target architecture arm64 is only supported on arm64 and x64 host #endif -#if (V8_TARGET_ARCH_MIPS && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_MIPS)) -#error Target architecture mips is only supported on mips and ia32 host -#endif #if (V8_TARGET_ARCH_MIPS64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_MIPS64)) #error Target architecture mips64 is only supported on mips64 and x64 host #endif @@ -812,12 +804,6 @@ V8 shared library set USING_V8_SHARED. #define V8_TARGET_LITTLE_ENDIAN 1 #elif V8_TARGET_ARCH_LOONG64 #define V8_TARGET_LITTLE_ENDIAN 1 -#elif V8_TARGET_ARCH_MIPS -#if defined(__MIPSEB__) -#define V8_TARGET_BIG_ENDIAN 1 -#else -#define V8_TARGET_LITTLE_ENDIAN 1 -#endif #elif V8_TARGET_ARCH_MIPS64 #if defined(__MIPSEB__) || defined(V8_TARGET_ARCH_MIPS64_BE) #define V8_TARGET_BIG_ENDIAN 1 diff --git a/src/base/compiler-specific.h b/src/base/compiler-specific.h index c1dd63e86d..3221de0834 100644 --- a/src/base/compiler-specific.h +++ b/src/base/compiler-specific.h @@ -98,10 +98,10 @@ // do not support adding noexcept to default members. // Disabled on MSVC because constructors of standard containers are not noexcept // there. -#if ((!defined(V8_CC_GNU) && !defined(V8_CC_MSVC) && \ - !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64) && \ - !defined(V8_TARGET_ARCH_PPC) && !defined(V8_TARGET_ARCH_PPC64) && \ - !defined(V8_TARGET_ARCH_RISCV64) && !defined(V8_TARGET_ARCH_RISCV32)) || \ +#if ((!defined(V8_CC_GNU) && !defined(V8_CC_MSVC) && \ + !defined(V8_TARGET_ARCH_MIPS64) && !defined(V8_TARGET_ARCH_PPC) && \ + !defined(V8_TARGET_ARCH_PPC64) && !defined(V8_TARGET_ARCH_RISCV64) && \ + !defined(V8_TARGET_ARCH_RISCV32)) || \ (defined(__clang__) && __cplusplus > 201300L)) #define V8_NOEXCEPT noexcept #else diff --git a/src/base/cpu.cc b/src/base/cpu.cc index 94ceffab5c..f716403b05 100644 --- a/src/base/cpu.cc +++ b/src/base/cpu.cc @@ -89,8 +89,8 @@ static V8_INLINE void __cpuid(int cpu_info[4], int info_type) { #endif // !V8_LIBC_MSVCRT -#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 || V8_HOST_ARCH_MIPS || \ - V8_HOST_ARCH_MIPS64 || V8_HOST_ARCH_RISCV64 +#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 || V8_HOST_ARCH_MIPS64 || \ + V8_HOST_ARCH_RISCV64 #if V8_OS_LINUX @@ -198,48 +198,6 @@ static uint32_t ReadELFHWCaps() { #endif // V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 -#if V8_HOST_ARCH_MIPS -int __detect_fp64_mode(void) { - double result = 0; - // Bit representation of (double)1 is 0x3FF0000000000000. - __asm__ volatile( - ".set push\n\t" - ".set noreorder\n\t" - ".set oddspreg\n\t" - "lui $t0, 0x3FF0\n\t" - "ldc1 $f0, %0\n\t" - "mtc1 $t0, $f1\n\t" - "sdc1 $f0, %0\n\t" - ".set pop\n\t" - : "+m"(result) - : - : "t0", "$f0", "$f1", "memory"); - - return !(result == 1); -} - - -int __detect_mips_arch_revision(void) { - // TODO(dusmil): Do the specific syscall as soon as it is implemented in mips - // kernel. - uint32_t result = 0; - __asm__ volatile( - "move $v0, $zero\n\t" - // Encoding for "addi $v0, $v0, 1" on non-r6, - // which is encoding for "bovc $v0, %v0, 1" on r6. - // Use machine code directly to avoid compilation errors with different - // toolchains and maintain compatibility. - ".word 0x20420001\n\t" - "sw $v0, %0\n\t" - : "=m"(result) - : - : "v0", "memory"); - // Result is 0 on r6 architectures, 1 on other architecture revisions. - // Fall-back to the least common denominator which is mips32 revision 1. - return result ? 
1 : 6; -} -#endif // V8_HOST_ARCH_MIPS - // Extract the information exposed by the kernel via /proc/cpuinfo. class CPUInfo final { public: @@ -359,7 +317,7 @@ static bool HasListItem(const char* list, const char* item) { #endif // V8_OS_LINUX #endif // V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 || - // V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64 || V8_HOST_ARCH_RISCV64 + // V8_HOST_ARCH_MIPS64 || V8_HOST_ARCH_RISCV64 #if defined(V8_OS_STARBOARD) @@ -742,7 +700,7 @@ CPU::CPU() #endif // V8_OS_LINUX -#elif V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64 +#elif V8_HOST_ARCH_MIPS64 // Simple detection of FPU at runtime for Linux. // It is based on /proc/cpuinfo, which reveals hardware configuration @@ -756,10 +714,6 @@ CPU::CPU() has_msa_ = HasListItem(ASEs, "msa"); delete[] cpu_model; delete[] ASEs; -#ifdef V8_HOST_ARCH_MIPS - is_fp64_mode_ = __detect_fp64_mode(); - architecture_ = __detect_mips_arch_revision(); -#endif #elif V8_HOST_ARCH_ARM64 #ifdef V8_OS_WIN diff --git a/src/base/numbers/strtod.cc b/src/base/numbers/strtod.cc index f74bf43fca..16ad5f04ef 100644 --- a/src/base/numbers/strtod.cc +++ b/src/base/numbers/strtod.cc @@ -153,7 +153,7 @@ static bool DoubleStrtod(Vector trimmed, int exponent, // result is not accurate. // We know that Windows32 with MSVC, unlike with MinGW32, uses 64 bits and is // therefore accurate. - // Note that the ARM and MIPS simulators are compiled for 32bits. They + // Note that the ARM simulators are compiled for 32bits. They // therefore exhibit the same problem. USE(exact_powers_of_ten); USE(kMaxExactDoubleIntegerDecimalDigits); diff --git a/src/base/platform/yield-processor.h b/src/base/platform/yield-processor.h index f20c06e1ae..5d2827da6d 100644 --- a/src/base/platform/yield-processor.h +++ b/src/base/platform/yield-processor.h @@ -43,11 +43,6 @@ #elif defined(V8_HOST_ARCH_ARM64) || \ (defined(V8_HOST_ARCH_ARM) && __ARM_ARCH >= 6) #define YIELD_PROCESSOR __asm__ __volatile__("yield") -#elif defined(V8_HOST_ARCH_MIPS) -// The MIPS32 docs state that the PAUSE instruction is a no-op on older -// architectures (first added in MIPS32r2). To avoid assembler errors when -// targeting pre-r2, we must encode the instruction manually. -#define YIELD_PROCESSOR __asm__ __volatile__(".word 0x00000140") #elif defined(V8_HOST_ARCH_MIPS64EL) && __mips_isa_rev >= 2 // Don't bother doing using .word here since r2 is the lowest supported mips64 // that Chromium supports. 
diff --git a/src/baseline/baseline-assembler-inl.h b/src/baseline/baseline-assembler-inl.h index 7c21012434..f692af4e13 100644 --- a/src/baseline/baseline-assembler-inl.h +++ b/src/baseline/baseline-assembler-inl.h @@ -36,8 +36,6 @@ #include "src/baseline/riscv/baseline-assembler-riscv-inl.h" #elif V8_TARGET_ARCH_MIPS64 #include "src/baseline/mips64/baseline-assembler-mips64-inl.h" -#elif V8_TARGET_ARCH_MIPS -#include "src/baseline/mips/baseline-assembler-mips-inl.h" #elif V8_TARGET_ARCH_LOONG64 #include "src/baseline/loong64/baseline-assembler-loong64-inl.h" #else diff --git a/src/baseline/baseline-compiler.cc b/src/baseline/baseline-compiler.cc index 5acdb028dd..4db43686ac 100644 --- a/src/baseline/baseline-compiler.cc +++ b/src/baseline/baseline-compiler.cc @@ -53,8 +53,6 @@ #include "src/baseline/riscv/baseline-compiler-riscv-inl.h" #elif V8_TARGET_ARCH_MIPS64 #include "src/baseline/mips64/baseline-compiler-mips64-inl.h" -#elif V8_TARGET_ARCH_MIPS -#include "src/baseline/mips/baseline-compiler-mips-inl.h" #elif V8_TARGET_ARCH_LOONG64 #include "src/baseline/loong64/baseline-compiler-loong64-inl.h" #else diff --git a/src/baseline/mips/baseline-assembler-mips-inl.h b/src/baseline/mips/baseline-assembler-mips-inl.h deleted file mode 100644 index 98d7ff1654..0000000000 --- a/src/baseline/mips/baseline-assembler-mips-inl.h +++ /dev/null @@ -1,573 +0,0 @@ -// Copyright 2021 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_BASELINE_MIPS_BASELINE_ASSEMBLER_MIPS_INL_H_ -#define V8_BASELINE_MIPS_BASELINE_ASSEMBLER_MIPS_INL_H_ - -#include "src/baseline/baseline-assembler.h" -#include "src/codegen/interface-descriptors.h" -#include "src/codegen/mips/assembler-mips-inl.h" -#include "src/objects/literal-objects-inl.h" - -namespace v8 { -namespace internal { -namespace baseline { - -class BaselineAssembler::ScratchRegisterScope { - public: - explicit ScratchRegisterScope(BaselineAssembler* assembler) - : assembler_(assembler), - prev_scope_(assembler->scratch_register_scope_), - wrapped_scope_(assembler->masm()) { - if (!assembler_->scratch_register_scope_) { - // If we haven't opened a scratch scope yet, for the first one add a - // couple of extra registers. - wrapped_scope_.Include({t4, t5, t6, t7}); - } - assembler_->scratch_register_scope_ = this; - } - ~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; } - - Register AcquireScratch() { return wrapped_scope_.Acquire(); } - - private: - BaselineAssembler* assembler_; - ScratchRegisterScope* prev_scope_; - UseScratchRegisterScope wrapped_scope_; -}; - -enum class Condition : uint32_t { - kEqual = eq, - kNotEqual = ne, - - kLessThan = lt, - kGreaterThan = gt, - kLessThanEqual = le, - kGreaterThanEqual = ge, - - kUnsignedLessThan = Uless, - kUnsignedGreaterThan = Ugreater, - kUnsignedLessThanEqual = Uless_equal, - kUnsignedGreaterThanEqual = Ugreater_equal, - - kOverflow = overflow, - kNoOverflow = no_overflow, - - kZero = eq, - kNotZero = ne, -}; - -inline internal::Condition AsMasmCondition(Condition cond) { - // This is important for arm, where the internal::Condition where each value - // represents an encoded bit field value. 
-  static_assert(sizeof(internal::Condition) == sizeof(Condition));
-  return static_cast<internal::Condition>(cond);
-}
-
-namespace detail {
-
-#ifdef DEBUG
-inline bool Clobbers(Register target, MemOperand op) {
-  return op.is_reg() && op.rm() == target;
-}
-#endif
-
-}  // namespace detail
-
-#define __ masm_->
-
-MemOperand BaselineAssembler::RegisterFrameOperand(
-    interpreter::Register interpreter_register) {
-  return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
-}
-void BaselineAssembler::RegisterFrameAddress(
-    interpreter::Register interpreter_register, Register rscratch) {
-  return __ Addu(rscratch, fp,
-                 interpreter_register.ToOperand() * kSystemPointerSize);
-}
-MemOperand BaselineAssembler::FeedbackVectorOperand() {
-  return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
-}
-
-void BaselineAssembler::Bind(Label* label) { __ bind(label); }
-
-void BaselineAssembler::JumpTarget() {
-  // NOP.
-}
-void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
-  __ Branch(target);
-}
-void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
-                                   Label* target, Label::Distance) {
-  __ JumpIfRoot(value, index, target);
-}
-void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
-                                      Label* target, Label::Distance) {
-  __ JumpIfNotRoot(value, index, target);
-}
-void BaselineAssembler::JumpIfSmi(Register value, Label* target,
-                                  Label::Distance) {
-  __ JumpIfSmi(value, target);
-}
-void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
-                                     Label::Distance) {
-  __ JumpIfNotSmi(value, target);
-}
-void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
-                                        Label* target,
-                                        Label::Distance distance) {
-  JumpIf(cc, left, Operand(right), target, distance);
-}
-
-void BaselineAssembler::CallBuiltin(Builtin builtin) {
-  ASM_CODE_COMMENT_STRING(masm_,
-                          __ CommentForOffHeapTrampoline("call", builtin));
-  Register temp = t9;
-  __ LoadEntryFromBuiltin(builtin, temp);
-  __ Call(temp);
-}
-
-void BaselineAssembler::TailCallBuiltin(Builtin builtin) {
-  ASM_CODE_COMMENT_STRING(masm_,
-                          __ CommentForOffHeapTrampoline("tail call", builtin));
-  Register temp = t9;
-  __ LoadEntryFromBuiltin(builtin, temp);
-  __ Jump(temp);
-}
-
-void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
-                                      Label* target, Label::Distance) {
-  ScratchRegisterScope temps(this);
-  Register scratch = temps.AcquireScratch();
-  __ And(scratch, value, Operand(mask));
-  __ Branch(target, AsMasmCondition(cc), scratch, Operand(zero_reg));
-}
-
-void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
-                               Label* target, Label::Distance) {
-  __ Branch(target, AsMasmCondition(cc), lhs, Operand(rhs));
-}
-void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
-                                         InstanceType instance_type,
-                                         Register map, Label* target,
-                                         Label::Distance) {
-  ScratchRegisterScope temps(this);
-  Register type = temps.AcquireScratch();
-  __ GetObjectType(object, map, type);
-  __ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
-}
-void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
-                                           InstanceType instance_type,
-                                           Label* target, Label::Distance) {
-  ScratchRegisterScope temps(this);
-  Register type = temps.AcquireScratch();
-  if (v8_flags.debug_code) {
-    __ AssertNotSmi(map);
-    __ GetObjectType(map, type, type);
-    __ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE));
-  }
-  __ Lw(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
-  __ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
-}
-void BaselineAssembler::JumpIfPointer(Condition cc, Register value,
-                                      MemOperand operand, Label* target,
-                                      Label::Distance) {
-  ScratchRegisterScope temps(this);
-  Register scratch = temps.AcquireScratch();
-  __ Lw(scratch, operand);
-  __ Branch(target, AsMasmCondition(cc), value, Operand(scratch));
-}
-void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
-                                  Label* target, Label::Distance) {
-  ScratchRegisterScope temps(this);
-  Register scratch = temps.AcquireScratch();
-  __ li(scratch, Operand(smi));
-  __ SmiUntag(scratch);
-  __ Branch(target, AsMasmCondition(cc), value, Operand(scratch));
-}
-void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
-                                  Label* target, Label::Distance) {
-  __ AssertSmi(lhs);
-  __ AssertSmi(rhs);
-  __ Branch(target, AsMasmCondition(cc), lhs, Operand(rhs));
-}
-void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
-                                     MemOperand operand, Label* target,
-                                     Label::Distance) {
-  ScratchRegisterScope temps(this);
-  Register scratch = temps.AcquireScratch();
-  __ Lw(scratch, operand);
-  __ Branch(target, AsMasmCondition(cc), value, Operand(scratch));
-}
-void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
-                                     Register value, Label* target,
-                                     Label::Distance) {
-  ScratchRegisterScope temps(this);
-  Register scratch = temps.AcquireScratch();
-  __ Lw(scratch, operand);
-  __ Branch(target, AsMasmCondition(cc), scratch, Operand(value));
-}
-void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
-                                   Label* target, Label::Distance) {
-  __ Branch(target, AsMasmCondition(cc), value, Operand(byte));
-}
-
-void BaselineAssembler::Move(interpreter::Register output, Register source) {
-  Move(RegisterFrameOperand(output), source);
-}
-void BaselineAssembler::Move(Register output, TaggedIndex value) {
-  __ li(output, Operand(value.ptr()));
-}
-void BaselineAssembler::Move(MemOperand output, Register source) {
-  __ Sw(source, output);
-}
-void BaselineAssembler::Move(Register output, ExternalReference reference) {
-  __ li(output, Operand(reference));
-}
-void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
-  __ li(output, Operand(value));
-}
-void BaselineAssembler::Move(Register output, int32_t value) {
-  __ li(output, Operand(value));
-}
-void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
-  __ Move(output, source);
-}
-void BaselineAssembler::MoveSmi(Register output, Register source) {
-  __ Move(output, source);
-}
-
-namespace detail {
-
-template <typename Arg>
-inline Register ToRegister(BaselineAssembler* basm,
-                           BaselineAssembler::ScratchRegisterScope* scope,
-                           Arg arg) {
-  Register reg = scope->AcquireScratch();
-  basm->Move(reg, arg);
-  return reg;
-}
-inline Register ToRegister(BaselineAssembler* basm,
-                           BaselineAssembler::ScratchRegisterScope* scope,
-                           Register reg) {
-  return reg;
-}
-
-template <typename... Args>
-struct PushAllHelper;
-template <>
-struct PushAllHelper<> {
-  static int Push(BaselineAssembler* basm) { return 0; }
-  static int PushReverse(BaselineAssembler* basm) { return 0; }
-};
-// TODO(ishell): try to pack sequence of pushes into one instruction by
-// looking at regiser codes. For example, Push(r1, r2, r5, r0, r3, r4)
-// could be generated as two pushes: Push(r1, r2, r5) and Push(r0, r3, r4).
-template <typename Arg>
-struct PushAllHelper<Arg> {
-  static int Push(BaselineAssembler* basm, Arg arg) {
-    BaselineAssembler::ScratchRegisterScope scope(basm);
-    basm->masm()->Push(ToRegister(basm, &scope, arg));
-    return 1;
-  }
-  static int PushReverse(BaselineAssembler* basm, Arg arg) {
-    return Push(basm, arg);
-  }
-};
-// TODO(ishell): try to pack sequence of pushes into one instruction by
-// looking at regiser codes. For example, Push(r1, r2, r5, r0, r3, r4)
-// could be generated as two pushes: Push(r1, r2, r5) and Push(r0, r3, r4).
-template <typename Arg, typename... Args>
-struct PushAllHelper<Arg, Args...> {
-  static int Push(BaselineAssembler* basm, Arg arg, Args... args) {
-    PushAllHelper<Arg>::Push(basm, arg);
-    return 1 + PushAllHelper<Args...>::Push(basm, args...);
-  }
-  static int PushReverse(BaselineAssembler* basm, Arg arg, Args... args) {
-    int nargs = PushAllHelper<Args...>::PushReverse(basm, args...);
-    PushAllHelper<Arg>::Push(basm, arg);
-    return nargs + 1;
-  }
-};
-template <>
-struct PushAllHelper<interpreter::RegisterList> {
-  static int Push(BaselineAssembler* basm, interpreter::RegisterList list) {
-    for (int reg_index = 0; reg_index < list.register_count(); ++reg_index) {
-      PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
-    }
-    return list.register_count();
-  }
-  static int PushReverse(BaselineAssembler* basm,
-                         interpreter::RegisterList list) {
-    for (int reg_index = list.register_count() - 1; reg_index >= 0;
-         --reg_index) {
-      PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
-    }
-    return list.register_count();
-  }
-};
-
-template <typename... T>
-struct PopAllHelper;
-template <>
-struct PopAllHelper<> {
-  static void Pop(BaselineAssembler* basm) {}
-};
-// TODO(ishell): try to pack sequence of pops into one instruction by
-// looking at regiser codes. For example, Pop(r1, r2, r5, r0, r3, r4)
-// could be generated as two pops: Pop(r1, r2, r5) and Pop(r0, r3, r4).
-template <>
-struct PopAllHelper<Register> {
-  static void Pop(BaselineAssembler* basm, Register reg) {
-    basm->masm()->Pop(reg);
-  }
-};
-template <typename... T>
-struct PopAllHelper<Register, T...> {
-  static void Pop(BaselineAssembler* basm, Register reg, T... tail) {
-    PopAllHelper<Register>::Pop(basm, reg);
-    PopAllHelper<T...>::Pop(basm, tail...);
-  }
-};
-
-}  // namespace detail
-
-template <typename... T>
-int BaselineAssembler::Push(T... vals) {
-  return detail::PushAllHelper<T...>::Push(this, vals...);
-}
-
-template <typename... T>
-void BaselineAssembler::PushReverse(T... vals) {
-  detail::PushAllHelper<T...>::PushReverse(this, vals...);
-}
-
-template <typename... T>
-void BaselineAssembler::Pop(T... registers) {
-  detail::PopAllHelper<T...>::Pop(this, registers...);
-}
-
-void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
-                                               int offset) {
-  __ Lw(output, FieldMemOperand(source, offset));
-}
-void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
-                                              int offset) {
-  __ Lw(output, FieldMemOperand(source, offset));
-}
-void BaselineAssembler::LoadTaggedSignedFieldAndUntag(Register output,
-                                                      Register source,
-                                                      int offset) {
-  LoadTaggedSignedField(output, source, offset);
-  SmiUntag(output);
-}
-void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
-                                           int offset) {
-  __ Lw(output, FieldMemOperand(source, offset));
-}
-void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
-                                                  Register source, int offset) {
-  __ lhu(output, FieldMemOperand(source, offset));
-}
-void BaselineAssembler::LoadWord8Field(Register output, Register source,
-                                       int offset) {
-  __ lb(output, FieldMemOperand(source, offset));
-}
-void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
-                                               Smi value) {
-  ASM_CODE_COMMENT(masm_);
-  ScratchRegisterScope temps(this);
-  Register scratch = temps.AcquireScratch();
-  __ li(scratch, Operand(value));
-  __ Sw(scratch, FieldMemOperand(target, offset));
-}
-void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
-                                                         int offset,
-                                                         Register value) {
-  ASM_CODE_COMMENT(masm_);
-  __ Sw(value, FieldMemOperand(target, offset));
-  ScratchRegisterScope temps(this);
-  Register scratch = temps.AcquireScratch();
-  __ RecordWriteField(target, offset, value, scratch, kRAHasNotBeenSaved,
-                      SaveFPRegsMode::kIgnore);
-}
-void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
-                                                       int offset,
-                                                       Register value) {
-  __ Sw(value, FieldMemOperand(target, offset));
-}
-
-void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
-                                                Register feedback_vector,
-                                                FeedbackSlot slot,
-                                                Label* on_result,
-                                                Label::Distance) {
-  Label fallthrough;
-  LoadTaggedPointerField(scratch_and_result, feedback_vector,
-                         FeedbackVector::OffsetOfElementAt(slot.ToInt()));
-  __ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
-  // Is it marked_for_deoptimization? If yes, clear the slot.
- { - ScratchRegisterScope temps(this); - Register scratch = temps.AcquireScratch(); - __ TestCodeTIsMarkedForDeoptimizationAndJump(scratch_and_result, scratch, - eq, on_result); - __ li(scratch, __ ClearedValue()); - StoreTaggedFieldNoWriteBarrier( - feedback_vector, FeedbackVector::OffsetOfElementAt(slot.ToInt()), - scratch); - } - __ bind(&fallthrough); - Move(scratch_and_result, 0); -} - -void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( - int32_t weight, Label* skip_interrupt_label) { - ASM_CODE_COMMENT(masm_); - ScratchRegisterScope scratch_scope(this); - Register feedback_cell = scratch_scope.AcquireScratch(); - LoadFunction(feedback_cell); - LoadTaggedPointerField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); - - Register interrupt_budget = scratch_scope.AcquireScratch(); - __ Lw(interrupt_budget, - FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset)); - __ Addu(interrupt_budget, interrupt_budget, weight); - __ Sw(interrupt_budget, - FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset)); - if (skip_interrupt_label) { - DCHECK_LT(weight, 0); - __ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(zero_reg)); - } -} -void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded( - Register weight, Label* skip_interrupt_label) { - ASM_CODE_COMMENT(masm_); - ScratchRegisterScope scratch_scope(this); - Register feedback_cell = scratch_scope.AcquireScratch(); - LoadFunction(feedback_cell); - LoadTaggedPointerField(feedback_cell, feedback_cell, - JSFunction::kFeedbackCellOffset); - - Register interrupt_budget = scratch_scope.AcquireScratch(); - __ Lw(interrupt_budget, - FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset)); - __ Addu(interrupt_budget, interrupt_budget, weight); - __ Sw(interrupt_budget, - FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset)); - if (skip_interrupt_label) - __ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(zero_reg)); -} - -void BaselineAssembler::LdaContextSlot(Register context, uint32_t index, - uint32_t depth) { - for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); - } - LoadTaggedAnyField(kInterpreterAccumulatorRegister, context, - Context::OffsetOfElementAt(index)); -} - -void BaselineAssembler::StaContextSlot(Register context, Register value, - uint32_t index, uint32_t depth) { - for (; depth > 0; --depth) { - LoadTaggedPointerField(context, context, Context::kPreviousOffset); - } - StoreTaggedFieldWithWriteBarrier(context, Context::OffsetOfElementAt(index), - value); -} - -void BaselineAssembler::AddSmi(Register lhs, Smi rhs) { - __ Addu(lhs, lhs, Operand(rhs)); -} - -void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) { - __ And(output, lhs, Operand(rhs)); -} - -void BaselineAssembler::Switch(Register reg, int case_value_base, - Label** labels, int num_labels) { - ASM_CODE_COMMENT(masm_); - Label fallthrough; - if (case_value_base != 0) { - __ Subu(reg, reg, Operand(case_value_base)); - } - - __ Branch(&fallthrough, AsMasmCondition(Condition::kUnsignedGreaterThanEqual), - reg, Operand(num_labels)); - - __ GenerateSwitchTable(reg, num_labels, - [labels](size_t i) { return labels[i]; }); - - __ bind(&fallthrough); -} - -#undef __ - -#define __ basm. 
- -void BaselineAssembler::EmitReturn(MacroAssembler* masm) { - ASM_CODE_COMMENT(masm); - BaselineAssembler basm(masm); - - Register weight = BaselineLeaveFrameDescriptor::WeightRegister(); - Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister(); - - { - ASM_CODE_COMMENT_STRING(masm, "Update Interrupt Budget"); - - Label skip_interrupt_label; - __ AddToInterruptBudgetAndJumpIfNotExceeded(weight, &skip_interrupt_label); - __ masm()->SmiTag(params_size); - __ masm()->Push(params_size, kInterpreterAccumulatorRegister); - - __ LoadContext(kContextRegister); - __ LoadFunction(kJSFunctionRegister); - __ masm()->Push(kJSFunctionRegister); - __ CallRuntime(Runtime::kBytecodeBudgetInterrupt_Sparkplug, 1); - - __ masm()->Pop(params_size, kInterpreterAccumulatorRegister); - __ masm()->SmiUntag(params_size); - - __ Bind(&skip_interrupt_label); - } - - BaselineAssembler::ScratchRegisterScope temps(&basm); - Register actual_params_size = temps.AcquireScratch(); - // Compute the size of the actual parameters + receiver (in bytes). - __ Move(actual_params_size, - MemOperand(fp, StandardFrameConstants::kArgCOffset)); - - // If actual is bigger than formal, then we should use it to free up the stack - // arguments. - Label corrected_args_count; - __ masm()->Branch(&corrected_args_count, ge, params_size, - Operand(actual_params_size)); - __ masm()->Move(params_size, actual_params_size); - __ Bind(&corrected_args_count); - - // Leave the frame (also dropping the register file). - __ masm()->LeaveFrame(StackFrame::BASELINE); - - // Drop receiver + arguments. - __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger, - TurboAssembler::kCountIncludesReceiver); - - __ masm()->Ret(); -} - -#undef __ - -inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator( - Register reg) { - assembler_->masm()->Assert(eq, AbortReason::kUnexpectedValue, reg, - Operand(kInterpreterAccumulatorRegister)); -} - -} // namespace baseline -} // namespace internal -} // namespace v8 - -#endif // V8_BASELINE_MIPS_BASELINE_ASSEMBLER_MIPS_INL_H_ diff --git a/src/baseline/mips/baseline-compiler-mips-inl.h b/src/baseline/mips/baseline-compiler-mips-inl.h deleted file mode 100644 index 3e8bb98e14..0000000000 --- a/src/baseline/mips/baseline-compiler-mips-inl.h +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2021 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_BASELINE_MIPS_BASELINE_COMPILER_MIPS_INL_H_ -#define V8_BASELINE_MIPS_BASELINE_COMPILER_MIPS_INL_H_ - -#include "src/base/logging.h" -#include "src/baseline/baseline-compiler.h" - -namespace v8 { -namespace internal { -namespace baseline { - -#define __ basm_. 
- -void BaselineCompiler::Prologue() { - ASM_CODE_COMMENT(&masm_); - __ masm()->EnterFrame(StackFrame::BASELINE); - DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister); - int max_frame_size = - bytecode_->frame_size() + max_call_args_ * kSystemPointerSize; - CallBuiltin( - kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister, - max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_); - - PrologueFillFrame(); -} - -void BaselineCompiler::PrologueFillFrame() { - ASM_CODE_COMMENT(&masm_); - // Inlined register frame fill - interpreter::Register new_target_or_generator_register = - bytecode_->incoming_new_target_or_generator_register(); - __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); - int register_count = bytecode_->register_count(); - // Magic value - const int kLoopUnrollSize = 8; - const int new_target_index = new_target_or_generator_register.index(); - const bool has_new_target = new_target_index != kMaxInt; - if (has_new_target) { - DCHECK_LE(new_target_index, register_count); - __ masm()->Addu(sp, sp, Operand(-(kPointerSize * new_target_index))); - for (int i = 0; i < new_target_index; i++) { - __ masm()->Sw(kInterpreterAccumulatorRegister, MemOperand(sp, i * 4)); - } - // Push new_target_or_generator. - __ Push(kJavaScriptCallNewTargetRegister); - register_count -= new_target_index + 1; - } - if (register_count < 2 * kLoopUnrollSize) { - // If the frame is small enough, just unroll the frame fill completely. - __ masm()->Addu(sp, sp, Operand(-(kPointerSize * register_count))); - for (int i = 0; i < register_count; ++i) { - __ masm()->Sw(kInterpreterAccumulatorRegister, MemOperand(sp, i * 4)); - } - } else { - __ masm()->Addu(sp, sp, Operand(-(kPointerSize * register_count))); - for (int i = 0; i < register_count; ++i) { - __ masm()->Sw(kInterpreterAccumulatorRegister, MemOperand(sp, i * 4)); - } - } -} - -void BaselineCompiler::VerifyFrameSize() { - ASM_CODE_COMMENT(&masm_); - __ masm()->Addu(kScratchReg, sp, - Operand(InterpreterFrameConstants::kFixedFrameSizeFromFp + - bytecode_->frame_size())); - __ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer, kScratchReg, - Operand(fp)); -} - -} // namespace baseline -} // namespace internal -} // namespace v8 - -#endif // V8_BASELINE_MIPS_BASELINE_COMPILER_MIPS_INL_H_ diff --git a/src/builtins/builtins-internal-gen.cc b/src/builtins/builtins-internal-gen.cc index e045e73c06..eb7790d3fc 100644 --- a/src/builtins/builtins-internal-gen.cc +++ b/src/builtins/builtins-internal-gen.cc @@ -1270,11 +1270,11 @@ void Builtins::Generate_CEntry_Return2_SaveFPRegs_ArgvOnStack_BuiltinExit( Generate_CEntry(masm, 2, SaveFPRegsMode::kSave, ArgvMode::kStack, true); } -#if !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS) +#if !defined(V8_TARGET_ARCH_ARM) void Builtins::Generate_MemCopyUint8Uint8(MacroAssembler* masm) { masm->Call(BUILTIN_CODE(masm->isolate(), Illegal), RelocInfo::CODE_TARGET); } -#endif // !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS) +#endif // !defined(V8_TARGET_ARCH_ARM) #ifndef V8_TARGET_ARCH_IA32 void Builtins::Generate_MemMove(MacroAssembler* masm) { diff --git a/src/builtins/builtins-sharedarraybuffer-gen.cc b/src/builtins/builtins-sharedarraybuffer-gen.cc index ff95dd7de4..c6b824cab7 100644 --- a/src/builtins/builtins-sharedarraybuffer-gen.cc +++ b/src/builtins/builtins-sharedarraybuffer-gen.cc @@ -251,16 +251,6 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) { BIND(&u32); Return(ChangeUint32ToTagged(AtomicLoad( 
       AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 2))));
-#if (V8_TARGET_ARCH_MIPS && !_MIPS_ARCH_MIPS32R6)
-  BIND(&i64);
-  Goto(&u64);
-
-  BIND(&u64);
-  {
-    TNode<Number> index_number = ChangeUintPtrToTagged(index_word);
-    Return(CallRuntime(Runtime::kAtomicsLoad64, context, array, index_number));
-  }
-#else
   BIND(&i64);
   Return(BigIntFromSigned64(AtomicLoad64<AtomicInt64>(
       AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 3))));
@@ -268,7 +258,6 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
   BIND(&u64);
   Return(BigIntFromUnsigned64(AtomicLoad64<AtomicUint64>(
       AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 3))));
-#endif  //(V8_TARGET_ARCH_MIPS && !_MIPS_ARCH_MIPS32R6)
 
   // This shouldn't happen, we've already validated the type.
   BIND(&other);
@@ -358,11 +347,6 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
   Return(value_integer);
 
   BIND(&u64);
-#if V8_TARGET_ARCH_MIPS && !_MIPS_ARCH_MIPS32R6
-  TNode<Number> index_number = ChangeUintPtrToTagged(index_word);
-  Return(CallRuntime(Runtime::kAtomicsStore64, context, array, index_number,
-                     value));
-#else
   // 4. If arrayTypeName is "BigUint64Array" or "BigInt64Array",
   //    let v be ? ToBigInt(value).
   TNode<BigInt> value_bigint = ToBigInt(context, value);
@@ -379,7 +363,6 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
   AtomicStore64(AtomicMemoryOrder::kSeqCst, backing_store,
                 WordShl(index_word, 3), var_low.value(), high);
   Return(value_bigint);
-#endif
 
   // This shouldn't happen, we've already validated the type.
   BIND(&other);
@@ -423,7 +406,7 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
   TNode<UintPtrT> index_word =
       ValidateAtomicAccess(array, index_or_field_name, context);
 
-#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+#if V8_TARGET_ARCH_MIPS64
   TNode<Number> index_number = ChangeUintPtrToTagged(index_word);
   Return(CallRuntime(Runtime::kAtomicsExchange, context, array, index_number,
                      value));
@@ -523,7 +506,7 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
   // This shouldn't happen, we've already validated the type.
   BIND(&other);
   Unreachable();
-#endif  // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+#endif  // V8_TARGET_ARCH_MIPS64
 
   BIND(&detached_or_out_of_bounds);
   {
@@ -558,7 +541,7 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
   // 2. Let i be ? ValidateAtomicAccess(typedArray, index).
   TNode<UintPtrT> index_word = ValidateAtomicAccess(array, index, context);
 
-#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+#if V8_TARGET_ARCH_MIPS64
   TNode<Number> index_number = ChangeUintPtrToTagged(index_word);
   Return(CallRuntime(Runtime::kAtomicsCompareExchange, context, array,
                      index_number, old_value, new_value));
@@ -677,7 +660,7 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
   // This shouldn't happen, we've already validated the type.
   BIND(&other);
   Unreachable();
-#endif  // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+#endif  // V8_TARGET_ARCH_MIPS64
 
   BIND(&detached_or_out_of_bounds);
   {
@@ -728,7 +711,7 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
   // 2. Let i be ? ValidateAtomicAccess(typedArray, index).
   TNode<UintPtrT> index_word = ValidateAtomicAccess(array, index, context);
 
-#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+#if V8_TARGET_ARCH_MIPS64
   TNode<Number> index_number = ChangeUintPtrToTagged(index_word);
   Return(CallRuntime(runtime_function, context, array, index_number, value));
 #else
@@ -818,7 +801,7 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
 //
   // This shouldn't happen, we've already validated the type.
BIND(&other); Unreachable(); -#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 +#endif // V8_TARGET_ARCH_MIPS64 BIND(&detached_or_out_of_bounds); ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name); diff --git a/src/builtins/builtins.cc b/src/builtins/builtins.cc index faa1308b98..f1311a8831 100644 --- a/src/builtins/builtins.cc +++ b/src/builtins/builtins.cc @@ -576,14 +576,14 @@ bool Builtins::CodeObjectIsExecutable(Builtin builtin) { case Builtin::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit: return true; default: -#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 +#if V8_TARGET_ARCH_MIPS64 // TODO(Loongson): Move non-JS linkage builtins code objects into RO_SPACE // caused MIPS platform to crash, and we need some time to handle it. Now // disable this change temporarily on MIPS platform. return true; #else return false; -#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 +#endif // V8_TARGET_ARCH_MIPS64 } } diff --git a/src/builtins/mips/builtins-mips.cc b/src/builtins/mips/builtins-mips.cc deleted file mode 100644 index 0add10f2f6..0000000000 --- a/src/builtins/mips/builtins-mips.cc +++ /dev/null @@ -1,4263 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#if V8_TARGET_ARCH_MIPS - -#include "src/api/api-arguments.h" -#include "src/codegen/code-factory.h" -#include "src/codegen/interface-descriptors-inl.h" -#include "src/debug/debug.h" -#include "src/deoptimizer/deoptimizer.h" -#include "src/execution/frame-constants.h" -#include "src/execution/frames.h" -#include "src/logging/counters.h" -// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop. -#include "src/codegen/macro-assembler-inl.h" -#include "src/codegen/mips/constants-mips.h" -#include "src/codegen/register-configuration.h" -#include "src/heap/heap-inl.h" -#include "src/objects/cell.h" -#include "src/objects/foreign.h" -#include "src/objects/heap-number.h" -#include "src/objects/js-generator.h" -#include "src/objects/objects-inl.h" -#include "src/objects/smi.h" -#include "src/runtime/runtime.h" - -#if V8_ENABLE_WEBASSEMBLY -#include "src/wasm/wasm-linkage.h" -#include "src/wasm/wasm-objects.h" -#endif // V8_ENABLE_WEBASSEMBLY - -namespace v8 { -namespace internal { - -#define __ ACCESS_MASM(masm) - -void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) { - __ li(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address)); - __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame), - RelocInfo::CODE_TARGET); -} - -static void GenerateTailCallToReturnedCode(MacroAssembler* masm, - Runtime::FunctionId function_id) { - // ----------- S t a t e ------------- - // -- a0 : actual argument count - // -- a1 : target function (preserved for callee) - // -- a3 : new target (preserved for callee) - // ----------------------------------- - { - FrameScope scope(masm, StackFrame::INTERNAL); - // Push a copy of the target function, the new target and the actual - // argument count. - // Push function as parameter to the runtime call. - __ SmiTag(kJavaScriptCallArgCountRegister); - __ Push(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister, - kJavaScriptCallArgCountRegister, kJavaScriptCallTargetRegister); - - __ CallRuntime(function_id, 1); - - // Restore target function, new target and actual argument count. 
- __ Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister, - kJavaScriptCallArgCountRegister); - __ SmiUntag(kJavaScriptCallArgCountRegister); - } - - static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); - __ Addu(a2, v0, Code::kHeaderSize - kHeapObjectTag); - __ Jump(a2); -} - -namespace { - -enum class ArgumentsElementType { - kRaw, // Push arguments as they are. - kHandle // Dereference arguments before pushing. -}; - -void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc, - Register scratch, Register scratch2, - ArgumentsElementType element_type) { - DCHECK(!AreAliased(array, argc, scratch)); - Label loop, entry; - __ Subu(scratch, argc, Operand(kJSArgcReceiverSlots)); - __ Branch(&entry); - __ bind(&loop); - __ Lsa(scratch2, array, scratch, kSystemPointerSizeLog2); - __ lw(scratch2, MemOperand(scratch2)); - if (element_type == ArgumentsElementType::kHandle) { - __ lw(scratch2, MemOperand(scratch2)); - } - __ push(scratch2); - __ bind(&entry); - __ Addu(scratch, scratch, Operand(-1)); - __ Branch(&loop, greater_equal, scratch, Operand(zero_reg)); -} - -void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- a0 : number of arguments - // -- a1 : constructor function - // -- a3 : new target - // -- cp : context - // -- ra : return address - // -- sp[...]: constructor arguments - // ----------------------------------- - - // Enter a construct frame. - { - FrameScope scope(masm, StackFrame::CONSTRUCT); - - // Preserve the incoming parameters on the stack. - __ SmiTag(a0); - __ Push(cp, a0); - __ SmiUntag(a0); - // Set up pointer to first argument (skip receiver). - __ Addu( - t2, fp, - Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize)); - // Copy arguments and receiver to the expression stack. - // t2: Pointer to start of arguments. - // a0: Number of arguments. - Generate_PushArguments(masm, t2, a0, t3, t0, ArgumentsElementType::kRaw); - // The receiver for the builtin/api call. - __ PushRoot(RootIndex::kTheHoleValue); - - // Call the function. - // a0: number of arguments (untagged) - // a1: constructor function - // a3: new target - __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall); - - // Restore context from the frame. - __ lw(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset)); - // Restore smi-tagged arguments count from the frame. - __ lw(t3, MemOperand(fp, ConstructFrameConstants::kLengthOffset)); - // Leave construct frame. - } - - // Remove caller arguments from the stack and return. - __ DropArguments(t3, TurboAssembler::kCountIsSmi, - TurboAssembler::kCountIncludesReceiver); - __ Ret(); -} - -} // namespace - -// The construct stub for ES5 constructor functions and ES6 class constructors. -void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- a0: number of arguments (untagged) - // -- a1: constructor function - // -- a3: new target - // -- cp: context - // -- ra: return address - // -- sp[...]: constructor arguments - // ----------------------------------- - - // Enter a construct frame. - FrameScope scope(masm, StackFrame::MANUAL); - Label post_instantiation_deopt_entry, not_create_implicit_receiver; - __ EnterFrame(StackFrame::CONSTRUCT); - - // Preserve the incoming parameters on the stack. 
- __ SmiTag(a0); - __ Push(cp, a0, a1); - __ PushRoot(RootIndex::kTheHoleValue); - __ Push(a3); - - // ----------- S t a t e ------------- - // -- sp[0*kPointerSize]: new target - // -- sp[1*kPointerSize]: padding - // -- a1 and sp[2*kPointerSize]: constructor function - // -- sp[3*kPointerSize]: number of arguments (tagged) - // -- sp[4*kPointerSize]: context - // ----------------------------------- - - __ lw(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); - __ lw(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset)); - __ DecodeField(t2); - __ JumpIfIsInRange( - t2, static_cast(FunctionKind::kDefaultDerivedConstructor), - static_cast(FunctionKind::kDerivedConstructor), - ¬_create_implicit_receiver); - - // If not derived class constructor: Allocate the new receiver object. - __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject), RelocInfo::CODE_TARGET); - __ Branch(&post_instantiation_deopt_entry); - - // Else: use TheHoleValue as receiver for constructor call - __ bind(¬_create_implicit_receiver); - __ LoadRoot(v0, RootIndex::kTheHoleValue); - - // ----------- S t a t e ------------- - // -- v0: receiver - // -- Slot 4 / sp[0*kPointerSize]: new target - // -- Slot 3 / sp[1*kPointerSize]: padding - // -- Slot 2 / sp[2*kPointerSize]: constructor function - // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged) - // -- Slot 0 / sp[4*kPointerSize]: context - // ----------------------------------- - // Deoptimizer enters here. - masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset( - masm->pc_offset()); - __ bind(&post_instantiation_deopt_entry); - - // Restore new target. - __ Pop(a3); - - // Push the allocated receiver to the stack. - __ Push(v0); - - // We need two copies because we may have to return the original one - // and the calling conventions dictate that the called function pops the - // receiver. The second copy is pushed after the arguments, we saved in s0 - // since v0 will store the return value of callRuntime. - __ mov(s0, v0); - - // Set up pointer to last argument. - __ Addu(t2, fp, Operand(StandardFrameConstants::kCallerSPOffset + - kSystemPointerSize)); - - // ----------- S t a t e ------------- - // -- r3: new target - // -- sp[0*kPointerSize]: implicit receiver - // -- sp[1*kPointerSize]: implicit receiver - // -- sp[2*kPointerSize]: padding - // -- sp[3*kPointerSize]: constructor function - // -- sp[4*kPointerSize]: number of arguments (tagged) - // -- sp[5*kPointerSize]: context - // ----------------------------------- - - // Restore constructor function and argument count. - __ lw(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset)); - __ lw(a0, MemOperand(fp, ConstructFrameConstants::kLengthOffset)); - __ SmiUntag(a0); - - Label stack_overflow; - __ StackOverflowCheck(a0, t0, t1, &stack_overflow); - - // TODO(victorgomes): When the arguments adaptor is completely removed, we - // should get the formal parameter count and copy the arguments in its - // correct position (including any undefined), instead of delaying this to - // InvokeFunction. - - // Copy arguments and receiver to the expression stack. - // t2: Pointer to start of argument. - // a0: Number of arguments. - Generate_PushArguments(masm, t2, a0, t0, t1, ArgumentsElementType::kRaw); - - // We need two copies because we may have to return the original one - // and the calling conventions dictate that the called function pops the - // receiver. The second copy is pushed after the arguments. - __ Push(s0); - - // Call the function. 
- __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall); - - // ----------- S t a t e ------------- - // -- v0: constructor result - // -- sp[0*kPointerSize]: implicit receiver - // -- sp[1*kPointerSize]: padding - // -- sp[2*kPointerSize]: constructor function - // -- sp[3*kPointerSize]: number of arguments - // -- sp[4*kPointerSize]: context - // ----------------------------------- - - // Store offset of return address for deoptimizer. - masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset( - masm->pc_offset()); - - // If the result is an object (in the ECMA sense), we should get rid - // of the receiver and use the result; see ECMA-262 section 13.2.2-7 - // on page 74. - Label use_receiver, do_throw, leave_and_return, check_receiver; - - // If the result is undefined, we jump out to using the implicit receiver. - __ JumpIfNotRoot(v0, RootIndex::kUndefinedValue, &check_receiver); - - // Otherwise we do a smi check and fall through to check if the return value - // is a valid receiver. - - // Throw away the result of the constructor invocation and use the - // on-stack receiver as the result. - __ bind(&use_receiver); - __ lw(v0, MemOperand(sp, 0 * kPointerSize)); - __ JumpIfRoot(v0, RootIndex::kTheHoleValue, &do_throw); - - __ bind(&leave_and_return); - // Restore smi-tagged arguments count from the frame. - __ lw(a1, MemOperand(fp, ConstructFrameConstants::kLengthOffset)); - // Leave construct frame. - __ LeaveFrame(StackFrame::CONSTRUCT); - - // Remove caller arguments from the stack and return. - __ DropArguments(a1, TurboAssembler::kCountIsSmi, - TurboAssembler::kCountIncludesReceiver); - __ Ret(); - - __ bind(&check_receiver); - // If the result is a smi, it is *not* an object in the ECMA sense. - __ JumpIfSmi(v0, &use_receiver); - - // If the type of the result (stored in its map) is less than - // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense. - __ GetObjectType(v0, t2, t2); - static_assert(LAST_JS_RECEIVER_TYPE == LAST_TYPE); - __ Branch(&leave_and_return, greater_equal, t2, - Operand(FIRST_JS_RECEIVER_TYPE)); - __ Branch(&use_receiver); - - __ bind(&do_throw); - // Restore the context from the frame. - __ lw(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset)); - __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject); - __ break_(0xCC); - - __ bind(&stack_overflow); - // Restore the context from the frame. - __ lw(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset)); - __ CallRuntime(Runtime::kThrowStackOverflow); - // Unreachable code. - __ break_(0xCC); -} - -void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) { - Generate_JSBuiltinsConstructStubHelper(masm); -} - -void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) { - FrameScope scope(masm, StackFrame::INTERNAL); - __ Push(a1); - __ CallRuntime(Runtime::kThrowConstructedNonConstructable); -} - -// Clobbers scratch1 and scratch2; preserves all other registers. -static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc, - Register scratch1, Register scratch2) { - ASM_CODE_COMMENT(masm); - // Check the stack for overflow. We are not trying to catch - // interruptions (e.g. debug break and preemption) here, so the "real stack - // limit" is checked. - Label okay; - __ LoadStackLimit(scratch1, MacroAssembler::StackLimitKind::kRealStackLimit); - // Make a2 the space we have left. The stack might already be overflowed - // here which will cause a2 to become negative. 
- __ Subu(scratch1, sp, scratch1); - // Check if the arguments will overflow the stack. - __ sll(scratch2, argc, kPointerSizeLog2); - // Signed comparison. - __ Branch(&okay, gt, scratch1, Operand(scratch2)); - - // Out of stack space. - __ CallRuntime(Runtime::kThrowStackOverflow); - - __ bind(&okay); -} - -namespace { - -// Used by JSEntryTrampoline to refer C++ parameter to JSEntryVariant. -constexpr int kPushedStackSpace = - kCArgsSlotsSize + (kNumCalleeSaved + 1) * kPointerSize + - kNumCalleeSavedFPU * kDoubleSize + 4 * kPointerSize + - EntryFrameConstants::kCallerFPOffset; - -// Called with the native C calling convention. The corresponding function -// signature is either: -// -// using JSEntryFunction = GeneratedCode; -// or -// using JSEntryFunction = GeneratedCode; -// -// Passes through a0, a1, a2, a3 and stack to JSEntryTrampoline. -void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, - Builtin entry_trampoline) { - Label invoke, handler_entry, exit; - - int pushed_stack_space = kCArgsSlotsSize; - { - NoRootArrayScope no_root_array(masm); - - // Registers: - // a0: root_register_value - - // Save callee saved registers on the stack. - __ MultiPush(kCalleeSaved | ra); - pushed_stack_space += - kNumCalleeSaved * kPointerSize + kPointerSize /* ra */; - - // Save callee-saved FPU registers. - __ MultiPushFPU(kCalleeSavedFPU); - pushed_stack_space += kNumCalleeSavedFPU * kDoubleSize; - - // Set up the reserved register for 0.0. - __ Move(kDoubleRegZero, 0.0); - - // Initialize the root register. - // C calling convention. The first argument is passed in a0. - __ mov(kRootRegister, a0); - } - - // We build an EntryFrame. - __ li(t3, Operand(-1)); // Push a bad frame pointer to fail if it is used. - __ li(t2, Operand(StackFrame::TypeToMarker(type))); - __ li(t1, Operand(StackFrame::TypeToMarker(type))); - __ li(t4, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, - masm->isolate())); - __ lw(t0, MemOperand(t4)); - __ Push(t3, t2, t1, t0); - pushed_stack_space += 4 * kPointerSize; - - // Clear c_entry_fp, now we've pushed its previous value to the stack. - // If the c_entry_fp is not already zero and we don't clear it, the - // SafeStackFrameIterator will assume we are executing C++ and miss the JS - // frames on top. - __ Sw(zero_reg, MemOperand(t4)); - - // Set up frame pointer for the frame to be pushed. - __ addiu(fp, sp, -EntryFrameConstants::kCallerFPOffset); - pushed_stack_space += EntryFrameConstants::kCallerFPOffset; - - // Registers: - // a0: root_register_value - // - // Stack: - // caller fp | - // function slot | entry frame - // context slot | - // bad fp (0xFF...F) | - // callee saved registers + ra - // 4 args slots - - // If this is the outermost JS call, set js_entry_sp value. - Label non_outermost_js; - ExternalReference js_entry_sp = ExternalReference::Create( - IsolateAddressId::kJSEntrySPAddress, masm->isolate()); - __ li(t1, js_entry_sp); - __ lw(t2, MemOperand(t1)); - __ Branch(&non_outermost_js, ne, t2, Operand(zero_reg)); - __ sw(fp, MemOperand(t1)); - __ li(t0, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME)); - Label cont; - __ b(&cont); - __ nop(); // Branch delay slot nop. - __ bind(&non_outermost_js); - __ li(t0, Operand(StackFrame::INNER_JSENTRY_FRAME)); - __ bind(&cont); - __ push(t0); - - // Jump to a faked try block that does the invoke, with a faked catch - // block that sets the pending exception. - __ jmp(&invoke); - __ bind(&handler_entry); - - // Store the current pc as the handler offset. 
It's used later to create the - // handler table. - masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos()); - - // Caught exception: Store result (exception) in the pending exception - // field in the JSEnv and return a failure sentinel. Coming in here the - // fp will be invalid because the PushStackHandler below sets it to 0 to - // signal the existence of the JSEntry frame. - __ li(t0, ExternalReference::Create( - IsolateAddressId::kPendingExceptionAddress, masm->isolate())); - __ sw(v0, MemOperand(t0)); // We come back from 'invoke'. result is in v0. - __ LoadRoot(v0, RootIndex::kException); - __ b(&exit); // b exposes branch delay slot. - __ nop(); // Branch delay slot nop. - - // Invoke: Link this frame into the handler chain. - __ bind(&invoke); - __ PushStackHandler(); - // If an exception not caught by another handler occurs, this handler - // returns control to the code after the bal(&invoke) above, which - // restores all kCalleeSaved registers (including cp and fp) to their - // saved values before returning a failure to C. - // - // Preserve a1, a2 and a3 passed by C++ and pass them to the trampoline. - // - // Stack: - // handler frame - // entry frame - // callee saved registers + ra - // 4 args slots - // - // Invoke the function by calling through JS entry trampoline builtin and - // pop the faked function when we return. - Handle trampoline_code = - masm->isolate()->builtins()->code_handle(entry_trampoline); - DCHECK_EQ(kPushedStackSpace, pushed_stack_space); - USE(pushed_stack_space); - __ Call(trampoline_code, RelocInfo::CODE_TARGET); - - // Unlink this frame from the handler chain. - __ PopStackHandler(); - - __ bind(&exit); // v0 holds result - // Check if the current stack frame is marked as the outermost JS frame. - Label non_outermost_js_2; - __ pop(t1); - __ Branch(&non_outermost_js_2, ne, t1, - Operand(StackFrame::OUTERMOST_JSENTRY_FRAME)); - __ li(t1, ExternalReference(js_entry_sp)); - __ sw(zero_reg, MemOperand(t1)); - __ bind(&non_outermost_js_2); - - // Restore the top frame descriptors from the stack. - __ pop(t1); - __ li(t0, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, - masm->isolate())); - __ sw(t1, MemOperand(t0)); - - // Reset the stack to the callee saved registers. - __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset); - - // Restore callee-saved fpu registers. - __ MultiPopFPU(kCalleeSavedFPU); - - // Restore callee saved registers from the stack. - __ MultiPop(kCalleeSaved | ra); - // Return. - __ Jump(ra); -} - -} // namespace - -void Builtins::Generate_JSEntry(MacroAssembler* masm) { - Generate_JSEntryVariant(masm, StackFrame::ENTRY, Builtin::kJSEntryTrampoline); -} - -void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) { - Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY, - Builtin::kJSConstructEntryTrampoline); -} - -void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) { - Generate_JSEntryVariant(masm, StackFrame::ENTRY, - Builtin::kRunMicrotasksTrampoline); -} - -static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, - bool is_construct) { - // ----------- S t a t e ------------- - // -- a0: root_register_value (unused) - // -- a1: new.target - // -- a2: function - // -- a3: receiver_pointer - // -- [fp + kPushedStackSpace + 0 * kPointerSize]: argc - // -- [fp + kPushedStackSpace + 1 * kPointerSize]: argv - // ----------------------------------- - - // Enter an internal frame. 
- { - FrameScope scope(masm, StackFrame::INTERNAL); - - // Setup the context (we need to use the caller context from the isolate). - ExternalReference context_address = ExternalReference::Create( - IsolateAddressId::kContextAddress, masm->isolate()); - __ li(cp, context_address); - __ lw(cp, MemOperand(cp)); - - // Push the function onto the stack. - __ Push(a2); - - __ lw(s0, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - __ lw(a0, - MemOperand(s0, kPushedStackSpace + EntryFrameConstants::kArgcOffset)); - __ lw(s0, - MemOperand(s0, kPushedStackSpace + EntryFrameConstants::kArgvOffset)); - - // Check if we have enough stack space to push all arguments. - // Clobbers a2 and t0. - __ mov(t1, a0); - Generate_CheckStackOverflow(masm, t1, t0, t2); - - // Copy arguments to the stack. - // a0: argc - // s0: argv, i.e. points to first arg - Generate_PushArguments(masm, s0, a0, t2, t0, ArgumentsElementType::kHandle); - - // Push the receiver. - __ Push(a3); - - // a0: argc - // a1: function - // a3: new.target - __ mov(a3, a1); - __ mov(a1, a2); - - // Initialize all JavaScript callee-saved registers, since they will be seen - // by the garbage collector as part of handlers. - __ LoadRoot(t0, RootIndex::kUndefinedValue); - __ mov(s0, t0); - __ mov(s1, t0); - __ mov(s2, t0); - __ mov(s3, t0); - __ mov(s4, t0); - __ mov(s5, t0); - // s6 holds the root address. Do not clobber. - // s7 is cp. Do not init. - - // Invoke the code. - Handle builtin = is_construct - ? BUILTIN_CODE(masm->isolate(), Construct) - : masm->isolate()->builtins()->Call(); - __ Call(builtin, RelocInfo::CODE_TARGET); - - // Leave internal frame. - } - - __ Jump(ra); -} - -void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) { - Generate_JSEntryTrampolineHelper(masm, false); -} - -void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) { - Generate_JSEntryTrampolineHelper(masm, true); -} - -void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) { - // a1: microtask_queue - __ mov(RunMicrotasksDescriptor::MicrotaskQueueRegister(), a1); - __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET); -} - -static void AssertCodeIsBaseline(MacroAssembler* masm, Register code, - Register scratch) { - DCHECK(!AreAliased(code, scratch)); - // Verify that the code kind is baseline code via the CodeKind. 
- __ lw(scratch, FieldMemOperand(code, Code::kFlagsOffset)); - __ DecodeField(scratch); - __ Assert(eq, AbortReason::kExpectedBaselineData, scratch, - Operand(static_cast(CodeKind::BASELINE))); -} - -static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm, - Register sfi_data, - Register scratch1, - Label* is_baseline) { - ASM_CODE_COMMENT(masm); - Label done; - - __ GetObjectType(sfi_data, scratch1, scratch1); - if (v8_flags.debug_code) { - Label not_baseline; - __ Branch(¬_baseline, ne, scratch1, Operand(CODET_TYPE)); - AssertCodeIsBaseline(masm, sfi_data, scratch1); - __ Branch(is_baseline); - __ bind(¬_baseline); - } else { - __ Branch(is_baseline, eq, scratch1, Operand(CODET_TYPE)); - } - __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE)); - __ lw(sfi_data, - FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset)); - - __ bind(&done); -} - -// static -void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- v0 : the value to pass to the generator - // -- a1 : the JSGeneratorObject to resume - // -- ra : return address - // ----------------------------------- - - // Store input value into generator object. - __ sw(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset)); - __ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, v0, a3, - kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore); - - // Check that a1 is still valid, RecordWrite might have clobbered it. - __ AssertGeneratorObject(a1); - - // Load suspended function and context. - __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset)); - __ lw(cp, FieldMemOperand(t0, JSFunction::kContextOffset)); - - // Flood function if we are stepping. - Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator; - Label stepping_prepared; - ExternalReference debug_hook = - ExternalReference::debug_hook_on_function_call_address(masm->isolate()); - __ li(t1, debug_hook); - __ lb(t1, MemOperand(t1)); - __ Branch(&prepare_step_in_if_stepping, ne, t1, Operand(zero_reg)); - - // Flood function if we need to continue stepping in the suspended generator. - ExternalReference debug_suspended_generator = - ExternalReference::debug_suspended_generator_address(masm->isolate()); - __ li(t1, debug_suspended_generator); - __ lw(t1, MemOperand(t1)); - __ Branch(&prepare_step_in_suspended_generator, eq, a1, Operand(t1)); - __ bind(&stepping_prepared); - - // Check the stack for overflow. We are not trying to catch interruptions - // (i.e. debug break and preemption) here, so check the "real stack limit". - Label stack_overflow; - __ LoadStackLimit(kScratchReg, - MacroAssembler::StackLimitKind::kRealStackLimit); - __ Branch(&stack_overflow, lo, sp, Operand(kScratchReg)); - - // ----------- S t a t e ------------- - // -- a1 : the JSGeneratorObject to resume - // -- t0 : generator function - // -- cp : generator context - // -- ra : return address - // ----------------------------------- - - // Copy the function arguments from the generator object's register file. 
- - __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset)); - __ lhu(a3, - FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset)); - __ Subu(a3, a3, Operand(kJSArgcReceiverSlots)); - __ lw(t1, - FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset)); - { - Label done_loop, loop; - __ bind(&loop); - __ Subu(a3, a3, Operand(1)); - __ Branch(&done_loop, lt, a3, Operand(zero_reg)); - __ Lsa(kScratchReg, t1, a3, kPointerSizeLog2); - __ Lw(kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize)); - __ Push(kScratchReg); - __ Branch(&loop); - __ bind(&done_loop); - // Push receiver. - __ Lw(kScratchReg, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset)); - __ Push(kScratchReg); - } - - // Underlying function needs to have bytecode available. - if (v8_flags.debug_code) { - Label is_baseline; - __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset)); - __ lw(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset)); - GetSharedFunctionInfoBytecodeOrBaseline(masm, a3, a0, &is_baseline); - __ GetObjectType(a3, a3, a3); - __ Assert(eq, AbortReason::kMissingBytecodeArray, a3, - Operand(BYTECODE_ARRAY_TYPE)); - __ bind(&is_baseline); - } - - // Resume (Ignition/TurboFan) generator object. - { - __ lw(a0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset)); - __ lhu(a0, FieldMemOperand( - a0, SharedFunctionInfo::kFormalParameterCountOffset)); - // We abuse new.target both to indicate that this is a resume call and to - // pass in the generator object. In ordinary calls, new.target is always - // undefined because generator functions are non-constructable. - __ Move(a3, a1); - __ Move(a1, t0); - static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); - __ lw(a2, FieldMemOperand(a1, JSFunction::kCodeOffset)); - __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag); - __ Jump(a2); - } - - __ bind(&prepare_step_in_if_stepping); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ Push(a1, t0); - // Push hole as receiver since we do not use it for stepping. - __ PushRoot(RootIndex::kTheHoleValue); - __ CallRuntime(Runtime::kDebugOnFunctionCall); - __ Pop(a1); - } - __ Branch(USE_DELAY_SLOT, &stepping_prepared); - __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset)); - - __ bind(&prepare_step_in_suspended_generator); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ Push(a1); - __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator); - __ Pop(a1); - } - __ Branch(USE_DELAY_SLOT, &stepping_prepared); - __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset)); - - __ bind(&stack_overflow); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ CallRuntime(Runtime::kThrowStackOverflow); - __ break_(0xCC); // This should be unreachable. - } -} - -static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm, - Register optimized_code, - Register closure, - Register scratch1, - Register scratch2) { - ASM_CODE_COMMENT(masm); - // Store code entry in the closure. - __ sw(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset)); - __ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below. 
- __ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2, - kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore, - RememberedSetAction::kOmit, SmiCheck::kOmit); -} - -static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, - Register scratch2) { - ASM_CODE_COMMENT(masm); - Register params_size = scratch1; - - // Get the size of the formal parameters + receiver (in bytes). - __ lw(params_size, - MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); - __ lw(params_size, - FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset)); - - Register actual_params_size = scratch2; - // Compute the size of the actual parameters + receiver (in bytes). - __ Lw(actual_params_size, - MemOperand(fp, StandardFrameConstants::kArgCOffset)); - __ sll(actual_params_size, actual_params_size, kPointerSizeLog2); - - // If actual is bigger than formal, then we should use it to free up the stack - // arguments. - __ slt(t2, params_size, actual_params_size); - __ movn(params_size, actual_params_size, t2); - - // Leave the frame (also dropping the register file). - __ LeaveFrame(StackFrame::INTERPRETED); - - // Drop receiver + arguments. - __ DropArguments(params_size, TurboAssembler::kCountIsBytes, - TurboAssembler::kCountIncludesReceiver); -} - -// Tail-call |function_id| if |actual_state| == |expected_state| -static void TailCallRuntimeIfStateEquals(MacroAssembler* masm, - Register actual_state, - TieringState expected_state, - Runtime::FunctionId function_id) { - ASM_CODE_COMMENT(masm); - Label no_match; - __ Branch(&no_match, ne, actual_state, - Operand(static_cast(expected_state))); - GenerateTailCallToReturnedCode(masm, function_id); - __ bind(&no_match); -} - -static void TailCallOptimizedCodeSlot(MacroAssembler* masm, - Register optimized_code_entry, - Register scratch1, Register scratch2) { - // ----------- S t a t e ------------- - // -- a0 : actual argument count - // -- a3 : new target (preserved for callee if needed, and caller) - // -- a1 : target function (preserved for callee if needed, and caller) - // ----------------------------------- - DCHECK(!AreAliased(optimized_code_entry, a1, a3, scratch1, scratch2)); - - Register closure = a1; - Label heal_optimized_code_slot; - - // If the optimized code is cleared, go to runtime to update the optimization - // marker field. - __ LoadWeakValue(optimized_code_entry, optimized_code_entry, - &heal_optimized_code_slot); - - // Check if the optimized code is marked for deopt. If it is, call the - // runtime to clear it. - __ TestCodeTIsMarkedForDeoptimizationAndJump(optimized_code_entry, scratch1, - ne, &heal_optimized_code_slot); - - // Optimized code is good, get it into the closure and link the closure into - // the optimized functions list, then tail call the optimized code. - // The feedback vector is no longer used, so re-use it as a scratch - // register. - ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure, - scratch1, scratch2); - static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); - __ Addu(a2, optimized_code_entry, Code::kHeaderSize - kHeapObjectTag); - __ Jump(a2); - - // Optimized code slot contains deoptimized code or code is cleared and - // optimized code marker isn't updated. Evict the code, update the marker - // and re-enter the closure's code. 
-  __ bind(&heal_optimized_code_slot);
-  GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
-}
-
-static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
-                              Register tiering_state) {
-  // ----------- S t a t e -------------
-  //  -- a0 : actual argument count
-  //  -- a3 : new target (preserved for callee if needed, and caller)
-  //  -- a1 : target function (preserved for callee if needed, and caller)
-  //  -- feedback vector (preserved for caller if needed)
-  //  -- tiering_state : an int32 containing a non-zero optimization
-  //     marker.
-  // -----------------------------------
-  ASM_CODE_COMMENT(masm);
-  DCHECK(!AreAliased(feedback_vector, a1, a3, tiering_state));
-
-  TailCallRuntimeIfStateEquals(masm, tiering_state,
-                               TieringState::kRequestTurbofan_Synchronous,
-                               Runtime::kCompileTurbofan_Synchronous);
-  TailCallRuntimeIfStateEquals(masm, tiering_state,
-                               TieringState::kRequestTurbofan_Concurrent,
-                               Runtime::kCompileTurbofan_Concurrent);
-
-  __ stop();
-}
-
-// Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation. Will bail out to a
-// label if the bytecode (without prefix) is a return bytecode. Will not advance
-// the bytecode offset if the current bytecode is a JumpLoop, instead just
-// re-executing the JumpLoop to jump to the correct bytecode.
-static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
-                                          Register bytecode_array,
-                                          Register bytecode_offset,
-                                          Register bytecode, Register scratch1,
-                                          Register scratch2, Register scratch3,
-                                          Label* if_return) {
-  ASM_CODE_COMMENT(masm);
-  Register bytecode_size_table = scratch1;
-
-  // The bytecode offset value will be increased by one in wide and extra wide
-  // cases. In the case of having a wide or extra wide JumpLoop bytecode, we
-  // will restore the original bytecode. In order to simplify the code, we have
-  // a backup of it.
-  Register original_bytecode_offset = scratch3;
-  DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode,
-                     bytecode_size_table, original_bytecode_offset));
-  __ Move(original_bytecode_offset, bytecode_offset);
-  __ li(bytecode_size_table, ExternalReference::bytecode_size_table_address());
-
-  // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
-  Label process_bytecode, extra_wide;
-  static_assert(0 == static_cast<int>(interpreter::Bytecode::kWide));
-  static_assert(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
-  static_assert(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
-  static_assert(3 ==
-                static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
-  __ Branch(&process_bytecode, hi, bytecode, Operand(3));
-  __ And(scratch2, bytecode, Operand(1));
-  __ Branch(&extra_wide, ne, scratch2, Operand(zero_reg));
-
-  // Load the next bytecode and update table to the wide scaled table.
-  __ Addu(bytecode_offset, bytecode_offset, Operand(1));
-  __ Addu(scratch2, bytecode_array, bytecode_offset);
-  __ lbu(bytecode, MemOperand(scratch2));
-  __ Addu(bytecode_size_table, bytecode_size_table,
-          Operand(kByteSize * interpreter::Bytecodes::kBytecodeCount));
-  __ jmp(&process_bytecode);
-
-  __ bind(&extra_wide);
-  // Load the next bytecode and update table to the extra wide scaled table.
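-  // (The size table consists of three consecutive runs of
-  //  interpreter::Bytecodes::kBytecodeCount byte entries -- single, wide and
-  //  extra-wide -- which is why the wide case above advanced the table base
-  //  by one run and the extra-wide case below advances it by two.)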
-  __ Addu(bytecode_offset, bytecode_offset, Operand(1));
-  __ Addu(scratch2, bytecode_array, bytecode_offset);
-  __ lbu(bytecode, MemOperand(scratch2));
-  __ Addu(bytecode_size_table, bytecode_size_table,
-          Operand(2 * kByteSize * interpreter::Bytecodes::kBytecodeCount));
-
-  __ bind(&process_bytecode);
-
-// Bail out to the return label if this is a return bytecode.
-#define JUMP_IF_EQUAL(NAME)          \
-  __ Branch(if_return, eq, bytecode, \
-            Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));
-  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
-#undef JUMP_IF_EQUAL
-
-  // If this is a JumpLoop, re-execute it to perform the jump to the beginning
-  // of the loop.
-  Label end, not_jump_loop;
-  __ Branch(&not_jump_loop, ne, bytecode,
-            Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
-  // We need to restore the original bytecode_offset since we might have
-  // increased it to skip the wide / extra-wide prefix bytecode.
-  __ Move(bytecode_offset, original_bytecode_offset);
-  __ jmp(&end);
-
-  __ bind(&not_jump_loop);
-  // Otherwise, load the size of the current bytecode and advance the offset.
-  __ Addu(scratch2, bytecode_size_table, bytecode);
-  __ lb(scratch2, MemOperand(scratch2));
-  __ Addu(bytecode_offset, bytecode_offset, scratch2);
-
-  __ bind(&end);
-}
-
-// Read off the flags in the feedback vector and check if there
-// is optimized code or a tiering state that needs to be processed.
-static void LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
-    MacroAssembler* masm, Register flags, Register feedback_vector,
-    CodeKind current_code_kind, Label* flags_need_processing) {
-  ASM_CODE_COMMENT(masm);
-  Register scratch = t6;
-  __ lhu(flags, FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
-  uint32_t kFlagsMask = FeedbackVector::kFlagsTieringStateIsAnyRequested |
-                        FeedbackVector::kFlagsMaybeHasTurbofanCode |
-                        FeedbackVector::kFlagsLogNextExecution;
-  if (current_code_kind != CodeKind::MAGLEV) {
-    kFlagsMask |= FeedbackVector::kFlagsMaybeHasMaglevCode;
-  }
-  __ And(scratch, flags, Operand(kFlagsMask));
-  __ Branch(flags_need_processing, ne, scratch, Operand(zero_reg));
-}
-
-static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
-    MacroAssembler* masm, Register flags, Register feedback_vector) {
-  ASM_CODE_COMMENT(masm);
-  Label maybe_has_optimized_code;
-  // Check if optimized code marker is available.
-  {
-    UseScratchRegisterScope temps(masm);
-    Register scratch = temps.Acquire();
-    __ And(scratch, flags,
-           Operand(FeedbackVector::kFlagsTieringStateIsAnyRequested));
-    __ Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg));
-  }
-
-  Register tiering_state = flags;
-  __ DecodeField<FeedbackVector::TieringStateBits>(tiering_state);
-  MaybeOptimizeCode(masm, feedback_vector, tiering_state);
-
-  __ bind(&maybe_has_optimized_code);
-  Register optimized_code_entry = flags;
-  __ Lw(tiering_state,
-        FieldMemOperand(feedback_vector,
-                        FeedbackVector::kMaybeOptimizedCodeOffset));
-
-  TailCallOptimizedCodeSlot(masm, optimized_code_entry, t1, t3);
-}
-
-namespace {
-void ResetBytecodeAge(MacroAssembler* masm, Register bytecode_array) {
-  __ sh(zero_reg,
-        FieldMemOperand(bytecode_array, BytecodeArray::kBytecodeAgeOffset));
-}
-
-void ResetFeedbackVectorOsrUrgency(MacroAssembler* masm,
-                                   Register feedback_vector, Register scratch) {
-  DCHECK(!AreAliased(feedback_vector, scratch));
-  __ lbu(scratch,
-         FieldMemOperand(feedback_vector, FeedbackVector::kOsrStateOffset));
-  __ And(scratch, scratch,
-         Operand(FeedbackVector::MaybeHasOptimizedOsrCodeBit::kMask));
-  __ sb(scratch, FieldMemOperand(feedback_vector,
-                                 FeedbackVector::kOsrStateOffset));
-}
-
-}  // namespace
-
-// static
-void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
-  UseScratchRegisterScope temps(masm);
-  temps.Include({s1, s2});
-  auto descriptor =
-      Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
-  Register closure = descriptor.GetRegisterParameter(
-      BaselineOutOfLinePrologueDescriptor::kClosure);
-  // Load the feedback vector from the closure.
-  Register feedback_vector = temps.Acquire();
-  __ Lw(feedback_vector,
-        FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
-  __ Lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
-  if (v8_flags.debug_code) {
-    UseScratchRegisterScope temps(masm);
-    Register scratch = temps.Acquire();
-    __ GetObjectType(feedback_vector, scratch, scratch);
-    __ Assert(eq, AbortReason::kExpectedFeedbackVector, scratch,
-              Operand(FEEDBACK_VECTOR_TYPE));
-  }
-  // Check for a tiering state.
-  Label flags_need_processing;
-  Register flags = no_reg;
-  {
-    UseScratchRegisterScope temps(masm);
-    flags = temps.Acquire();
-    // flags will only be used in |flags_need_processing|
-    // and can be reused outside of it.
-    LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
-        masm, flags, feedback_vector, CodeKind::BASELINE,
-        &flags_need_processing);
-  }
-  {
-    UseScratchRegisterScope temps(masm);
-    ResetFeedbackVectorOsrUrgency(masm, feedback_vector, temps.Acquire());
-  }
-  // Increment invocation count for the function.
-  {
-    UseScratchRegisterScope temps(masm);
-    Register invocation_count = temps.Acquire();
-    __ Lw(invocation_count,
-          FieldMemOperand(feedback_vector,
-                          FeedbackVector::kInvocationCountOffset));
-    __ Addu(invocation_count, invocation_count, Operand(1));
-    __ Sw(invocation_count,
-          FieldMemOperand(feedback_vector,
-                          FeedbackVector::kInvocationCountOffset));
-  }
-
-  FrameScope frame_scope(masm, StackFrame::MANUAL);
-  {
-    ASM_CODE_COMMENT_STRING(masm, "Frame Setup");
-    // Normally the first thing we'd do here is Push(ra, fp), but we already
-    // entered the frame in BaselineCompiler::Prologue, as we had to use the
-    // value ra before the call to this BaselineOutOfLinePrologue builtin.
-    Register callee_context = descriptor.GetRegisterParameter(
-        BaselineOutOfLinePrologueDescriptor::kCalleeContext);
-    Register callee_js_function = descriptor.GetRegisterParameter(
-        BaselineOutOfLinePrologueDescriptor::kClosure);
-    __ Push(callee_context, callee_js_function);
-    DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
-    DCHECK_EQ(callee_js_function, kJSFunctionRegister);
-
-    Register argc = descriptor.GetRegisterParameter(
-        BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
-    // We'll use the bytecode for both code age/OSR resetting, and pushing onto
-    // the frame, so load it into a register.
-    Register bytecode_array = descriptor.GetRegisterParameter(
-        BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
-    ResetBytecodeAge(masm, bytecode_array);
-    __ Push(argc, bytecode_array);
-
-    // Baseline code frames store the feedback vector where interpreter would
-    // store the bytecode offset.
-    if (v8_flags.debug_code) {
-      UseScratchRegisterScope temps(masm);
-      Register invocation_count = temps.Acquire();
-      __ GetObjectType(feedback_vector, invocation_count, invocation_count);
-      __ Assert(eq, AbortReason::kExpectedFeedbackVector, invocation_count,
-                Operand(FEEDBACK_VECTOR_TYPE));
-    }
-    // Our stack is currently aligned. We have to push something along with
-    // the feedback vector to keep it that way -- we may as well start
-    // initialising the register frame.
-    // TODO(v8:11429,leszeks): Consider guaranteeing that this call leaves
-    // `undefined` in the accumulator register, to skip the load in the
-    // baseline code.
-    __ Push(feedback_vector);
-  }
-
-  Label call_stack_guard;
-  Register frame_size = descriptor.GetRegisterParameter(
-      BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
-  {
-    ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt check");
-    // Stack check. This folds the checks for both the interrupt stack limit
-    // check and the real stack limit into one by just checking for the
-    // interrupt limit. The interrupt limit is either equal to the real stack
-    // limit or tighter. By ensuring we have space until that limit after
-    // building the frame we can quickly precheck both at once.
-    UseScratchRegisterScope temps(masm);
-    Register sp_minus_frame_size = temps.Acquire();
-    __ Subu(sp_minus_frame_size, sp, frame_size);
-    Register interrupt_limit = temps.Acquire();
-    __ LoadStackLimit(interrupt_limit,
-                      MacroAssembler::StackLimitKind::kInterruptStackLimit);
-    __ Branch(&call_stack_guard, Uless, sp_minus_frame_size,
-              Operand(interrupt_limit));
-  }
-
-  // Do "fast" return to the caller pc in ra.
-  // TODO(v8:11429): Document this frame setup better.
-  __ Ret();
-
-  __ bind(&flags_need_processing);
-  {
-    ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
-    UseScratchRegisterScope temps(masm);
-    temps.Exclude(flags);
-    // Ensure the flags register is not allocated again.
-    // Drop the frame created by the baseline call.
-    __ Pop(ra, fp);
-    MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, flags, feedback_vector);
-    __ Trap();
-  }
-
-  __ bind(&call_stack_guard);
-  {
-    ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
-    FrameScope frame_scope(masm, StackFrame::INTERNAL);
-    // Save the incoming new target or generator object.
-    __ Push(kJavaScriptCallNewTargetRegister);
-    __ SmiTag(frame_size);
-    __ Push(frame_size);
-    __ CallRuntime(Runtime::kStackGuardWithGap);
-    __ Pop(kJavaScriptCallNewTargetRegister);
-  }
-  __ Ret();
-  temps.Exclude({kScratchReg, kScratchReg2});
-}
-
-// Generate code for entering a JS function with the interpreter.
-// On entry to the function the receiver and arguments have been pushed on the
-// stack left to right.
-//
-// The live registers are:
-//   o a0 : actual argument count
-//   o a1 : the JS function object being called.
-//   o a3 : the incoming new target or generator object
-//   o cp : our context
-//   o fp : the caller's frame pointer
-//   o sp : stack pointer
-//   o ra : return address
-//
-// The function builds an interpreter frame. See InterpreterFrameConstants in
-// frame-constants.h for its layout.
-void Builtins::Generate_InterpreterEntryTrampoline(
-    MacroAssembler* masm, InterpreterEntryTrampolineMode mode) {
-  Register closure = a1;
-  Register feedback_vector = a2;
-
-  // Get the bytecode array from the function object and load it into
-  // kInterpreterBytecodeArrayRegister.
-  __ lw(kScratchReg,
-        FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
-  __ lw(kInterpreterBytecodeArrayRegister,
-        FieldMemOperand(kScratchReg, SharedFunctionInfo::kFunctionDataOffset));
-  Label is_baseline;
-  GetSharedFunctionInfoBytecodeOrBaseline(
-      masm, kInterpreterBytecodeArrayRegister, kScratchReg, &is_baseline);
-
-  // The bytecode array could have been flushed from the shared function info;
-  // if so, call into CompileLazy.
-  Label compile_lazy;
-  __ GetObjectType(kInterpreterBytecodeArrayRegister, kScratchReg, kScratchReg);
-  __ Branch(&compile_lazy, ne, kScratchReg, Operand(BYTECODE_ARRAY_TYPE));
-
-  // Load the feedback vector from the closure.
-  __ lw(feedback_vector,
-        FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
-  __ lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
-
-  Label push_stack_frame;
-  // Check if feedback vector is valid. If valid, check for optimized code
-  // and update invocation count. Otherwise, set up the stack frame.
-  __ lw(t0, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
-  __ lhu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
-  __ Branch(&push_stack_frame, ne, t0, Operand(FEEDBACK_VECTOR_TYPE));
-
-  // Check the tiering state.
-  Label flags_need_processing;
-  Register flags = t0;
-  LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
-      masm, flags, feedback_vector, CodeKind::INTERPRETED_FUNCTION,
-      &flags_need_processing);
-
-  {
-    UseScratchRegisterScope temps(masm);
-    ResetFeedbackVectorOsrUrgency(masm, feedback_vector, temps.Acquire());
-  }
-
-  Label not_optimized;
-  __ bind(&not_optimized);
-
-  // Increment invocation count for the function.
-  __ lw(t0, FieldMemOperand(feedback_vector,
-                            FeedbackVector::kInvocationCountOffset));
-  __ Addu(t0, t0, Operand(1));
-  __ sw(t0, FieldMemOperand(feedback_vector,
-                            FeedbackVector::kInvocationCountOffset));
-
-  // Open a frame scope to indicate that there is a frame on the stack. The
-  // MANUAL indicates that the scope shouldn't actually generate code to set up
-  // the frame (that is done below).
-  __ bind(&push_stack_frame);
-  FrameScope frame_scope(masm, StackFrame::MANUAL);
-  __ PushStandardFrame(closure);
-
-  ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister);
-
-  // Load the initial bytecode offset.
-  __ li(kInterpreterBytecodeOffsetRegister,
-        Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
-
-  // Push bytecode array and Smi tagged bytecode array offset.
-  __ SmiTag(t0, kInterpreterBytecodeOffsetRegister);
-  __ Push(kInterpreterBytecodeArrayRegister, t0);
-
-  // Allocate the local and temporary register file on the stack.
-  Label stack_overflow;
-  {
-    // Load frame size from the BytecodeArray object.
-    __ lw(t0, FieldMemOperand(kInterpreterBytecodeArrayRegister,
-                              BytecodeArray::kFrameSizeOffset));
-
-    // Do a stack check to ensure we don't go over the limit.
-    __ Subu(t1, sp, Operand(t0));
-    __ LoadStackLimit(a2, MacroAssembler::StackLimitKind::kRealStackLimit);
-    __ Branch(&stack_overflow, lo, t1, Operand(a2));
-
-    // If ok, push undefined as the initial value for all register file entries.
-    Label loop_header;
-    Label loop_check;
-    __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
-    __ Branch(&loop_check);
-    __ bind(&loop_header);
-    // TODO(rmcilroy): Consider doing more than one push per loop iteration.
-    __ push(kInterpreterAccumulatorRegister);
-    // Continue loop if not done.
-    __ bind(&loop_check);
-    __ Subu(t0, t0, Operand(kPointerSize));
-    __ Branch(&loop_header, ge, t0, Operand(zero_reg));
-  }
-
-  // If the bytecode array has a valid incoming new target or generator object
-  // register, initialize it with the incoming value, which was passed in a3.
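-  // (The value loaded below is an interpreter register operand taken from the
-  //  BytecodeArray header; it is scaled by kPointerSizeLog2 and applied
-  //  relative to fp to locate the frame slot that receives a3.)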
-  Label no_incoming_new_target_or_generator_register;
-  __ lw(t1, FieldMemOperand(
-                kInterpreterBytecodeArrayRegister,
-                BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
-  __ Branch(&no_incoming_new_target_or_generator_register, eq, t1,
-            Operand(zero_reg));
-  __ Lsa(t1, fp, t1, kPointerSizeLog2);
-  __ sw(a3, MemOperand(t1));
-  __ bind(&no_incoming_new_target_or_generator_register);
-
-  // Perform the interrupt stack check.
-  // TODO(solanes): Merge with the real stack limit check above.
-  Label stack_check_interrupt, after_stack_check_interrupt;
-  __ LoadStackLimit(a2, MacroAssembler::StackLimitKind::kInterruptStackLimit);
-  __ Branch(&stack_check_interrupt, lo, sp, Operand(a2));
-  __ bind(&after_stack_check_interrupt);
-
-  // The accumulator is already loaded with undefined.
-
-  // Load the dispatch table into a register and dispatch to the bytecode
-  // handler at the current bytecode offset.
-  Label do_dispatch;
-  __ bind(&do_dispatch);
-  __ li(kInterpreterDispatchTableRegister,
-        ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
-  __ Addu(a0, kInterpreterBytecodeArrayRegister,
-          kInterpreterBytecodeOffsetRegister);
-  __ lbu(t3, MemOperand(a0));
-  __ Lsa(kScratchReg, kInterpreterDispatchTableRegister, t3, kPointerSizeLog2);
-  __ lw(kJavaScriptCallCodeStartRegister, MemOperand(kScratchReg));
-  __ Call(kJavaScriptCallCodeStartRegister);
-
-  __ RecordComment("--- InterpreterEntryReturnPC point ---");
-  if (mode == InterpreterEntryTrampolineMode::kDefault) {
-    masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(
-        masm->pc_offset());
-  } else {
-    DCHECK_EQ(mode, InterpreterEntryTrampolineMode::kForProfiling);
-    // Both versions must be the same up to this point, otherwise the builtins
-    // will not be interchangeable.
-    CHECK_EQ(
-        masm->isolate()->heap()->interpreter_entry_return_pc_offset().value(),
-        masm->pc_offset());
-  }
-
-  // Any returns to the entry trampoline are either due to the return bytecode
-  // or the interpreter tail calling a builtin and then a dispatch.
-
-  // Get the bytecode array and bytecode offset from the stack frame.
-  __ lw(kInterpreterBytecodeArrayRegister,
-        MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
-  __ lw(kInterpreterBytecodeOffsetRegister,
-        MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
-  __ SmiUntag(kInterpreterBytecodeOffsetRegister);
-
-  // Either return, or advance to the next bytecode and dispatch.
-  Label do_return;
-  __ Addu(a1, kInterpreterBytecodeArrayRegister,
-          kInterpreterBytecodeOffsetRegister);
-  __ lbu(a1, MemOperand(a1));
-  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
-                                kInterpreterBytecodeOffsetRegister, a1, a2, a3,
-                                t0, &do_return);
-  __ jmp(&do_dispatch);
-
-  __ bind(&do_return);
-  // The return value is in v0.
-  LeaveInterpreterFrame(masm, t0, t1);
-  __ Jump(ra);
-
-  __ bind(&stack_check_interrupt);
-  // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
-  // for the call to the StackGuard.
-  __ li(kInterpreterBytecodeOffsetRegister,
-        Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
-                             kFunctionEntryBytecodeOffset)));
-  __ Sw(kInterpreterBytecodeOffsetRegister,
-        MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
-  __ CallRuntime(Runtime::kStackGuard);
-
-  // After the call, restore the bytecode array, bytecode offset and accumulator
-  // registers again. Also, restore the bytecode offset in the stack to its
-  // previous value.
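-  // (Note that the offset is not reloaded from the frame: it was overwritten
-  //  with kFunctionEntryBytecodeOffset above, so it is simply reset to the
-  //  first real bytecode at BytecodeArray::kHeaderSize - kHeapObjectTag.)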
-  __ Lw(kInterpreterBytecodeArrayRegister,
-        MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
-  __ li(kInterpreterBytecodeOffsetRegister,
-        Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
-  __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
-
-  __ SmiTag(a2, kInterpreterBytecodeOffsetRegister);
-  __ Sw(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
-
-  __ jmp(&after_stack_check_interrupt);
-
-  __ bind(&flags_need_processing);
-  MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, flags, feedback_vector);
-  __ bind(&is_baseline);
-  {
-    // Load the feedback vector from the closure.
-    __ Lw(feedback_vector,
-          FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
-    __ Lw(feedback_vector,
-          FieldMemOperand(feedback_vector, Cell::kValueOffset));
-
-    Label install_baseline_code;
-    // Check if feedback vector is valid. If not, call prepare for baseline to
-    // allocate it.
-    __ Lw(t4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
-    __ lhu(t4, FieldMemOperand(t4, Map::kInstanceTypeOffset));
-    __ Branch(&install_baseline_code, ne, t4, Operand(FEEDBACK_VECTOR_TYPE));
-
-    // Check for a tiering state.
-    LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing(
-        masm, flags, feedback_vector, CodeKind::BASELINE,
-        &flags_need_processing);
-
-    // Load the baseline code into the closure.
-    __ Move(a2, kInterpreterBytecodeArrayRegister);
-    static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
-    ReplaceClosureCodeWithOptimizedCode(masm, a2, closure, t4, t5);
-    __ JumpCodeObject(a2);
-
-    __ bind(&install_baseline_code);
-    GenerateTailCallToReturnedCode(masm, Runtime::kInstallBaselineCode);
-  }
-
-  __ bind(&compile_lazy);
-  GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
-  // Unreachable code.
-  __ break_(0xCC);
-
-  __ bind(&stack_overflow);
-  __ CallRuntime(Runtime::kThrowStackOverflow);
-  // Unreachable code.
-  __ break_(0xCC);
-}
-
-static void GenerateInterpreterPushArgs(MacroAssembler* masm,
-                                        Register num_args,
-                                        Register start_address,
-                                        Register scratch, Register scratch2) {
-  ASM_CODE_COMMENT(masm);
-  // Find the address of the last argument.
-  __ Subu(scratch, num_args, Operand(1));
-  __ sll(scratch, scratch, kPointerSizeLog2);
-  __ Subu(start_address, start_address, scratch);
-
-  // Push the arguments.
-  __ PushArray(start_address, num_args, scratch, scratch2,
-               TurboAssembler::PushArrayOrder::kReverse);
-}
-
-// static
-void Builtins::Generate_InterpreterPushArgsThenCallImpl(
-    MacroAssembler* masm, ConvertReceiverMode receiver_mode,
-    InterpreterPushArgsMode mode) {
-  DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
-  // ----------- S t a t e -------------
-  //  -- a0 : the number of arguments
-  //  -- a2 : the address of the first argument to be pushed. Subsequent
-  //          arguments should be consecutive above this, in the same order as
-  //          they are to be pushed onto the stack.
-  //  -- a1 : the target to call (can be any Object).
-  // -----------------------------------
-  Label stack_overflow;
-  if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
-    // The spread argument should not be pushed.
-    __ Subu(a0, a0, Operand(1));
-  }
-
-  if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
-    __ Subu(t0, a0, Operand(kJSArgcReceiverSlots));
-  } else {
-    __ mov(t0, a0);
-  }
-
-  __ StackOverflowCheck(t0, t4, t1, &stack_overflow);
-
-  // This function modifies a2, t4 and t1.
-  GenerateInterpreterPushArgs(masm, t0, a2, t4, t1);
-
-  if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
-    __ PushRoot(RootIndex::kUndefinedValue);
-  }
-
-  if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
-    // Pass the spread in the register a2.
-    // a2 already points to the penultimate argument; the spread
-    // is below that.
-    __ Lw(a2, MemOperand(a2, -kSystemPointerSize));
-  }
-
-  // Call the target.
-  if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
-    __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
-            RelocInfo::CODE_TARGET);
-  } else {
-    __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
-            RelocInfo::CODE_TARGET);
-  }
-
-  __ bind(&stack_overflow);
-  {
-    __ TailCallRuntime(Runtime::kThrowStackOverflow);
-    // Unreachable code.
-    __ break_(0xCC);
-  }
-}
-
-// static
-void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
-    MacroAssembler* masm, InterpreterPushArgsMode mode) {
-  // ----------- S t a t e -------------
-  //  -- a0 : argument count
-  //  -- a3 : new target
-  //  -- a1 : constructor to call
-  //  -- a2 : allocation site feedback if available, undefined otherwise.
-  //  -- t4 : address of the first argument
-  // -----------------------------------
-  Label stack_overflow;
-  __ StackOverflowCheck(a0, t1, t0, &stack_overflow);
-
-  if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
-    // The spread argument should not be pushed.
-    __ Subu(a0, a0, Operand(1));
-  }
-
-  Register argc_without_receiver = t2;
-  __ Subu(argc_without_receiver, a0, Operand(kJSArgcReceiverSlots));
-
-  GenerateInterpreterPushArgs(masm, argc_without_receiver, t4, t1, t0);
-
-  // Push a slot for the receiver.
-  __ push(zero_reg);
-
-  if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
-    // Pass the spread in the register a2.
-    // t4 already points to the penultimate argument; the spread
-    // lies in the next interpreter register.
-    // __ Subu(t4, t4, Operand(kSystemPointerSize));
-    __ Lw(a2, MemOperand(t4, -kSystemPointerSize));
-  } else {
-    __ AssertUndefinedOrAllocationSite(a2, t0);
-  }
-
-  if (mode == InterpreterPushArgsMode::kArrayFunction) {
-    __ AssertFunction(a1);
-
-    // Tail call to the array construct stub (still in the caller
-    // context at this point).
-    __ Jump(BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl),
-            RelocInfo::CODE_TARGET);
-  } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
-    // Call the constructor with a0, a1, and a3 unmodified.
-    __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
-            RelocInfo::CODE_TARGET);
-  } else {
-    DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
-    // Call the constructor with a0, a1, and a3 unmodified.
-    __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
-  }
-
-  __ bind(&stack_overflow);
-  {
-    __ TailCallRuntime(Runtime::kThrowStackOverflow);
-    // Unreachable code.
-    __ break_(0xCC);
-  }
-}
-
-static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
-  // Set the return address to the correct point in the interpreter entry
-  // trampoline.
-  Label builtin_trampoline, trampoline_loaded;
-  Smi interpreter_entry_return_pc_offset(
-      masm->isolate()->heap()->interpreter_entry_return_pc_offset());
-  DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());
-
-  // If the SFI function_data is an InterpreterData, the function will have a
-  // custom copy of the interpreter entry trampoline for profiling. If so,
-  // get the custom trampoline, otherwise grab the entry address of the global
-  // trampoline.
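-  // (Two sources are possible here: the InterpreterData case reads the custom
-  //  trampoline out of the SFI's function data, while the common case reads
-  //  the globally shared trampoline's address; in both cases the return
-  //  address is then biased by interpreter_entry_return_pc_offset.)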
- __ lw(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); - __ lw(t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset)); - __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset)); - __ GetObjectType(t0, kInterpreterDispatchTableRegister, - kInterpreterDispatchTableRegister); - __ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister, - Operand(INTERPRETER_DATA_TYPE)); - - __ lw(t0, FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset)); - __ Addu(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag)); - __ Branch(&trampoline_loaded); - - __ bind(&builtin_trampoline); - __ li(t0, ExternalReference:: - address_of_interpreter_entry_trampoline_instruction_start( - masm->isolate())); - __ lw(t0, MemOperand(t0)); - - __ bind(&trampoline_loaded); - __ Addu(ra, t0, Operand(interpreter_entry_return_pc_offset.value())); - - // Initialize the dispatch table register. - __ li(kInterpreterDispatchTableRegister, - ExternalReference::interpreter_dispatch_table_address(masm->isolate())); - - // Get the bytecode array pointer from the frame. - __ lw(kInterpreterBytecodeArrayRegister, - MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); - - if (v8_flags.debug_code) { - // Check function data field is actually a BytecodeArray object. - __ SmiTst(kInterpreterBytecodeArrayRegister, kScratchReg); - __ Assert(ne, - AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, - kScratchReg, Operand(zero_reg)); - __ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1); - __ Assert(eq, - AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, - a1, Operand(BYTECODE_ARRAY_TYPE)); - } - - // Get the target bytecode offset from the frame. - __ lw(kInterpreterBytecodeOffsetRegister, - MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); - __ SmiUntag(kInterpreterBytecodeOffsetRegister); - - if (v8_flags.debug_code) { - Label okay; - __ Branch(&okay, ge, kInterpreterBytecodeOffsetRegister, - Operand(BytecodeArray::kHeaderSize - kHeapObjectTag)); - // Unreachable code. - __ break_(0xCC); - __ bind(&okay); - } - - // Dispatch to the target bytecode. - __ Addu(a1, kInterpreterBytecodeArrayRegister, - kInterpreterBytecodeOffsetRegister); - __ lbu(t3, MemOperand(a1)); - __ Lsa(a1, kInterpreterDispatchTableRegister, t3, kPointerSizeLog2); - __ lw(kJavaScriptCallCodeStartRegister, MemOperand(a1)); - __ Jump(kJavaScriptCallCodeStartRegister); -} - -void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) { - // Advance the current bytecode offset stored within the given interpreter - // stack frame. This simulates what all bytecode handlers do upon completion - // of the underlying operation. - __ lw(kInterpreterBytecodeArrayRegister, - MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); - __ lw(kInterpreterBytecodeOffsetRegister, - MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); - __ SmiUntag(kInterpreterBytecodeOffsetRegister); - - Label enter_bytecode, function_entry_bytecode; - __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister, - Operand(BytecodeArray::kHeaderSize - kHeapObjectTag + - kFunctionEntryBytecodeOffset)); - - // Load the current bytecode. - __ Addu(a1, kInterpreterBytecodeArrayRegister, - kInterpreterBytecodeOffsetRegister); - __ lbu(a1, MemOperand(a1)); - - // Advance to the next bytecode. 
- Label if_return; - AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister, - kInterpreterBytecodeOffsetRegister, a1, a2, a3, - t0, &if_return); - - __ bind(&enter_bytecode); - // Convert new bytecode offset to a Smi and save in the stackframe. - __ SmiTag(a2, kInterpreterBytecodeOffsetRegister); - __ sw(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); - - Generate_InterpreterEnterBytecode(masm); - - __ bind(&function_entry_bytecode); - // If the code deoptimizes during the implicit function entry stack interrupt - // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is - // not a valid bytecode offset. Detect this case and advance to the first - // actual bytecode. - __ li(kInterpreterBytecodeOffsetRegister, - Operand(BytecodeArray::kHeaderSize - kHeapObjectTag)); - __ Branch(&enter_bytecode); - - // We should never take the if_return path. - __ bind(&if_return); - __ Abort(AbortReason::kInvalidBytecodeAdvance); -} - -void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) { - Generate_InterpreterEnterBytecode(masm); -} - -namespace { -void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, - bool java_script_builtin, - bool with_result) { - const RegisterConfiguration* config(RegisterConfiguration::Default()); - int allocatable_register_count = config->num_allocatable_general_registers(); - UseScratchRegisterScope temps(masm); - Register scratch = temps.Acquire(); // Temp register is not allocatable. - // Register scratch = t3; - if (with_result) { - if (java_script_builtin) { - __ mov(scratch, v0); - } else { - // Overwrite the hole inserted by the deoptimizer with the return value - // from the LAZY deopt point. - __ sw(v0, - MemOperand( - sp, config->num_allocatable_general_registers() * kPointerSize + - BuiltinContinuationFrameConstants::kFixedFrameSize)); - } - } - for (int i = allocatable_register_count - 1; i >= 0; --i) { - int code = config->GetAllocatableGeneralCode(i); - __ Pop(Register::from_code(code)); - if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) { - __ SmiUntag(Register::from_code(code)); - } - } - - if (with_result && java_script_builtin) { - // Overwrite the hole inserted by the deoptimizer with the return value from - // the LAZY deopt point. t0 contains the arguments count, the return value - // from LAZY is always the last argument. - constexpr int return_value_offset = - BuiltinContinuationFrameConstants::kFixedSlotCount - - kJSArgcReceiverSlots; - __ Addu(a0, a0, Operand(return_value_offset)); - __ Lsa(t0, sp, a0, kSystemPointerSizeLog2); - __ Sw(scratch, MemOperand(t0)); - // Recover arguments count. - __ Subu(a0, a0, Operand(return_value_offset)); - } - - __ lw(fp, MemOperand( - sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); - // Load builtin index (stored as a Smi) and use it to get the builtin start - // address from the builtins table. 
-  __ Pop(t0);
-  __ Addu(sp, sp,
-          Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
-  __ Pop(ra);
-  __ LoadEntryFromBuiltinIndex(t0);
-  __ Jump(t0);
-}
-}  // namespace
-
-void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
-  Generate_ContinueToBuiltinHelper(masm, false, false);
-}
-
-void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
-    MacroAssembler* masm) {
-  Generate_ContinueToBuiltinHelper(masm, false, true);
-}
-
-void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
-  Generate_ContinueToBuiltinHelper(masm, true, false);
-}
-
-void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
-    MacroAssembler* masm) {
-  Generate_ContinueToBuiltinHelper(masm, true, true);
-}
-
-void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ CallRuntime(Runtime::kNotifyDeoptimized);
-  }
-
-  DCHECK_EQ(kInterpreterAccumulatorRegister.code(), v0.code());
-  __ lw(v0, MemOperand(sp, 0 * kPointerSize));
-  __ Ret(USE_DELAY_SLOT);
-  // Safe to fill delay slot: Addu will emit one instruction.
-  __ Addu(sp, sp, Operand(1 * kPointerSize));  // Remove accumulator.
-}
-
-namespace {
-
-void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
-                       Operand offset = Operand(zero_reg)) {
-  __ Addu(ra, entry_address, offset);
-  // And "return" to the OSR entry point of the function.
-  __ Ret();
-}
-
-enum class OsrSourceTier {
-  kInterpreter,
-  kBaseline,
-};
-
-void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
-                        Register maybe_target_code) {
-  Label jump_to_optimized_code;
-  {
-    // If maybe_target_code is not null, no need to call into runtime. A
-    // precondition here is: if maybe_target_code is a Code object, it must NOT
-    // be marked_for_deoptimization (callers must ensure this).
-    __ Branch(&jump_to_optimized_code, ne, maybe_target_code,
-              Operand(Smi::zero()));
-  }
-
-  ASM_CODE_COMMENT(masm);
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-    __ CallRuntime(Runtime::kCompileOptimizedOSR);
-    __ mov(maybe_target_code, v0);
-  }
-
-  // If the code object is null, just return to the caller.
-  __ Ret(eq, maybe_target_code, Operand(Smi::zero()));
-  __ bind(&jump_to_optimized_code);
-
-  if (source == OsrSourceTier::kInterpreter) {
-    // Drop the handler frame that is sitting on top of the actual
-    // JavaScript frame. This is the case when OSR is triggered from bytecode.
-    __ LeaveFrame(StackFrame::STUB);
-  }
-
-  // Load deoptimization data from the code object.
-  // <deopt_data> = <code_obj>[#deoptimization_data_offset]
-  __ lw(a1, MemOperand(maybe_target_code,
-                       Code::kDeoptimizationDataOrInterpreterDataOffset -
-                           kHeapObjectTag));
-
-  // Load the OSR entrypoint offset from the deoptimization data.
-  // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
-  __ lw(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
-                               DeoptimizationData::kOsrPcOffsetIndex) -
-                               kHeapObjectTag));
-  __ SmiUntag(a1);
-
-  // Compute the target address = code_obj + header_size + osr_offset
-  // <entry_addr> = <code_obj> + #header_size + <osr_offset>
-  __ Addu(maybe_target_code, maybe_target_code, a1);
-  Generate_OSREntry(masm, maybe_target_code,
-                    Operand(Code::kHeaderSize - kHeapObjectTag));
-}
-}  // namespace
-
-void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
-  using D = InterpreterOnStackReplacementDescriptor;
-  static_assert(D::kParameterCount == 1);
-  OnStackReplacement(masm, OsrSourceTier::kInterpreter,
-                     D::MaybeTargetCodeRegister());
-}
-
-void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
-  using D = BaselineOnStackReplacementDescriptor;
-  static_assert(D::kParameterCount == 1);
-
-  __ Lw(kContextRegister,
-        MemOperand(fp, BaselineFrameConstants::kContextOffset));
-  OnStackReplacement(masm, OsrSourceTier::kBaseline,
-                     D::MaybeTargetCodeRegister());
-}
-
-// static
-void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- a0    : argc
-  //  -- sp[0] : receiver
-  //  -- sp[4] : thisArg
-  //  -- sp[8] : argArray
-  // -----------------------------------
-
-  // 1. Load receiver into a1, argArray into a2 (if present), remove all
-  // arguments from the stack (including the receiver), and push thisArg (if
-  // present) instead.
-  {
-    Label no_arg;
-    __ LoadRoot(a2, RootIndex::kUndefinedValue);
-    __ mov(a3, a2);
-    // Lsa() cannot be used here, as the scratch value is used later.
-    __ lw(a1, MemOperand(sp));  // receiver
-    __ Branch(&no_arg, eq, a0, Operand(JSParameterCount(0)));
-    __ lw(a3, MemOperand(sp, kSystemPointerSize));  // thisArg
-    __ Branch(&no_arg, eq, a0, Operand(JSParameterCount(1)));
-    __ lw(a2, MemOperand(sp, 2 * kSystemPointerSize));  // argArray
-    __ bind(&no_arg);
-    __ DropArgumentsAndPushNewReceiver(a0, a3, TurboAssembler::kCountIsInteger,
-                                       TurboAssembler::kCountIncludesReceiver);
-  }
-
-  // ----------- S t a t e -------------
-  //  -- a2    : argArray
-  //  -- a1    : receiver
-  //  -- sp[0] : thisArg
-  // -----------------------------------
-
-  // 2. We don't need to check explicitly for callable receiver here,
-  // since that's the first thing the Call/CallWithArrayLike builtins
-  // will do.
-
-  // 3. Tail call with no arguments if argArray is null or undefined.
-  Label no_arguments;
-  __ JumpIfRoot(a2, RootIndex::kNullValue, &no_arguments);
-  __ JumpIfRoot(a2, RootIndex::kUndefinedValue, &no_arguments);
-
-  // 4a. Apply the receiver to the given argArray.
-  __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
-          RelocInfo::CODE_TARGET);
-
-  // 4b. The argArray is either null or undefined, so we tail call without any
-  // arguments to the receiver.
-  __ bind(&no_arguments);
-  {
-    __ li(a0, JSParameterCount(0));
-    __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
-  }
-}
-
-// static
-void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
-  // 1. Get the callable to call (passed as receiver) from the stack.
-  __ Pop(a1);
-
-  // 2. Make sure we have at least one argument.
-  // a0: actual number of arguments
-  {
-    Label done;
-    __ Branch(&done, ne, a0, Operand(JSParameterCount(0)));
-    __ PushRoot(RootIndex::kUndefinedValue);
-    __ Addu(a0, a0, Operand(1));
-    __ bind(&done);
-  }
-
-  // 3. Adjust the actual number of arguments.
-  __ addiu(a0, a0, -1);
-
-  // 4. Call the callable.
-  __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
-}
-
-void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- a0     : argc
-  //  -- sp[0]  : receiver
-  //  -- sp[4]  : target         (if argc >= 1)
-  //  -- sp[8]  : thisArgument   (if argc >= 2)
-  //  -- sp[12] : argumentsList  (if argc == 3)
-  // -----------------------------------
-
-  // 1. Load target into a1 (if present), argumentsList into a2 (if present),
-  // remove all arguments from the stack (including the receiver), and push
-  // thisArgument (if present) instead.
-  {
-    Label no_arg;
-    __ LoadRoot(a1, RootIndex::kUndefinedValue);
-    __ mov(a2, a1);
-    __ mov(a3, a1);
-    __ Branch(&no_arg, eq, a0, Operand(JSParameterCount(0)));
-    __ lw(a1, MemOperand(sp, kSystemPointerSize));  // target
-    __ Branch(&no_arg, eq, a0, Operand(JSParameterCount(1)));
-    __ lw(a3, MemOperand(sp, 2 * kSystemPointerSize));  // thisArgument
-    __ Branch(&no_arg, eq, a0, Operand(JSParameterCount(2)));
-    __ lw(a2, MemOperand(sp, 3 * kSystemPointerSize));  // argumentsList
-    __ bind(&no_arg);
-    __ DropArgumentsAndPushNewReceiver(a0, a3, TurboAssembler::kCountIsInteger,
-                                       TurboAssembler::kCountIncludesReceiver);
-  }
-
-  // ----------- S t a t e -------------
-  //  -- a2    : argumentsList
-  //  -- a1    : target
-  //  -- sp[0] : thisArgument
-  // -----------------------------------
-
-  // 2. We don't need to check explicitly for callable target here,
-  // since that's the first thing the Call/CallWithArrayLike builtins
-  // will do.
-
-  // 3. Apply the target to the given argumentsList.
-  __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
-          RelocInfo::CODE_TARGET);
-}
-
-void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- a0     : argc
-  //  -- sp[0]  : receiver
-  //  -- sp[4]  : target
-  //  -- sp[8]  : argumentsList
-  //  -- sp[12] : new.target (optional)
-  // -----------------------------------
-
-  // 1. Load target into a1 (if present), argumentsList into a2 (if present),
-  // new.target into a3 (if present, otherwise use target), remove all
-  // arguments from the stack (including the receiver), and push thisArgument
-  // (if present) instead.
-  {
-    Label no_arg;
-    __ LoadRoot(a1, RootIndex::kUndefinedValue);
-    __ mov(a2, a1);
-    __ mov(t0, a1);
-    __ Branch(&no_arg, eq, a0, Operand(JSParameterCount(0)));
-    __ lw(a1, MemOperand(sp, kSystemPointerSize));  // target
-    __ mov(a3, a1);  // new.target defaults to target
-    __ Branch(&no_arg, eq, a0, Operand(JSParameterCount(1)));
-    __ lw(a2, MemOperand(sp, 2 * kSystemPointerSize));  // argumentsList
-    __ Branch(&no_arg, eq, a0, Operand(JSParameterCount(2)));
-    __ lw(a3, MemOperand(sp, 3 * kSystemPointerSize));  // new.target
-    __ bind(&no_arg);
-    __ DropArgumentsAndPushNewReceiver(a0, t0, TurboAssembler::kCountIsInteger,
-                                       TurboAssembler::kCountIncludesReceiver);
-  }
-
-  // ----------- S t a t e -------------
-  //  -- a2    : argumentsList
-  //  -- a3    : new.target
-  //  -- a1    : target
-  //  -- sp[0] : receiver (undefined)
-  // -----------------------------------
-
-  // 2. We don't need to check explicitly for constructor target here,
-  // since that's the first thing the Construct/ConstructWithArrayLike
-  // builtins will do.
-
-  // 3. We don't need to check explicitly for constructor new.target here,
-  // since that's the second thing the Construct/ConstructWithArrayLike
-  // builtins will do.
-
-  // 4. Construct the target with the given new.target and argumentsList.
-  __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike),
-          RelocInfo::CODE_TARGET);
-}
-
-namespace {
-
-// Allocate new stack space for |count| arguments and shift all existing
-// arguments already on the stack. |pointer_to_new_space_out| points to the
-// first free slot on the stack to copy additional arguments to and
-// |argc_in_out| is updated to include |count|.
-void Generate_AllocateSpaceAndShiftExistingArguments(
-    MacroAssembler* masm, Register count, Register argc_in_out,
-    Register pointer_to_new_space_out, Register scratch1, Register scratch2,
-    Register scratch3) {
-  DCHECK(!AreAliased(count, argc_in_out, pointer_to_new_space_out, scratch1,
-                     scratch2));
-  Register old_sp = scratch1;
-  Register new_space = scratch2;
-  __ mov(old_sp, sp);
-  __ sll(new_space, count, kPointerSizeLog2);
-  __ Subu(sp, sp, Operand(new_space));
-
-  Register end = scratch2;
-  Register value = scratch3;
-  Register dest = pointer_to_new_space_out;
-  __ mov(dest, sp);
-  __ Lsa(end, old_sp, argc_in_out, kSystemPointerSizeLog2);
-  Label loop, done;
-  __ Branch(&done, ge, old_sp, Operand(end));
-  __ bind(&loop);
-  __ lw(value, MemOperand(old_sp, 0));
-  __ sw(value, MemOperand(dest, 0));
-  __ Addu(old_sp, old_sp, Operand(kSystemPointerSize));
-  __ Addu(dest, dest, Operand(kSystemPointerSize));
-  __ Branch(&loop, lt, old_sp, Operand(end));
-  __ bind(&done);
-
-  // Update total number of arguments.
-  __ Addu(argc_in_out, argc_in_out, count);
-}
-
-}  // namespace
-
-// static
-void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
-                                               Handle<Code> code) {
-  // ----------- S t a t e -------------
-  //  -- a1 : target
-  //  -- a0 : number of parameters on the stack
-  //  -- a2 : arguments list (a FixedArray)
-  //  -- t0 : len (number of elements to push from args)
-  //  -- a3 : new.target (for [[Construct]])
-  // -----------------------------------
-  if (v8_flags.debug_code) {
-    // Allow a2 to be a FixedArray, or a FixedDoubleArray if t0 == 0.
-    Label ok, fail;
-    __ AssertNotSmi(a2);
-    __ GetObjectType(a2, t8, t8);
-    __ Branch(&ok, eq, t8, Operand(FIXED_ARRAY_TYPE));
-    __ Branch(&fail, ne, t8, Operand(FIXED_DOUBLE_ARRAY_TYPE));
-    __ Branch(&ok, eq, t0, Operand(0));
-    // Fall through.
-    __ bind(&fail);
-    __ Abort(AbortReason::kOperandIsNotAFixedArray);
-
-    __ bind(&ok);
-  }
-
-  // Check for stack overflow.
-  Label stack_overflow;
-  __ StackOverflowCheck(t0, kScratchReg, t1, &stack_overflow);
-
-  // Move the arguments already in the stack,
-  // including the receiver and the return address.
-  // t0: Number of arguments to make room for.
-  // a0: Number of arguments already on the stack.
-  // t4: Points to first free slot on the stack after arguments were shifted.
-  Generate_AllocateSpaceAndShiftExistingArguments(masm, t0, a0, t4, t3, t1, t2);
-
-  // Push arguments onto the stack (thisArgument is already on the stack).
-  {
-    __ mov(t2, zero_reg);
-    Label done, push, loop;
-    __ LoadRoot(t1, RootIndex::kTheHoleValue);
-    __ bind(&loop);
-    __ Branch(&done, eq, t2, Operand(t0));
-    __ Lsa(kScratchReg, a2, t2, kPointerSizeLog2);
-    __ lw(kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
-    __ Addu(t2, t2, Operand(1));
-    __ Branch(&push, ne, t1, Operand(kScratchReg));
-    __ LoadRoot(kScratchReg, RootIndex::kUndefinedValue);
-    __ bind(&push);
-    __ Sw(kScratchReg, MemOperand(t4, 0));
-    __ Addu(t4, t4, Operand(kSystemPointerSize));
-    __ Branch(&loop);
-    __ bind(&done);
-  }
-
-  // Tail-call to the actual Call or Construct builtin.
-  __ Jump(code, RelocInfo::CODE_TARGET);
-
-  __ bind(&stack_overflow);
-  __ TailCallRuntime(Runtime::kThrowStackOverflow);
-}
-
-// static
-void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
-                                                      CallOrConstructMode mode,
-                                                      Handle<Code> code) {
-  // ----------- S t a t e -------------
-  //  -- a0 : the number of arguments
-  //  -- a3 : the new.target (for [[Construct]] calls)
-  //  -- a1 : the target to call (can be any Object)
-  //  -- a2 : start index (to support rest parameters)
-  // -----------------------------------
-
-  // Check if new.target has a [[Construct]] internal method.
-  if (mode == CallOrConstructMode::kConstruct) {
-    Label new_target_constructor, new_target_not_constructor;
-    __ JumpIfSmi(a3, &new_target_not_constructor);
-    __ lw(t1, FieldMemOperand(a3, HeapObject::kMapOffset));
-    __ lbu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
-    __ And(t1, t1, Operand(Map::Bits1::IsConstructorBit::kMask));
-    __ Branch(&new_target_constructor, ne, t1, Operand(zero_reg));
-    __ bind(&new_target_not_constructor);
-    {
-      FrameScope scope(masm, StackFrame::MANUAL);
-      __ EnterFrame(StackFrame::INTERNAL);
-      __ Push(a3);
-      __ CallRuntime(Runtime::kThrowNotConstructor);
-    }
-    __ bind(&new_target_constructor);
-  }
-
-  Label stack_done, stack_overflow;
-  __ Lw(t2, MemOperand(fp, StandardFrameConstants::kArgCOffset));
-  __ Subu(t2, t2, Operand(kJSArgcReceiverSlots));
-  __ Subu(t2, t2, a2);
-  __ Branch(&stack_done, le, t2, Operand(zero_reg));
-  {
-    // Check for stack overflow.
-    __ StackOverflowCheck(t2, t0, t1, &stack_overflow);
-
-    // Forward the arguments from the caller frame.
-    // Point to the first argument to copy (skipping the receiver).
-    __ Addu(t3, fp,
-            Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
-                    kSystemPointerSize));
-    __ Lsa(t3, t3, a2, kSystemPointerSizeLog2);
-
-    // Move the arguments already in the stack,
-    // including the receiver and the return address.
-    // t2: Number of arguments to make room for.
-    // a0: Number of arguments already on the stack.
-    // a2: Points to first free slot on the stack after arguments were shifted.
-    Generate_AllocateSpaceAndShiftExistingArguments(masm, t2, a0, a2, t5, t6,
-                                                    t7);
-
-    // Copy arguments from the caller frame.
-    // TODO(victorgomes): Consider using forward order as potentially more cache
-    // friendly.
-    {
-      Label loop;
-      __ bind(&loop);
-      {
-        __ Subu(t2, t2, Operand(1));
-        __ Lsa(kScratchReg, t3, t2, kPointerSizeLog2);
-        __ lw(kScratchReg, MemOperand(kScratchReg));
-        __ Lsa(t0, a2, t2, kPointerSizeLog2);
-        __ Sw(kScratchReg, MemOperand(t0));
-        __ Branch(&loop, ne, t2, Operand(zero_reg));
-      }
-    }
-  }
-  __ Branch(&stack_done);
-  __ bind(&stack_overflow);
-  __ TailCallRuntime(Runtime::kThrowStackOverflow);
-  __ bind(&stack_done);
-
-  // Tail-call to the {code} handler.
-  __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-// static
-void Builtins::Generate_CallFunction(MacroAssembler* masm,
-                                     ConvertReceiverMode mode) {
-  // ----------- S t a t e -------------
-  //  -- a0 : the number of arguments
-  //  -- a1 : the function to call (checked to be a JSFunction)
-  // -----------------------------------
-  __ AssertCallableFunction(a1);
-
-  __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-
-  // Enter the context of the function; ToObject has to run in the function
-  // context, and we also need to take the global proxy from the function
-  // context in case of conversion.
- __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); - // We need to convert the receiver for non-native sloppy mode functions. - Label done_convert; - __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset)); - __ And(kScratchReg, a3, - Operand(SharedFunctionInfo::IsNativeBit::kMask | - SharedFunctionInfo::IsStrictBit::kMask)); - __ Branch(&done_convert, ne, kScratchReg, Operand(zero_reg)); - { - // ----------- S t a t e ------------- - // -- a0 : the number of arguments - // -- a1 : the function to call (checked to be a JSFunction) - // -- a2 : the shared function info. - // -- cp : the function context. - // ----------------------------------- - - if (mode == ConvertReceiverMode::kNullOrUndefined) { - // Patch receiver to global proxy. - __ LoadGlobalProxy(a3); - } else { - Label convert_to_object, convert_receiver; - __ LoadReceiver(a3, a0); - __ JumpIfSmi(a3, &convert_to_object); - static_assert(LAST_JS_RECEIVER_TYPE == LAST_TYPE); - __ GetObjectType(a3, t0, t0); - __ Branch(&done_convert, hs, t0, Operand(FIRST_JS_RECEIVER_TYPE)); - if (mode != ConvertReceiverMode::kNotNullOrUndefined) { - Label convert_global_proxy; - __ JumpIfRoot(a3, RootIndex::kUndefinedValue, &convert_global_proxy); - __ JumpIfNotRoot(a3, RootIndex::kNullValue, &convert_to_object); - __ bind(&convert_global_proxy); - { - // Patch receiver to global proxy. - __ LoadGlobalProxy(a3); - } - __ Branch(&convert_receiver); - } - __ bind(&convert_to_object); - { - // Convert receiver using ToObject. - // TODO(bmeurer): Inline the allocation here to avoid building the frame - // in the fast case? (fall back to AllocateInNewSpace?) - FrameScope scope(masm, StackFrame::INTERNAL); - __ sll(a0, a0, kSmiTagSize); // Smi tagged. - __ Push(a0, a1); - __ mov(a0, a3); - __ Push(cp); - __ Call(BUILTIN_CODE(masm->isolate(), ToObject), - RelocInfo::CODE_TARGET); - __ Pop(cp); - __ mov(a3, v0); - __ Pop(a0, a1); - __ sra(a0, a0, kSmiTagSize); // Un-tag. - } - __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); - __ bind(&convert_receiver); - } - __ StoreReceiver(a3, a0, kScratchReg); - } - __ bind(&done_convert); - - // ----------- S t a t e ------------- - // -- a0 : the number of arguments - // -- a1 : the function to call (checked to be a JSFunction) - // -- a2 : the shared function info. - // -- cp : the function context. - // ----------------------------------- - - __ lhu(a2, - FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset)); - __ InvokeFunctionCode(a1, no_reg, a2, a0, InvokeType::kJump); -} - -// static -void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- a0 : the number of arguments - // -- a1 : the function to call (checked to be a JSBoundFunction) - // ----------------------------------- - __ AssertBoundFunction(a1); - - // Patch the receiver to [[BoundThis]]. - { - __ lw(t0, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset)); - __ StoreReceiver(t0, a0, kScratchReg); - } - - // Load [[BoundArguments]] into a2 and length of that into t0. 
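-  // (Sketch of the overall stack effect for n bound arguments:
-  //    [receiver | arg0 .. argK]
-  //  becomes
-  //    [receiver | bound0 .. bound(n-1) | arg0 .. argK];
-  //  the receiver is popped first, the bound arguments are pushed highest
-  //  index first, and the receiver is pushed back on top.)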
- __ lw(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset)); - __ lw(t0, FieldMemOperand(a2, FixedArray::kLengthOffset)); - __ SmiUntag(t0); - - // ----------- S t a t e ------------- - // -- a0 : the number of arguments - // -- a1 : the function to call (checked to be a JSBoundFunction) - // -- a2 : the [[BoundArguments]] (implemented as FixedArray) - // -- t0 : the number of [[BoundArguments]] - // ----------------------------------- - - // Reserve stack space for the [[BoundArguments]]. - { - Label done; - __ sll(t1, t0, kPointerSizeLog2); - __ Subu(t1, sp, Operand(t1)); - // Check the stack for overflow. We are not trying to catch interruptions - // (i.e. debug break and preemption) here, so check the "real stack limit". - __ LoadStackLimit(kScratchReg, - MacroAssembler::StackLimitKind::kRealStackLimit); - __ Branch(&done, hs, t1, Operand(kScratchReg)); - { - FrameScope scope(masm, StackFrame::MANUAL); - __ EnterFrame(StackFrame::INTERNAL); - __ CallRuntime(Runtime::kThrowStackOverflow); - } - __ bind(&done); - } - - // Pop receiver. - __ Pop(t1); - - // Push [[BoundArguments]]. - { - Label loop, done_loop; - __ Addu(a0, a0, Operand(t0)); - __ Addu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ bind(&loop); - __ Subu(t0, t0, Operand(1)); - __ Branch(&done_loop, lt, t0, Operand(zero_reg)); - __ Lsa(kScratchReg, a2, t0, kPointerSizeLog2); - __ Lw(kScratchReg, MemOperand(kScratchReg)); - __ Push(kScratchReg); - __ Branch(&loop); - __ bind(&done_loop); - } - - // Push receiver. - __ Push(t1); - - // Call the [[BoundTargetFunction]] via the Call builtin. - __ lw(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset)); - __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny), - RelocInfo::CODE_TARGET); -} - -// static -void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) { - // ----------- S t a t e ------------- - // -- a0 : the number of arguments - // -- a1 : the target to call (can be any Object). - // ----------------------------------- - - Register argc = a0; - Register target = a1; - Register map = t1; - Register instance_type = t2; - Register scratch = t8; - DCHECK(!AreAliased(argc, target, map, instance_type, scratch)); - - Label non_callable, class_constructor; - __ JumpIfSmi(target, &non_callable); - __ LoadMap(map, target); - __ GetInstanceTypeRange(map, instance_type, FIRST_CALLABLE_JS_FUNCTION_TYPE, - scratch); - __ Jump(masm->isolate()->builtins()->CallFunction(mode), - RelocInfo::CODE_TARGET, ls, scratch, - Operand(LAST_CALLABLE_JS_FUNCTION_TYPE - - FIRST_CALLABLE_JS_FUNCTION_TYPE)); - __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction), - RelocInfo::CODE_TARGET, eq, instance_type, - Operand(JS_BOUND_FUNCTION_TYPE)); - - // Check if target has a [[Call]] internal method. 
- { - Register flags = t1; - __ lbu(flags, FieldMemOperand(map, Map::kBitFieldOffset)); - map = no_reg; - __ And(flags, flags, Operand(Map::Bits1::IsCallableBit::kMask)); - __ Branch(&non_callable, eq, flags, Operand(zero_reg)); - } - - // Check if target is a proxy and call CallProxy external builtin - __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq, - instance_type, Operand(JS_PROXY_TYPE)); - - // Check if target is a wrapped function and call CallWrappedFunction external - // builtin - __ Jump(BUILTIN_CODE(masm->isolate(), CallWrappedFunction), - RelocInfo::CODE_TARGET, eq, instance_type, - Operand(JS_WRAPPED_FUNCTION_TYPE)); - - // ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList) - // Check that the function is not a "classConstructor". - __ Branch(&class_constructor, eq, instance_type, - Operand(JS_CLASS_CONSTRUCTOR_TYPE)); - - // 2. Call to something else, which might have a [[Call]] internal method (if - // not we raise an exception). - // Overwrite the original receiver with the (original) target. - __ StoreReceiver(target, argc, kScratchReg); - // Let the "call_as_function_delegate" take care of the rest. - __ LoadNativeContextSlot(target, Context::CALL_AS_FUNCTION_DELEGATE_INDEX); - __ Jump(masm->isolate()->builtins()->CallFunction( - ConvertReceiverMode::kNotNullOrUndefined), - RelocInfo::CODE_TARGET); - - // 3. Call to something that is not callable. - __ bind(&non_callable); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ Push(target); - __ CallRuntime(Runtime::kThrowCalledNonCallable); - } - - // 4. The function is a "classConstructor", need to raise an exception. - __ bind(&class_constructor); - { - FrameScope frame(masm, StackFrame::INTERNAL); - __ Push(target); - __ CallRuntime(Runtime::kThrowConstructorNonCallableError); - } -} - -// static -void Builtins::Generate_ConstructFunction(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- a0 : the number of arguments - // -- a1 : the constructor to call (checked to be a JSFunction) - // -- a3 : the new target (checked to be a constructor) - // ----------------------------------- - __ AssertConstructor(a1); - __ AssertFunction(a1); - - // Calling convention for function specific ConstructStubs require - // a2 to contain either an AllocationSite or undefined. - __ LoadRoot(a2, RootIndex::kUndefinedValue); - - Label call_generic_stub; - - // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric. - __ lw(t0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); - __ lw(t0, FieldMemOperand(t0, SharedFunctionInfo::kFlagsOffset)); - __ And(t0, t0, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask)); - __ Branch(&call_generic_stub, eq, t0, Operand(zero_reg)); - - __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub), - RelocInfo::CODE_TARGET); - - __ bind(&call_generic_stub); - __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric), - RelocInfo::CODE_TARGET); -} - -// static -void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- a0 : the number of arguments - // -- a1 : the function to call (checked to be a JSBoundFunction) - // -- a3 : the new target (checked to be a constructor) - // ----------------------------------- - __ AssertConstructor(a1); - __ AssertBoundFunction(a1); - - // Load [[BoundArguments]] into a2 and length of that into t0. 
- __ lw(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset)); - __ lw(t0, FieldMemOperand(a2, FixedArray::kLengthOffset)); - __ SmiUntag(t0); - - // ----------- S t a t e ------------- - // -- a0 : the number of arguments - // -- a1 : the function to call (checked to be a JSBoundFunction) - // -- a2 : the [[BoundArguments]] (implemented as FixedArray) - // -- a3 : the new target (checked to be a constructor) - // -- t0 : the number of [[BoundArguments]] - // ----------------------------------- - - // Reserve stack space for the [[BoundArguments]]. - { - Label done; - __ sll(t1, t0, kPointerSizeLog2); - __ Subu(t1, sp, Operand(t1)); - // Check the stack for overflow. We are not trying to catch interruptions - // (i.e. debug break and preemption) here, so check the "real stack limit". - __ LoadStackLimit(kScratchReg, - MacroAssembler::StackLimitKind::kRealStackLimit); - __ Branch(&done, hs, t1, Operand(kScratchReg)); - { - FrameScope scope(masm, StackFrame::MANUAL); - __ EnterFrame(StackFrame::INTERNAL); - __ CallRuntime(Runtime::kThrowStackOverflow); - } - __ bind(&done); - } - - // Pop receiver - __ Pop(t1); - - // Push [[BoundArguments]]. - { - Label loop, done_loop; - __ Addu(a0, a0, Operand(t0)); - __ Addu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ bind(&loop); - __ Subu(t0, t0, Operand(1)); - __ Branch(&done_loop, lt, t0, Operand(zero_reg)); - __ Lsa(kScratchReg, a2, t0, kPointerSizeLog2); - __ Lw(kScratchReg, MemOperand(kScratchReg)); - __ Push(kScratchReg); - __ Branch(&loop); - __ bind(&done_loop); - } - - // Push receiver. - __ Push(t1); - - // Patch new.target to [[BoundTargetFunction]] if new.target equals target. - { - Label skip_load; - __ Branch(&skip_load, ne, a1, Operand(a3)); - __ lw(a3, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset)); - __ bind(&skip_load); - } - - // Construct the [[BoundTargetFunction]] via the Construct builtin. - __ lw(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset)); - __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET); -} - -// static -void Builtins::Generate_Construct(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- a0 : the number of arguments - // -- a1 : the constructor to call (can be any Object) - // -- a3 : the new target (either the same as the constructor or - // the JSFunction on which new was invoked initially) - // ----------------------------------- - - Register argc = a0; - Register target = a1; - Register map = t1; - Register instance_type = t2; - Register scratch = t8; - DCHECK(!AreAliased(argc, target, map, instance_type, scratch)); - - // Check if target is a Smi. - Label non_constructor, non_proxy; - __ JumpIfSmi(target, &non_constructor); - - // Check if target has a [[Construct]] internal method. - __ lw(map, FieldMemOperand(target, HeapObject::kMapOffset)); - { - Register flags = t3; - __ lbu(flags, FieldMemOperand(map, Map::kBitFieldOffset)); - __ And(flags, flags, Operand(Map::Bits1::IsConstructorBit::kMask)); - __ Branch(&non_constructor, eq, flags, Operand(zero_reg)); - } - - // Dispatch based on instance type. - __ GetInstanceTypeRange(map, instance_type, FIRST_JS_FUNCTION_TYPE, scratch); - __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction), - RelocInfo::CODE_TARGET, ls, scratch, - Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE)); - - // Only dispatch to bound functions after checking whether they are - // constructors. 
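// Before either splice above runs, the builtin probes the "real stack
// limit". A minimal sketch of that arithmetic, with kPointerSizeLog2 = 2 as
// on 32-bit MIPS; wrap-around is ignored, since realistic argument counts
// cannot underflow sp:
#include <cstdint>

constexpr int kPointerSizeLog2 = 2;

bool WouldOverflowStack(uintptr_t sp, uintptr_t real_stack_limit,
                        uintptr_t num_bound_args) {
  uintptr_t needed = num_bound_args << kPointerSizeLog2;  // __ sll(t1, t0, 2)
  uintptr_t new_sp = sp - needed;                         // __ Subu(t1, sp, t1)
  // __ Branch(&done, hs, ...): proceed only if new_sp >= limit (unsigned);
  // otherwise Runtime::kThrowStackOverflow is called.
  return new_sp < real_stack_limit;
}
// (The ConstructBoundFunction dispatch continues below.)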
- __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
- RelocInfo::CODE_TARGET, eq, instance_type,
- Operand(JS_BOUND_FUNCTION_TYPE));
-
- // Only dispatch to proxies after checking whether they are constructors.
- __ Branch(&non_proxy, ne, instance_type, Operand(JS_PROXY_TYPE));
- __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
- RelocInfo::CODE_TARGET);
-
- // Called Construct on an exotic Object with a [[Construct]] internal method.
- __ bind(&non_proxy);
- {
- // Overwrite the original receiver with the (original) target.
- __ StoreReceiver(target, argc, kScratchReg);
- // Let the "call_as_constructor_delegate" take care of the rest.
- __ LoadNativeContextSlot(target,
- Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
- __ Jump(masm->isolate()->builtins()->CallFunction(),
- RelocInfo::CODE_TARGET);
- }
-
- // Called Construct on an Object that doesn't have a [[Construct]] internal
- // method.
- __ bind(&non_constructor);
- __ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable),
- RelocInfo::CODE_TARGET);
-}
-
-#if V8_ENABLE_WEBASSEMBLY
-void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
- // The function index was put in t0 by the jump table trampoline.
- // Convert to Smi for the runtime call.
- __ SmiTag(kWasmCompileLazyFuncIndexRegister);
-
- // Compute register lists for parameters to be saved. We save all parameter
- // registers (see wasm-linkage.h). They might be overwritten in the runtime
- // call below. We don't have any callee-saved registers in wasm, so no need to
- // store anything else.
- constexpr RegList kSavedGpRegs = ([]() constexpr {
- RegList saved_gp_regs;
- for (Register gp_param_reg : wasm::kGpParamRegisters) {
- saved_gp_regs.set(gp_param_reg);
- }
-
- // All set registers were unique.
- CHECK_EQ(saved_gp_regs.Count(), arraysize(wasm::kGpParamRegisters));
- // The Wasm instance must be part of the saved registers.
- CHECK(saved_gp_regs.has(kWasmInstanceRegister));
- CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs,
- saved_gp_regs.Count());
- return saved_gp_regs;
- })();
-
- constexpr DoubleRegList kSavedFpRegs = ([]() constexpr {
- DoubleRegList saved_fp_regs;
- for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
- saved_fp_regs.set(fp_param_reg);
- }
-
- CHECK_EQ(saved_fp_regs.Count(), arraysize(wasm::kFpParamRegisters));
- CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
- saved_fp_regs.Count());
- return saved_fp_regs;
- })();
-
- {
- HardAbortScope hard_abort(masm); // Avoid calls to Abort.
- FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
-
- // Save registers that we need to keep alive across the runtime call.
- __ MultiPush(kSavedGpRegs);
- __ MultiPushFPU(kSavedFpRegs);
-
- // Pass instance and function index as explicit arguments to the runtime
- // function.
- __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);
- // Initialize the JavaScript context with 0. CEntry will use it to
- // set the current context on the isolate.
- __ Move(kContextRegister, Smi::zero());
- __ CallRuntime(Runtime::kWasmCompileLazy, 2);
-
- // Restore registers.
- __ MultiPopFPU(kSavedFpRegs);
- __ MultiPop(kSavedGpRegs);
- }
-
- // Untag the returned Smi, for later use.
- static_assert(!kSavedGpRegs.has(v0));
- __ SmiUntag(v0);
-
- // The runtime function returned the jump table slot offset as a Smi (now in
- // v0). Use that to compute the jump target.
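// The immediately-invoked constexpr lambdas above build the save lists at
// compile time. A self-contained sketch of that pattern with a toy bitmask
// list; V8's real RegList is richer, and the register codes here are
// illustrative only:
#include <cstdint>

struct ToyRegList {
  uint32_t bits = 0;
  constexpr void set(int reg) { bits |= uint32_t{1} << reg; }
  constexpr bool has(int reg) const { return (bits >> reg) & 1u; }
  constexpr int Count() const {
    int n = 0;
    for (uint32_t b = bits; b != 0; b &= b - 1) ++n;  // clear lowest set bit
    return n;
  }
};

constexpr int kGpParamRegs[] = {4, 5, 6, 7};  // a0..a3, illustrative codes

constexpr ToyRegList kToySavedGpRegs = ([]() constexpr {
  ToyRegList regs;
  for (int r : kGpParamRegs) regs.set(r);
  return regs;
})();

static_assert(kToySavedGpRegs.Count() == 4);
static_assert(kToySavedGpRegs.has(4));
// (The jump-target computation continues below.)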
- static_assert(!kSavedGpRegs.has(t8)); - __ Lw(t8, - MemOperand(kWasmInstanceRegister, - WasmInstanceObject::kJumpTableStartOffset - kHeapObjectTag)); - __ Addu(t8, v0, t8); - - // Finally, jump to the jump table slot for the function. - __ Jump(t8); -} - -void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) { - HardAbortScope hard_abort(masm); // Avoid calls to Abort. - { - FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK); - - // Save all parameter registers. They might hold live values, we restore - // them after the runtime call. - __ MultiPush(WasmDebugBreakFrameConstants::kPushedGpRegs); - __ MultiPushFPU(WasmDebugBreakFrameConstants::kPushedFpRegs); - - // Initialize the JavaScript context with 0. CEntry will use it to - // set the current context on the isolate. - __ Move(cp, Smi::zero()); - __ CallRuntime(Runtime::kWasmDebugBreak, 0); - - // Restore registers. - __ MultiPopFPU(WasmDebugBreakFrameConstants::kPushedFpRegs); - __ MultiPop(WasmDebugBreakFrameConstants::kPushedGpRegs); - } - __ Ret(); -} - -void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { - __ Trap(); -} - -void Builtins::Generate_WasmReturnPromiseOnSuspend(MacroAssembler* masm) { - // TODO(v8:12191): Implement for this platform. - __ Trap(); -} - -void Builtins::Generate_WasmSuspend(MacroAssembler* masm) { - // TODO(v8:12191): Implement for this platform. - __ Trap(); -} - -void Builtins::Generate_WasmResume(MacroAssembler* masm) { - // TODO(v8:12191): Implement for this platform. - __ Trap(); -} - -void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) { - // Only needed on x64. - __ Trap(); -} - -#endif // V8_ENABLE_WEBASSEMBLY - -void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, - SaveFPRegsMode save_doubles, ArgvMode argv_mode, - bool builtin_exit_frame) { - // Called from JavaScript; parameters are on stack as if calling JS function - // a0: number of arguments including receiver - // a1: pointer to builtin function - // fp: frame pointer (restored after C call) - // sp: stack pointer (restored as callee's sp after C call) - // cp: current context (C callee-saved) - // - // If argv_mode == ArgvMode::kRegister: - // a2: pointer to the first argument - - if (argv_mode == ArgvMode::kRegister) { - // Move argv into the correct register. - __ mov(s1, a2); - } else { - // Compute the argv pointer in a callee-saved register. - __ Lsa(s1, sp, a0, kPointerSizeLog2); - __ Subu(s1, s1, kPointerSize); - } - - // Enter the exit frame that transitions from JavaScript to C++. - FrameScope scope(masm, StackFrame::MANUAL); - __ EnterExitFrame( - save_doubles == SaveFPRegsMode::kSave, 0, - builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT); - - // s0: number of arguments including receiver (C callee-saved) - // s1: pointer to first argument (C callee-saved) - // s2: pointer to builtin function (C callee-saved) - - // Prepare arguments for C routine. - // a0 = argc - __ mov(s0, a0); - __ mov(s2, a1); - - // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We - // also need to reserve the 4 argument slots on the stack. - - __ AssertStackIsAligned(); - - // a0 = argc, a1 = argv, a2 = isolate - __ li(a2, ExternalReference::isolate_address(masm->isolate())); - __ mov(a1, s1); - - __ StoreReturnAddressAndCall(s2); - - // Result returned in v0 or v1:v0 - do not destroy these registers! - - // Check result for exception sentinel. 
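// On entry to CEntry above, the argv pointer for ArgvMode::kStack is derived
// from argc before the exit frame is entered. A minimal sketch of that
// address arithmetic, using this port's 32-bit constants:
#include <cstdint>

constexpr uintptr_t kPointerSize = 4;
constexpr int kPointerSizeLog2 = 2;

uintptr_t ComputeArgv(uintptr_t sp, uintptr_t argc) {
  // __ Lsa(s1, sp, a0, kPointerSizeLog2): sp + argc * 4 points one slot past
  // the receiver; stepping back one slot yields the first argument.
  uintptr_t one_past = sp + (argc << kPointerSizeLog2);
  return one_past - kPointerSize;  // __ Subu(s1, s1, kPointerSize)
}
// (The exception-sentinel check continues below.)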
- Label exception_returned; - __ LoadRoot(t0, RootIndex::kException); - __ Branch(&exception_returned, eq, t0, Operand(v0)); - - // Check that there is no pending exception, otherwise we - // should have returned the exception sentinel. - if (v8_flags.debug_code) { - Label okay; - ExternalReference pending_exception_address = ExternalReference::Create( - IsolateAddressId::kPendingExceptionAddress, masm->isolate()); - __ li(a2, pending_exception_address); - __ lw(a2, MemOperand(a2)); - __ LoadRoot(t0, RootIndex::kTheHoleValue); - // Cannot use check here as it attempts to generate call into runtime. - __ Branch(&okay, eq, t0, Operand(a2)); - __ stop(); - __ bind(&okay); - } - - // Exit C frame and return. - // v0:v1: result - // sp: stack pointer - // fp: frame pointer - Register argc = argv_mode == ArgvMode::kRegister - // We don't want to pop arguments so set argc to no_reg. - ? no_reg - // s0: still holds argc (callee-saved). - : s0; - __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc, EMIT_RETURN); - - // Handling of exception. - __ bind(&exception_returned); - - ExternalReference pending_handler_context_address = ExternalReference::Create( - IsolateAddressId::kPendingHandlerContextAddress, masm->isolate()); - ExternalReference pending_handler_entrypoint_address = - ExternalReference::Create( - IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate()); - ExternalReference pending_handler_fp_address = ExternalReference::Create( - IsolateAddressId::kPendingHandlerFPAddress, masm->isolate()); - ExternalReference pending_handler_sp_address = ExternalReference::Create( - IsolateAddressId::kPendingHandlerSPAddress, masm->isolate()); - - // Ask the runtime for help to determine the handler. This will set v0 to - // contain the current pending exception, don't clobber it. - ExternalReference find_handler = - ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler); - { - FrameScope scope(masm, StackFrame::MANUAL); - __ PrepareCallCFunction(3, 0, a0); - __ mov(a0, zero_reg); - __ mov(a1, zero_reg); - __ li(a2, ExternalReference::isolate_address(masm->isolate())); - __ CallCFunction(find_handler, 3); - } - - // Retrieve the handler context, SP and FP. - __ li(cp, pending_handler_context_address); - __ lw(cp, MemOperand(cp)); - __ li(sp, pending_handler_sp_address); - __ lw(sp, MemOperand(sp)); - __ li(fp, pending_handler_fp_address); - __ lw(fp, MemOperand(fp)); - - // If the handler is a JS frame, restore the context to the frame. Note that - // the context will be set to (cp == 0) for non-JS frames. - Label zero; - __ Branch(&zero, eq, cp, Operand(zero_reg)); - __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); - __ bind(&zero); - - // Clear c_entry_fp, like we do in `LeaveExitFrame`. - { - UseScratchRegisterScope temps(masm); - Register scratch = temps.Acquire(); - __ li(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, - masm->isolate())); - __ Sw(zero_reg, MemOperand(scratch)); - } - - // Compute the handler entry address and jump to it. - __ li(t9, pending_handler_entrypoint_address); - __ lw(t9, MemOperand(t9)); - __ Jump(t9); -} - -void Builtins::Generate_DoubleToI(MacroAssembler* masm) { - Label done; - Register result_reg = t0; - - Register scratch = GetRegisterThatIsNotOneOf(result_reg); - Register scratch2 = GetRegisterThatIsNotOneOf(result_reg, scratch); - Register scratch3 = GetRegisterThatIsNotOneOf(result_reg, scratch, scratch2); - DoubleRegister double_scratch = kScratchDoubleReg; - - // Account for saved regs. 
- const int kArgumentOffset = 4 * kPointerSize; - - __ Push(result_reg); - __ Push(scratch, scratch2, scratch3); - - // Load double input. - __ Ldc1(double_scratch, MemOperand(sp, kArgumentOffset)); - - // Try a conversion to a signed integer. - __ Trunc_w_d(double_scratch, double_scratch); - // Move the converted value into the result register. - __ mfc1(scratch3, double_scratch); - - // Retrieve the FCSR. - __ cfc1(scratch, FCSR); - - // Check for overflow and NaNs. - __ And(scratch, scratch, - kFCSROverflowCauseMask | kFCSRUnderflowCauseMask | - kFCSRInvalidOpCauseMask); - // If we had no exceptions then set result_reg and we are done. - Label error; - __ Branch(&error, ne, scratch, Operand(zero_reg)); - __ Move(result_reg, scratch3); - __ Branch(&done); - __ bind(&error); - - // Load the double value and perform a manual truncation. - Register input_high = scratch2; - Register input_low = scratch3; - - __ lw(input_low, MemOperand(sp, kArgumentOffset + Register::kMantissaOffset)); - __ lw(input_high, - MemOperand(sp, kArgumentOffset + Register::kExponentOffset)); - - Label normal_exponent; - // Extract the biased exponent in result. - __ Ext(result_reg, input_high, HeapNumber::kExponentShift, - HeapNumber::kExponentBits); - - // Check for Infinity and NaNs, which should return 0. - __ Subu(scratch, result_reg, HeapNumber::kExponentMask); - __ Movz(result_reg, zero_reg, scratch); - __ Branch(&done, eq, scratch, Operand(zero_reg)); - - // Express exponent as delta to (number of mantissa bits + 31). - __ Subu(result_reg, result_reg, - Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31)); - - // If the delta is strictly positive, all bits would be shifted away, - // which means that we can return 0. - __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg)); - __ mov(result_reg, zero_reg); - __ Branch(&done); - - __ bind(&normal_exponent); - const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1; - // Calculate shift. - __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits)); - - // Save the sign. - Register sign = result_reg; - result_reg = no_reg; - __ And(sign, input_high, Operand(HeapNumber::kSignMask)); - - // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need - // to check for this specific case. - Label high_shift_needed, high_shift_done; - __ Branch(&high_shift_needed, lt, scratch, Operand(32)); - __ mov(input_high, zero_reg); - __ Branch(&high_shift_done); - __ bind(&high_shift_needed); - - // Set the implicit 1 before the mantissa part in input_high. - __ Or(input_high, input_high, - Operand(1 << HeapNumber::kMantissaBitsInTopWord)); - // Shift the mantissa bits to the correct position. - // We don't need to clear non-mantissa bits as they will be shifted away. - // If they weren't, it would mean that the answer is in the 32bit range. - __ sllv(input_high, input_high, scratch); - - __ bind(&high_shift_done); - - // Replace the shifted bits with bits from the lower mantissa word. - Label pos_shift, shift_done; - __ li(kScratchReg, 32); - __ subu(scratch, kScratchReg, scratch); - __ Branch(&pos_shift, ge, scratch, Operand(zero_reg)); - - // Negate scratch. - __ Subu(scratch, zero_reg, scratch); - __ sllv(input_low, input_low, scratch); - __ Branch(&shift_done); - - __ bind(&pos_shift); - __ srlv(input_low, input_low, scratch); - - __ bind(&shift_done); - __ Or(input_high, input_high, Operand(input_low)); - // Restore sign if necessary. 
- __ mov(scratch, sign); - result_reg = sign; - sign = no_reg; - __ Subu(result_reg, zero_reg, input_high); - __ Movz(result_reg, input_high, scratch); - - __ bind(&done); - __ sw(result_reg, MemOperand(sp, kArgumentOffset)); - __ Pop(scratch, scratch2, scratch3); - __ Pop(result_reg); - __ Ret(); -} - -namespace { - -int AddressOffset(ExternalReference ref0, ExternalReference ref1) { - return ref0.address() - ref1.address(); -} - -// Calls an API function. Allocates HandleScope, extracts returned value -// from handle and propagates exceptions. Restores context. stack_space -// - space to be unwound on exit (includes the call JS arguments space and -// the additional space allocated for the fast call). -void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, - ExternalReference thunk_ref, int stack_space, - MemOperand* stack_space_operand, - MemOperand return_value_operand) { - ASM_CODE_COMMENT(masm); - Isolate* isolate = masm->isolate(); - ExternalReference next_address = - ExternalReference::handle_scope_next_address(isolate); - const int kNextOffset = 0; - const int kLimitOffset = AddressOffset( - ExternalReference::handle_scope_limit_address(isolate), next_address); - const int kLevelOffset = AddressOffset( - ExternalReference::handle_scope_level_address(isolate), next_address); - - DCHECK(function_address == a1 || function_address == a2); - - Label profiler_enabled, end_profiler_check; - __ li(t9, ExternalReference::is_profiling_address(isolate)); - __ lb(t9, MemOperand(t9, 0)); - __ Branch(&profiler_enabled, ne, t9, Operand(zero_reg)); - __ li(t9, ExternalReference::address_of_runtime_stats_flag()); - __ lw(t9, MemOperand(t9, 0)); - __ Branch(&profiler_enabled, ne, t9, Operand(zero_reg)); - { - // Call the api function directly. - __ mov(t9, function_address); - __ Branch(&end_profiler_check); - } - __ bind(&profiler_enabled); - { - // Additional parameter is the address of the actual callback. - __ li(t9, thunk_ref); - } - __ bind(&end_profiler_check); - - // Allocate HandleScope in callee-save registers. - __ li(s5, next_address); - __ lw(s0, MemOperand(s5, kNextOffset)); - __ lw(s1, MemOperand(s5, kLimitOffset)); - __ lw(s2, MemOperand(s5, kLevelOffset)); - __ Addu(s2, s2, Operand(1)); - __ sw(s2, MemOperand(s5, kLevelOffset)); - - __ StoreReturnAddressAndCall(t9); - - Label promote_scheduled_exception; - Label delete_allocated_handles; - Label leave_exit_frame; - Label return_value_loaded; - - // Load value from ReturnValue. - __ lw(v0, return_value_operand); - __ bind(&return_value_loaded); - - // No more valid handles (the result handle was the last one). Restore - // previous handle scope. - __ sw(s0, MemOperand(s5, kNextOffset)); - if (v8_flags.debug_code) { - __ lw(a1, MemOperand(s5, kLevelOffset)); - __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1, - Operand(s2)); - } - __ Subu(s2, s2, Operand(1)); - __ sw(s2, MemOperand(s5, kLevelOffset)); - __ lw(kScratchReg, MemOperand(s5, kLimitOffset)); - __ Branch(&delete_allocated_handles, ne, s1, Operand(kScratchReg)); - - // Leave the API exit frame. - __ bind(&leave_exit_frame); - - if (stack_space_operand == nullptr) { - DCHECK_NE(stack_space, 0); - __ li(s0, Operand(stack_space)); - } else { - DCHECK_EQ(stack_space, 0); - // The ExitFrame contains four MIPS argument slots after the call so this - // must be accounted for. - // TODO(jgruber): Investigate if this is needed by the direct call. 
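// The manual DoubleToI fallback completed above implements ToInt32-style
// modular truncation with integer ops only. A hedged, self-contained C++
// model of the same exponent/shift logic (not V8's register-level
// choreography):
#include <cstdint>
#include <cstring>

int32_t TruncateDoubleToInt32(double input) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof(bits));
  uint32_t biased_exponent = static_cast<uint32_t>(bits >> 52) & 0x7FF;
  if (biased_exponent == 0x7FF) return 0;  // Infinity and NaN map to 0.
  uint64_t significand = bits & ((uint64_t{1} << 52) - 1);
  if (biased_exponent != 0) significand |= uint64_t{1} << 52;  // implicit 1
  // Power of two multiplying the 53-bit significand: bias 1023 + 52 bits.
  int exponent = static_cast<int>(biased_exponent) - 1075;
  uint32_t magnitude;
  if (exponent >= 32) {
    magnitude = 0;  // every significand bit lands above bit 31
  } else if (exponent >= 0) {
    magnitude = static_cast<uint32_t>(significand << exponent);
  } else if (exponent > -53) {
    magnitude = static_cast<uint32_t>(significand >> -exponent);
  } else {
    magnitude = 0;  // |input| < 1, including subnormals
  }
  uint32_t result = (bits >> 63) ? 0u - magnitude : magnitude;  // apply sign
  return static_cast<int32_t>(result);  // two's-complement wrap, as ToInt32
}
// (The API-call epilogue continues below.)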
- __ Drop(kCArgSlotCount); - __ lw(s0, *stack_space_operand); - } - - static constexpr bool kDontSaveDoubles = false; - static constexpr bool kRegisterContainsSlotCount = false; - __ LeaveExitFrame(kDontSaveDoubles, s0, NO_EMIT_RETURN, - kRegisterContainsSlotCount); - - // Check if the function scheduled an exception. - __ LoadRoot(t0, RootIndex::kTheHoleValue); - __ li(kScratchReg, ExternalReference::scheduled_exception_address(isolate)); - __ lw(t1, MemOperand(kScratchReg)); - __ Branch(&promote_scheduled_exception, ne, t0, Operand(t1)); - - __ Ret(); - - // Re-throw by promoting a scheduled exception. - __ bind(&promote_scheduled_exception); - __ TailCallRuntime(Runtime::kPromoteScheduledException); - - // HandleScope limit has changed. Delete allocated extensions. - __ bind(&delete_allocated_handles); - __ sw(s1, MemOperand(s5, kLimitOffset)); - __ mov(s0, v0); - __ mov(a0, v0); - __ PrepareCallCFunction(1, s1); - __ li(a0, ExternalReference::isolate_address(isolate)); - __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1); - __ mov(v0, s0); - __ jmp(&leave_exit_frame); -} - -} // namespace - -void Builtins::Generate_CallApiCallback(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- cp : context - // -- a1 : api function address - // -- a2 : arguments count - // -- a3 : call data - // -- a0 : holder - // -- sp[0] : receiver - // -- sp[8] : first argument - // -- ... - // -- sp[(argc) * 8] : last argument - // ----------------------------------- - - Register api_function_address = a1; - Register argc = a2; - Register call_data = a3; - Register holder = a0; - Register scratch = t0; - Register base = t1; // For addressing MemOperands on the stack. - - DCHECK(!AreAliased(api_function_address, argc, call_data, - holder, scratch, base)); - - using FCA = FunctionCallbackArguments; - - static_assert(FCA::kArgsLength == 6); - static_assert(FCA::kNewTargetIndex == 5); - static_assert(FCA::kDataIndex == 4); - static_assert(FCA::kReturnValueOffset == 3); - static_assert(FCA::kReturnValueDefaultValueIndex == 2); - static_assert(FCA::kIsolateIndex == 1); - static_assert(FCA::kHolderIndex == 0); - - // Set up FunctionCallbackInfo's implicit_args on the stack as follows: - // - // Target state: - // sp[0 * kPointerSize]: kHolder - // sp[1 * kPointerSize]: kIsolate - // sp[2 * kPointerSize]: undefined (kReturnValueDefaultValue) - // sp[3 * kPointerSize]: undefined (kReturnValue) - // sp[4 * kPointerSize]: kData - // sp[5 * kPointerSize]: undefined (kNewTarget) - - // Set up the base register for addressing through MemOperands. It will point - // at the receiver (located at sp + argc * kPointerSize). - __ Lsa(base, sp, argc, kPointerSizeLog2); - - // Reserve space on the stack. - __ Subu(sp, sp, Operand(FCA::kArgsLength * kPointerSize)); - - // kHolder. - __ sw(holder, MemOperand(sp, 0 * kPointerSize)); - - // kIsolate. - __ li(scratch, ExternalReference::isolate_address(masm->isolate())); - __ sw(scratch, MemOperand(sp, 1 * kPointerSize)); - - // kReturnValueDefaultValue and kReturnValue. - __ LoadRoot(scratch, RootIndex::kUndefinedValue); - __ sw(scratch, MemOperand(sp, 2 * kPointerSize)); - __ sw(scratch, MemOperand(sp, 3 * kPointerSize)); - - // kData. - __ sw(call_data, MemOperand(sp, 4 * kPointerSize)); - - // kNewTarget. - __ sw(scratch, MemOperand(sp, 5 * kPointerSize)); - - // Keep a pointer to kHolder (= implicit_args) in a scratch register. - // We use it below to set up the FunctionCallbackInfo object. 
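// CallApiFunctionAndReturn above brackets the C++ callback with HandleScope
// bookkeeping: save next/limit/level, bump level, and on return restore
// next, drop level, and free extension blocks if limit moved. A simplified
// sketch; HandleScopeData and DeleteExtensions are stand-ins, not V8's types:
#include <cstdint>

struct HandleScopeData {
  uintptr_t* next;
  uintptr_t* limit;
  int level;
};

inline void DeleteExtensions(HandleScopeData&) { /* runtime helper stand-in */ }

template <typename ApiCall>
uintptr_t CallWithHandleScope(HandleScopeData& data, ApiCall call) {
  uintptr_t* saved_next = data.next;    // __ lw(s0, kNextOffset)
  uintptr_t* saved_limit = data.limit;  // __ lw(s1, kLimitOffset)
  int saved_level = data.level;         // __ lw(s2, kLevelOffset)
  data.level = saved_level + 1;         // __ Addu(s2, s2, 1); __ sw(...)

  uintptr_t result = call();            // __ StoreReturnAddressAndCall(t9)

  data.next = saved_next;               // the result handle was the last one
  data.level = saved_level;
  if (data.limit != saved_limit) {      // __ Branch(&delete_allocated_handles)
    data.limit = saved_limit;
    DeleteExtensions(data);             // extensions allocated during the call
  }
  return result;
}
// (The FunctionCallbackInfo setup continues below.)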
- __ mov(scratch, sp); - - // Allocate the v8::Arguments structure in the arguments' space since - // it's not controlled by GC. - static constexpr int kApiStackSpace = 4; - static constexpr bool kDontSaveDoubles = false; - FrameScope frame_scope(masm, StackFrame::MANUAL); - __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace); - - // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above). - // Arguments are after the return address (pushed by EnterExitFrame()). - __ sw(scratch, MemOperand(sp, 1 * kPointerSize)); - - // FunctionCallbackInfo::values_ (points at the first varargs argument passed - // on the stack). - __ Addu(scratch, scratch, - Operand((FCA::kArgsLength + 1) * kSystemPointerSize)); - __ sw(scratch, MemOperand(sp, 2 * kPointerSize)); - - // FunctionCallbackInfo::length_. - __ sw(argc, MemOperand(sp, 3 * kPointerSize)); - - // We also store the number of bytes to drop from the stack after returning - // from the API function here. - // Note: Unlike on other architectures, this stores the number of slots to - // drop, not the number of bytes. - __ Addu(scratch, argc, Operand(FCA::kArgsLength + 1 /* receiver */)); - __ sw(scratch, MemOperand(sp, 4 * kPointerSize)); - - // v8::InvocationCallback's argument. - DCHECK(!AreAliased(api_function_address, scratch, a0)); - __ Addu(a0, sp, Operand(1 * kPointerSize)); - - ExternalReference thunk_ref = ExternalReference::invoke_function_callback(); - - // There are two stack slots above the arguments we constructed on the stack. - // TODO(jgruber): Document what these arguments are. - static constexpr int kStackSlotsAboveFCA = 2; - MemOperand return_value_operand( - fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize); - - static constexpr int kUseStackSpaceOperand = 0; - MemOperand stack_space_operand(sp, 4 * kPointerSize); - - AllowExternalCallThatCantCauseGC scope(masm); - CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, - kUseStackSpaceOperand, &stack_space_operand, - return_value_operand); -} - -void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { - // Build v8::PropertyCallbackInfo::args_ array on the stack and push property - // name below the exit frame to make GC aware of them. - static_assert(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0); - static_assert(PropertyCallbackArguments::kHolderIndex == 1); - static_assert(PropertyCallbackArguments::kIsolateIndex == 2); - static_assert(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3); - static_assert(PropertyCallbackArguments::kReturnValueOffset == 4); - static_assert(PropertyCallbackArguments::kDataIndex == 5); - static_assert(PropertyCallbackArguments::kThisIndex == 6); - static_assert(PropertyCallbackArguments::kArgsLength == 7); - - Register receiver = ApiGetterDescriptor::ReceiverRegister(); - Register holder = ApiGetterDescriptor::HolderRegister(); - Register callback = ApiGetterDescriptor::CallbackRegister(); - Register scratch = t0; - DCHECK(!AreAliased(receiver, holder, callback, scratch)); - - Register api_function_address = a2; - - // Here and below +1 is for name() pushed after the args_ array. 
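// The implicit_args block assembled above can be pictured as a plain struct
// whose field order matches the target stack state, kHolder at the lowest
// address. A sketch; field names follow the FCA indices, with all types
// flattened to uintptr_t:
#include <cstdint>

struct ImplicitArgs {
  uintptr_t holder;                // sp[0 * kPointerSize]
  uintptr_t isolate;               // sp[1 * kPointerSize]
  uintptr_t return_value_default;  // sp[2 * kPointerSize], undefined
  uintptr_t return_value;          // sp[3 * kPointerSize], undefined
  uintptr_t data;                  // sp[4 * kPointerSize], call data
  uintptr_t new_target;            // sp[5 * kPointerSize], undefined
};

static_assert(sizeof(ImplicitArgs) == 6 * sizeof(uintptr_t),
              "matches FCA::kArgsLength slots");
// (The PropertyCallbackArguments setup continues below.)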
- using PCA = PropertyCallbackArguments;
- __ Subu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
- __ sw(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
- __ lw(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
- __ sw(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
- __ LoadRoot(scratch, RootIndex::kUndefinedValue);
- __ sw(scratch, MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
- __ sw(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
- kPointerSize));
- __ li(scratch, ExternalReference::isolate_address(masm->isolate()));
- __ sw(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
- __ sw(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
- // should_throw_on_error -> false
- DCHECK_EQ(0, Smi::zero().ptr());
- __ sw(zero_reg,
- MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
- __ lw(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
- __ sw(scratch, MemOperand(sp, 0 * kPointerSize));
-
- // v8::PropertyCallbackInfo::args_ array and name handle.
- const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
-
- // Load address of v8::PropertyAccessorInfo::args_ array and name handle.
- __ mov(a0, sp); // a0 = Handle<Name>
- __ Addu(a1, a0, Operand(1 * kPointerSize)); // a1 = v8::PCI::args_
-
- const int kApiStackSpace = 1;
- FrameScope frame_scope(masm, StackFrame::MANUAL);
- __ EnterExitFrame(false, kApiStackSpace);
-
- // Create v8::PropertyCallbackInfo object on the stack and initialize
- // its args_ field.
- __ sw(a1, MemOperand(sp, 1 * kPointerSize));
- __ Addu(a1, sp, Operand(1 * kPointerSize)); // a1 = v8::PropertyCallbackInfo&
-
- ExternalReference thunk_ref =
- ExternalReference::invoke_accessor_getter_callback();
-
- __ lw(api_function_address,
- FieldMemOperand(callback, AccessorInfo::kMaybeRedirectedGetterOffset));
-
- // +3 is to skip prolog, return address and name handle.
- MemOperand return_value_operand(
- fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
- MemOperand* const kUseStackSpaceConstant = nullptr;
- CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
- kStackUnwindSpace, kUseStackSpaceConstant,
- return_value_operand);
-}
-
-void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
- // The sole purpose of DirectCEntry is for movable callers (e.g. any general
- // purpose Code object) to be able to call into C functions that may trigger
- // GC and thus move the caller.
- //
- // DirectCEntry places the return address on the stack (updated by the GC),
- // making the call GC safe. The irregexp backend relies on this.
-
- // Make room for arguments to fit the C calling convention. Callers use
- // EnterExitFrame/LeaveExitFrame so they handle stack restoring and we don't
- // have to do that here. Any caller must drop kCArgsSlotsSize stack space
- // after the call.
- __ Subu(sp, sp, Operand(kCArgsSlotsSize));
-
- __ sw(ra, MemOperand(sp, kCArgsSlotsSize)); // Store the return address.
- __ Call(t9); // Call the C++ function.
- __ lw(t9, MemOperand(sp, kCArgsSlotsSize)); // Return to calling code.
-
- if (v8_flags.debug_code && v8_flags.enable_slow_asserts) {
- // In case of an error the return address may point to a memory area
- // filled with kZapValue by the GC. Dereference the address and check for
- // this.
- __ lw(t0, MemOperand(t9));
- __ Assert(ne, AbortReason::kReceivedInvalidReturnAddress, t0,
- Operand(reinterpret_cast<int32_t>(kZapValue)));
- }
-
- __ Jump(t9);
-}
-
-void Builtins::Generate_MemCopyUint8Uint8(MacroAssembler* masm) {
- // This code assumes that cache lines are 32 bytes and if the cache line is
- // larger it will not work correctly.
- {
- Label lastb, unaligned, aligned, chkw, loop16w, chk1w, wordCopy_loop,
- skip_pref, lastbloop, leave, ua_chk16w, ua_loop16w, ua_skip_pref,
- ua_chkw, ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop;
-
- // The size of each prefetch.
- uint32_t pref_chunk = 32;
- // The maximum size of a prefetch; it must not be less than pref_chunk.
- // If the real size of a prefetch is greater than max_pref_size and
- // the kPrefHintPrepareForStore hint is used, the code will not work
- // correctly.
- uint32_t max_pref_size = 128;
- DCHECK(pref_chunk < max_pref_size);
-
- // pref_limit is set based on the fact that we never use an offset
- // greater than 5 on a store pref and that a single pref can
- // never be larger than max_pref_size.
- uint32_t pref_limit = (5 * pref_chunk) + max_pref_size;
- int32_t pref_hint_load = kPrefHintLoadStreamed;
- int32_t pref_hint_store = kPrefHintPrepareForStore;
- uint32_t loadstore_chunk = 4;
-
- // The initial prefetches may fetch bytes that are before the buffer being
- // copied. Start copies with an offset of 4 to avoid this situation when
- // using kPrefHintPrepareForStore.
- DCHECK(pref_hint_store != kPrefHintPrepareForStore ||
- pref_chunk * 4 >= max_pref_size);
-
- // If the size is less than 8, go to lastb. Regardless of size,
- // copy the dst pointer to v0 for the return value.
- __ slti(t2, a2, 2 * loadstore_chunk);
- __ bne(t2, zero_reg, &lastb);
- __ mov(v0, a0); // In delay slot.
-
- // If src and dst have different alignments, go to unaligned; if they
- // have the same alignment (but are not actually aligned) do a partial
- // load/store to make them aligned. If they are both already aligned
- // we can start copying at aligned.
- __ xor_(t8, a1, a0);
- __ andi(t8, t8, loadstore_chunk - 1); // t8 is a0/a1 word-displacement.
- __ bne(t8, zero_reg, &unaligned);
- __ subu(a3, zero_reg, a0); // In delay slot.
-
- __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1.
- __ beq(a3, zero_reg, &aligned); // Already aligned.
- __ subu(a2, a2, a3); // In delay slot. a2 is the remaining bytes count.
-
- if (kArchEndian == kLittle) {
- __ lwr(t8, MemOperand(a1));
- __ addu(a1, a1, a3);
- __ swr(t8, MemOperand(a0));
- __ addu(a0, a0, a3);
- } else {
- __ lwl(t8, MemOperand(a1));
- __ addu(a1, a1, a3);
- __ swl(t8, MemOperand(a0));
- __ addu(a0, a0, a3);
- }
- // Now dst/src are both aligned to (word) aligned addresses. Set a2 to
- // count how many bytes we have to copy after all the 64 byte chunks are
- // copied and a3 to the dst pointer after all the 64 byte chunks have been
- // copied. We will loop, incrementing a0 and a1 until a0 equals a3.
- __ bind(&aligned);
- __ andi(t8, a2, 0x3F);
- __ beq(a2, t8, &chkw); // Less than 64?
- __ subu(a3, a2, t8); // In delay slot.
- __ addu(a3, a0, a3); // Now a3 is the final dst after loop.
-
- // When in the loop we prefetch with the kPrefHintPrepareForStore hint;
- // in this case a0+x should be past the "t0-32" address. This means:
- // for x=128 the last "safe" a0 address is "t0-160". Alternatively, for
- // x=64 the last "safe" a0 address is "t0-96". In the current version we
- // will use "pref hint, 128(a0)", so "t0-160" is the limit.
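// Stripped of prefetching and the 16-word unrolling, the co-aligned fast
// path described above reduces to: copy head bytes until the pointers hit a
// word boundary (done with a single lwr/swr pair above), move whole words,
// then finish byte by byte. A minimal portable sketch:
#include <cstddef>
#include <cstdint>
#include <cstring>

void CopyCoAligned(uint8_t* dst, const uint8_t* src, size_t n) {
  const size_t kWord = sizeof(uint32_t);  // loadstore_chunk
  // Head: src and dst share the same misalignment, so aligning dst aligns both.
  while (n > 0 && (reinterpret_cast<uintptr_t>(dst) & (kWord - 1)) != 0) {
    *dst++ = *src++;
    --n;
  }
  // Body: whole words; the builtin unrolls this 16x with pref hints.
  while (n >= kWord) {
    uint32_t w;
    std::memcpy(&w, src, kWord);  // aligned word load (lw)
    std::memcpy(dst, &w, kWord);  // aligned word store (sw)
    src += kWord;
    dst += kWord;
    n -= kWord;
  }
  // Tail: the 'lastb' byte loop.
  while (n-- > 0) *dst++ = *src++;
}
// (The prefetch setup continues below.)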
- if (pref_hint_store == kPrefHintPrepareForStore) { - __ addu(t0, a0, a2); // t0 is the "past the end" address. - __ Subu(t9, t0, pref_limit); // t9 is the "last safe pref" address. - } - - __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk)); - __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk)); - __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk)); - __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk)); - - if (pref_hint_store != kPrefHintPrepareForStore) { - __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk)); - __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk)); - __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk)); - } - __ bind(&loop16w); - __ lw(t0, MemOperand(a1)); - - if (pref_hint_store == kPrefHintPrepareForStore) { - __ sltu(v1, t9, a0); // If a0 > t9, don't use next prefetch. - __ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg)); - } - __ lw(t1, MemOperand(a1, 1, loadstore_chunk)); // Maybe in delay slot. - - __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk)); - __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk)); - - __ bind(&skip_pref); - __ lw(t2, MemOperand(a1, 2, loadstore_chunk)); - __ lw(t3, MemOperand(a1, 3, loadstore_chunk)); - __ lw(t4, MemOperand(a1, 4, loadstore_chunk)); - __ lw(t5, MemOperand(a1, 5, loadstore_chunk)); - __ lw(t6, MemOperand(a1, 6, loadstore_chunk)); - __ lw(t7, MemOperand(a1, 7, loadstore_chunk)); - __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk)); - - __ sw(t0, MemOperand(a0)); - __ sw(t1, MemOperand(a0, 1, loadstore_chunk)); - __ sw(t2, MemOperand(a0, 2, loadstore_chunk)); - __ sw(t3, MemOperand(a0, 3, loadstore_chunk)); - __ sw(t4, MemOperand(a0, 4, loadstore_chunk)); - __ sw(t5, MemOperand(a0, 5, loadstore_chunk)); - __ sw(t6, MemOperand(a0, 6, loadstore_chunk)); - __ sw(t7, MemOperand(a0, 7, loadstore_chunk)); - - __ lw(t0, MemOperand(a1, 8, loadstore_chunk)); - __ lw(t1, MemOperand(a1, 9, loadstore_chunk)); - __ lw(t2, MemOperand(a1, 10, loadstore_chunk)); - __ lw(t3, MemOperand(a1, 11, loadstore_chunk)); - __ lw(t4, MemOperand(a1, 12, loadstore_chunk)); - __ lw(t5, MemOperand(a1, 13, loadstore_chunk)); - __ lw(t6, MemOperand(a1, 14, loadstore_chunk)); - __ lw(t7, MemOperand(a1, 15, loadstore_chunk)); - __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk)); - - __ sw(t0, MemOperand(a0, 8, loadstore_chunk)); - __ sw(t1, MemOperand(a0, 9, loadstore_chunk)); - __ sw(t2, MemOperand(a0, 10, loadstore_chunk)); - __ sw(t3, MemOperand(a0, 11, loadstore_chunk)); - __ sw(t4, MemOperand(a0, 12, loadstore_chunk)); - __ sw(t5, MemOperand(a0, 13, loadstore_chunk)); - __ sw(t6, MemOperand(a0, 14, loadstore_chunk)); - __ sw(t7, MemOperand(a0, 15, loadstore_chunk)); - __ addiu(a0, a0, 16 * loadstore_chunk); - __ bne(a0, a3, &loop16w); - __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot. - __ mov(a2, t8); - - // Here we have src and dest word-aligned but less than 64-bytes to go. - // Check for a 32 bytes chunk and copy if there is one. Otherwise jump - // down to chk1w to handle the tail end of the copy. - __ bind(&chkw); - __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk)); - __ andi(t8, a2, 0x1F); - __ beq(a2, t8, &chk1w); // Less than 32? - __ nop(); // In delay slot. 
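// In the unaligned path below, each word is assembled from the two aligned
// words that straddle it with an lwr/lwl pair (MemOperand::offset_minus_one
// points lwl at the word's last byte). A little-endian C model of that
// combination, offered as a sketch of the effect rather than of the exact
// instruction semantics:
#include <cstdint>
#include <cstring>

uint32_t LoadUnalignedWordLE(const uint8_t* p) {
  uintptr_t addr = reinterpret_cast<uintptr_t>(p);
  unsigned shift = (addr & 3u) * 8;       // byte misalignment, in bits
  const uint8_t* base = p - (addr & 3u);  // aligned word containing p[0]
  uint32_t lo;
  std::memcpy(&lo, base, 4);              // the word lwr reads from
  if (shift == 0) return lo;              // already aligned
  uint32_t hi;
  std::memcpy(&hi, base + 4, 4);          // the word lwl reads from
  return (lo >> shift) | (hi << (32 - shift));
}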
- __ lw(t0, MemOperand(a1)); - __ lw(t1, MemOperand(a1, 1, loadstore_chunk)); - __ lw(t2, MemOperand(a1, 2, loadstore_chunk)); - __ lw(t3, MemOperand(a1, 3, loadstore_chunk)); - __ lw(t4, MemOperand(a1, 4, loadstore_chunk)); - __ lw(t5, MemOperand(a1, 5, loadstore_chunk)); - __ lw(t6, MemOperand(a1, 6, loadstore_chunk)); - __ lw(t7, MemOperand(a1, 7, loadstore_chunk)); - __ addiu(a1, a1, 8 * loadstore_chunk); - __ sw(t0, MemOperand(a0)); - __ sw(t1, MemOperand(a0, 1, loadstore_chunk)); - __ sw(t2, MemOperand(a0, 2, loadstore_chunk)); - __ sw(t3, MemOperand(a0, 3, loadstore_chunk)); - __ sw(t4, MemOperand(a0, 4, loadstore_chunk)); - __ sw(t5, MemOperand(a0, 5, loadstore_chunk)); - __ sw(t6, MemOperand(a0, 6, loadstore_chunk)); - __ sw(t7, MemOperand(a0, 7, loadstore_chunk)); - __ addiu(a0, a0, 8 * loadstore_chunk); - - // Here we have less than 32 bytes to copy. Set up for a loop to copy - // one word at a time. Set a2 to count how many bytes we have to copy - // after all the word chunks are copied and a3 to the dst pointer after - // all the word chunks have been copied. We will loop, incrementing a0 - // and a1 until a0 equals a3. - __ bind(&chk1w); - __ andi(a2, t8, loadstore_chunk - 1); - __ beq(a2, t8, &lastb); - __ subu(a3, t8, a2); // In delay slot. - __ addu(a3, a0, a3); - - __ bind(&wordCopy_loop); - __ lw(t3, MemOperand(a1)); - __ addiu(a0, a0, loadstore_chunk); - __ addiu(a1, a1, loadstore_chunk); - __ bne(a0, a3, &wordCopy_loop); - __ sw(t3, MemOperand(a0, -1, loadstore_chunk)); // In delay slot. - - __ bind(&lastb); - __ Branch(&leave, le, a2, Operand(zero_reg)); - __ addu(a3, a0, a2); - - __ bind(&lastbloop); - __ lb(v1, MemOperand(a1)); - __ addiu(a0, a0, 1); - __ addiu(a1, a1, 1); - __ bne(a0, a3, &lastbloop); - __ sb(v1, MemOperand(a0, -1)); // In delay slot. - - __ bind(&leave); - __ jr(ra); - __ nop(); - - // Unaligned case. Only the dst gets aligned so we need to do partial - // loads of the source followed by normal stores to the dst (once we - // have aligned the destination). - __ bind(&unaligned); - __ andi(a3, a3, loadstore_chunk - 1); // Copy a3 bytes to align a0/a1. - __ beq(a3, zero_reg, &ua_chk16w); - __ subu(a2, a2, a3); // In delay slot. - - if (kArchEndian == kLittle) { - __ lwr(v1, MemOperand(a1)); - __ lwl(v1, - MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); - __ addu(a1, a1, a3); - __ swr(v1, MemOperand(a0)); - __ addu(a0, a0, a3); - } else { - __ lwl(v1, MemOperand(a1)); - __ lwr(v1, - MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); - __ addu(a1, a1, a3); - __ swl(v1, MemOperand(a0)); - __ addu(a0, a0, a3); - } - - // Now the dst (but not the source) is aligned. Set a2 to count how many - // bytes we have to copy after all the 64 byte chunks are copied and a3 to - // the dst pointer after all the 64 byte chunks have been copied. We will - // loop, incrementing a0 and a1 until a0 equals a3. - __ bind(&ua_chk16w); - __ andi(t8, a2, 0x3F); - __ beq(a2, t8, &ua_chkw); - __ subu(a3, a2, t8); // In delay slot. 
- __ addu(a3, a0, a3); - - if (pref_hint_store == kPrefHintPrepareForStore) { - __ addu(t0, a0, a2); - __ Subu(t9, t0, pref_limit); - } - - __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk)); - __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk)); - __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk)); - - if (pref_hint_store != kPrefHintPrepareForStore) { - __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk)); - __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk)); - __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk)); - } - - __ bind(&ua_loop16w); - __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk)); - if (kArchEndian == kLittle) { - __ lwr(t0, MemOperand(a1)); - __ lwr(t1, MemOperand(a1, 1, loadstore_chunk)); - __ lwr(t2, MemOperand(a1, 2, loadstore_chunk)); - - if (pref_hint_store == kPrefHintPrepareForStore) { - __ sltu(v1, t9, a0); - __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg)); - } - __ lwr(t3, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot. - - __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk)); - __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk)); - - __ bind(&ua_skip_pref); - __ lwr(t4, MemOperand(a1, 4, loadstore_chunk)); - __ lwr(t5, MemOperand(a1, 5, loadstore_chunk)); - __ lwr(t6, MemOperand(a1, 6, loadstore_chunk)); - __ lwr(t7, MemOperand(a1, 7, loadstore_chunk)); - __ lwl(t0, - MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t1, - MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t2, - MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t3, - MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t4, - MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t5, - MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t6, - MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t7, - MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one)); - } else { - __ lwl(t0, MemOperand(a1)); - __ lwl(t1, MemOperand(a1, 1, loadstore_chunk)); - __ lwl(t2, MemOperand(a1, 2, loadstore_chunk)); - - if (pref_hint_store == kPrefHintPrepareForStore) { - __ sltu(v1, t9, a0); - __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg)); - } - __ lwl(t3, MemOperand(a1, 3, loadstore_chunk)); // Maybe in delay slot. 
- - __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk)); - __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk)); - - __ bind(&ua_skip_pref); - __ lwl(t4, MemOperand(a1, 4, loadstore_chunk)); - __ lwl(t5, MemOperand(a1, 5, loadstore_chunk)); - __ lwl(t6, MemOperand(a1, 6, loadstore_chunk)); - __ lwl(t7, MemOperand(a1, 7, loadstore_chunk)); - __ lwr(t0, - MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwr(t1, - MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwr(t2, - MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwr(t3, - MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwr(t4, - MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwr(t5, - MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwr(t6, - MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwr(t7, - MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one)); - } - __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk)); - __ sw(t0, MemOperand(a0)); - __ sw(t1, MemOperand(a0, 1, loadstore_chunk)); - __ sw(t2, MemOperand(a0, 2, loadstore_chunk)); - __ sw(t3, MemOperand(a0, 3, loadstore_chunk)); - __ sw(t4, MemOperand(a0, 4, loadstore_chunk)); - __ sw(t5, MemOperand(a0, 5, loadstore_chunk)); - __ sw(t6, MemOperand(a0, 6, loadstore_chunk)); - __ sw(t7, MemOperand(a0, 7, loadstore_chunk)); - if (kArchEndian == kLittle) { - __ lwr(t0, MemOperand(a1, 8, loadstore_chunk)); - __ lwr(t1, MemOperand(a1, 9, loadstore_chunk)); - __ lwr(t2, MemOperand(a1, 10, loadstore_chunk)); - __ lwr(t3, MemOperand(a1, 11, loadstore_chunk)); - __ lwr(t4, MemOperand(a1, 12, loadstore_chunk)); - __ lwr(t5, MemOperand(a1, 13, loadstore_chunk)); - __ lwr(t6, MemOperand(a1, 14, loadstore_chunk)); - __ lwr(t7, MemOperand(a1, 15, loadstore_chunk)); - __ lwl(t0, - MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t1, - MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t2, - MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t3, - MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t4, - MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t5, - MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t6, - MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t7, - MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one)); - } else { - __ lwl(t0, MemOperand(a1, 8, loadstore_chunk)); - __ lwl(t1, MemOperand(a1, 9, loadstore_chunk)); - __ lwl(t2, MemOperand(a1, 10, loadstore_chunk)); - __ lwl(t3, MemOperand(a1, 11, loadstore_chunk)); - __ lwl(t4, MemOperand(a1, 12, loadstore_chunk)); - __ lwl(t5, MemOperand(a1, 13, loadstore_chunk)); - __ lwl(t6, MemOperand(a1, 14, loadstore_chunk)); - __ lwl(t7, MemOperand(a1, 15, loadstore_chunk)); - __ lwr(t0, - MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwr(t1, - MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwr(t2, - MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwr(t3, - MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwr(t4, - MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwr(t5, - MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwr(t6, - MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwr(t7, - 
MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one)); - } - __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk)); - __ sw(t0, MemOperand(a0, 8, loadstore_chunk)); - __ sw(t1, MemOperand(a0, 9, loadstore_chunk)); - __ sw(t2, MemOperand(a0, 10, loadstore_chunk)); - __ sw(t3, MemOperand(a0, 11, loadstore_chunk)); - __ sw(t4, MemOperand(a0, 12, loadstore_chunk)); - __ sw(t5, MemOperand(a0, 13, loadstore_chunk)); - __ sw(t6, MemOperand(a0, 14, loadstore_chunk)); - __ sw(t7, MemOperand(a0, 15, loadstore_chunk)); - __ addiu(a0, a0, 16 * loadstore_chunk); - __ bne(a0, a3, &ua_loop16w); - __ addiu(a1, a1, 16 * loadstore_chunk); // In delay slot. - __ mov(a2, t8); - - // Here less than 64-bytes. Check for - // a 32 byte chunk and copy if there is one. Otherwise jump down to - // ua_chk1w to handle the tail end of the copy. - __ bind(&ua_chkw); - __ Pref(pref_hint_load, MemOperand(a1)); - __ andi(t8, a2, 0x1F); - - __ beq(a2, t8, &ua_chk1w); - __ nop(); // In delay slot. - if (kArchEndian == kLittle) { - __ lwr(t0, MemOperand(a1)); - __ lwr(t1, MemOperand(a1, 1, loadstore_chunk)); - __ lwr(t2, MemOperand(a1, 2, loadstore_chunk)); - __ lwr(t3, MemOperand(a1, 3, loadstore_chunk)); - __ lwr(t4, MemOperand(a1, 4, loadstore_chunk)); - __ lwr(t5, MemOperand(a1, 5, loadstore_chunk)); - __ lwr(t6, MemOperand(a1, 6, loadstore_chunk)); - __ lwr(t7, MemOperand(a1, 7, loadstore_chunk)); - __ lwl(t0, - MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t1, - MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t2, - MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t3, - MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t4, - MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t5, - MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t6, - MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwl(t7, - MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one)); - } else { - __ lwl(t0, MemOperand(a1)); - __ lwl(t1, MemOperand(a1, 1, loadstore_chunk)); - __ lwl(t2, MemOperand(a1, 2, loadstore_chunk)); - __ lwl(t3, MemOperand(a1, 3, loadstore_chunk)); - __ lwl(t4, MemOperand(a1, 4, loadstore_chunk)); - __ lwl(t5, MemOperand(a1, 5, loadstore_chunk)); - __ lwl(t6, MemOperand(a1, 6, loadstore_chunk)); - __ lwl(t7, MemOperand(a1, 7, loadstore_chunk)); - __ lwr(t0, - MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwr(t1, - MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwr(t2, - MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwr(t3, - MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwr(t4, - MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwr(t5, - MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwr(t6, - MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one)); - __ lwr(t7, - MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one)); - } - __ addiu(a1, a1, 8 * loadstore_chunk); - __ sw(t0, MemOperand(a0)); - __ sw(t1, MemOperand(a0, 1, loadstore_chunk)); - __ sw(t2, MemOperand(a0, 2, loadstore_chunk)); - __ sw(t3, MemOperand(a0, 3, loadstore_chunk)); - __ sw(t4, MemOperand(a0, 4, loadstore_chunk)); - __ sw(t5, MemOperand(a0, 5, loadstore_chunk)); - __ sw(t6, MemOperand(a0, 6, loadstore_chunk)); - __ sw(t7, MemOperand(a0, 7, loadstore_chunk)); - __ addiu(a0, a0, 8 * 
loadstore_chunk);
-
- // Less than 32 bytes to copy. Set up for a loop to
- // copy one word at a time.
- __ bind(&ua_chk1w);
- __ andi(a2, t8, loadstore_chunk - 1);
- __ beq(a2, t8, &ua_smallCopy);
- __ subu(a3, t8, a2); // In delay slot.
- __ addu(a3, a0, a3);
-
- __ bind(&ua_wordCopy_loop);
- if (kArchEndian == kLittle) {
- __ lwr(v1, MemOperand(a1));
- __ lwl(v1,
- MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
- } else {
- __ lwl(v1, MemOperand(a1));
- __ lwr(v1,
- MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
- }
- __ addiu(a0, a0, loadstore_chunk);
- __ addiu(a1, a1, loadstore_chunk);
- __ bne(a0, a3, &ua_wordCopy_loop);
- __ sw(v1, MemOperand(a0, -1, loadstore_chunk)); // In delay slot.
-
- // Copy the remaining tail bytes.
- __ bind(&ua_smallCopy);
- __ beq(a2, zero_reg, &leave);
- __ addu(a3, a0, a2); // In delay slot.
-
- __ bind(&ua_smallCopy_loop);
- __ lb(v1, MemOperand(a1));
- __ addiu(a0, a0, 1);
- __ addiu(a1, a1, 1);
- __ bne(a0, a3, &ua_smallCopy_loop);
- __ sb(v1, MemOperand(a0, -1)); // In delay slot.
-
- __ jr(ra);
- __ nop();
- }
-}
-
-namespace {
-
-// This code tries to be close to ia32 code so that any changes can be
-// easily ported.
-void Generate_DeoptimizationEntry(MacroAssembler* masm,
- DeoptimizeKind deopt_kind) {
- Isolate* isolate = masm->isolate();
-
- // Unlike on ARM we don't save all the registers, just the useful ones.
- // For the rest, there are gaps on the stack, so the offsets remain the same.
- static constexpr int kNumberOfRegisters = Register::kNumRegisters;
-
- RegList restored_regs = kJSCallerSaved | kCalleeSaved;
- RegList saved_regs = restored_regs | sp | ra;
-
- static constexpr int kDoubleRegsSize =
- kDoubleSize * DoubleRegister::kNumRegisters;
-
- // Save all FPU registers before messing with them.
- __ Subu(sp, sp, Operand(kDoubleRegsSize));
- const RegisterConfiguration* config = RegisterConfiguration::Default();
- for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
- int code = config->GetAllocatableDoubleCode(i);
- const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
- int offset = code * kDoubleSize;
- __ Sdc1(fpu_reg, MemOperand(sp, offset));
- }
-
- // Push saved_regs (needed to populate FrameDescription::registers_).
- // Leave gaps for other registers.
- __ Subu(sp, sp, kNumberOfRegisters * kPointerSize);
- for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
- if ((saved_regs.bits() & (1 << i)) != 0) {
- __ sw(ToRegister(i), MemOperand(sp, kPointerSize * i));
- }
- }
-
- __ li(a2,
- ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate));
- __ sw(fp, MemOperand(a2));
-
- static constexpr int kSavedRegistersAreaSize =
- (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
-
- // Get the address of the location in the code object (a2) (return
- // address for lazy deoptimization) and compute the fp-to-sp delta in
- // register a3.
- __ mov(a2, ra);
- __ Addu(a3, sp, Operand(kSavedRegistersAreaSize));
- __ Subu(a3, fp, a3);
-
- // Allocate a new deoptimizer object.
- __ PrepareCallCFunction(5, t0);
- // Pass four arguments in a0 to a3 and the fifth argument on the stack.
- __ mov(a0, zero_reg);
- Label context_check;
- __ lw(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
- __ JumpIfSmi(a1, &context_check);
- __ lw(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
- __ bind(&context_check);
- __ li(a1, Operand(static_cast<int>(deopt_kind)));
- // a2: code address or 0 already loaded.
- // a3: Fp-to-sp delta already loaded. - __ li(t0, ExternalReference::isolate_address(isolate)); - __ sw(t0, CFunctionArgumentOperand(5)); // Isolate. - // Call Deoptimizer::New(). - { - AllowExternalCallThatCantCauseGC scope(masm); - __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5); - } - - // Preserve "deoptimizer" object in register v0 and get the input - // frame descriptor pointer to a1 (deoptimizer->input_); - // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below. - __ mov(a0, v0); - __ lw(a1, MemOperand(v0, Deoptimizer::input_offset())); - - // Copy core registers into FrameDescription::registers_[kNumRegisters]. - DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters); - for (int i = 0; i < kNumberOfRegisters; i++) { - int offset = (i * kPointerSize) + FrameDescription::registers_offset(); - if ((saved_regs.bits() & (1 << i)) != 0) { - __ lw(a2, MemOperand(sp, i * kPointerSize)); - __ sw(a2, MemOperand(a1, offset)); - } else if (v8_flags.debug_code) { - __ li(a2, kDebugZapValue); - __ sw(a2, MemOperand(a1, offset)); - } - } - - int double_regs_offset = FrameDescription::double_registers_offset(); - // Copy FPU registers to - // double_registers_[DoubleRegister::kNumAllocatableRegisters] - for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { - int code = config->GetAllocatableDoubleCode(i); - int dst_offset = code * kDoubleSize + double_regs_offset; - int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize; - __ Ldc1(f0, MemOperand(sp, src_offset)); - __ Sdc1(f0, MemOperand(a1, dst_offset)); - } - - // Remove the saved registers from the stack. - __ Addu(sp, sp, Operand(kSavedRegistersAreaSize)); - - // Compute a pointer to the unwinding limit in register a2; that is - // the first stack slot not part of the input frame. - __ lw(a2, MemOperand(a1, FrameDescription::frame_size_offset())); - __ Addu(a2, a2, sp); - - // Unwind the stack down to - but not including - the unwinding - // limit and copy the contents of the activation frame to the input - // frame description. - __ Addu(a3, a1, Operand(FrameDescription::frame_content_offset())); - Label pop_loop; - Label pop_loop_header; - __ BranchShort(&pop_loop_header); - __ bind(&pop_loop); - __ pop(t0); - __ sw(t0, MemOperand(a3, 0)); - __ addiu(a3, a3, sizeof(uint32_t)); - __ bind(&pop_loop_header); - __ BranchShort(&pop_loop, ne, a2, Operand(sp)); - - // Compute the output frame in the deoptimizer. - __ push(a0); // Preserve deoptimizer object across call. - // a0: deoptimizer object; a1: scratch. - __ PrepareCallCFunction(1, a1); - // Call Deoptimizer::ComputeOutputFrames(). - { - AllowExternalCallThatCantCauseGC scope(masm); - __ CallCFunction(ExternalReference::compute_output_frames_function(), 1); - } - __ pop(a0); // Restore deoptimizer object (class Deoptimizer). - - __ lw(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset())); - - // Replace the current (input) frame with the output frames. - Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header; - // Outer loop state: t0 = current "FrameDescription** output_", - // a1 = one past the last FrameDescription**. - __ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset())); - __ lw(t0, MemOperand(a0, Deoptimizer::output_offset())); // t0 is output_. - __ Lsa(a1, t0, a1, kPointerSizeLog2); - __ BranchShort(&outer_loop_header); - __ bind(&outer_push_loop); - // Inner loop state: a2 = current FrameDescription*, a3 = loop index. 
- __ lw(a2, MemOperand(t0, 0)); // output_[ix] - __ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset())); - __ BranchShort(&inner_loop_header); - __ bind(&inner_push_loop); - __ Subu(a3, a3, Operand(sizeof(uint32_t))); - __ Addu(t2, a2, Operand(a3)); - __ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset())); - __ push(t3); - __ bind(&inner_loop_header); - __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg)); - - __ Addu(t0, t0, Operand(kPointerSize)); - __ bind(&outer_loop_header); - __ BranchShort(&outer_push_loop, lt, t0, Operand(a1)); - - __ lw(a1, MemOperand(a0, Deoptimizer::input_offset())); - for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { - int code = config->GetAllocatableDoubleCode(i); - const DoubleRegister fpu_reg = DoubleRegister::from_code(code); - int src_offset = code * kDoubleSize + double_regs_offset; - __ Ldc1(fpu_reg, MemOperand(a1, src_offset)); - } - - // Push pc and continuation from the last output frame. - __ lw(t2, MemOperand(a2, FrameDescription::pc_offset())); - __ push(t2); - __ lw(t2, MemOperand(a2, FrameDescription::continuation_offset())); - __ push(t2); - - // Technically restoring 'at' should work unless zero_reg is also restored - // but it's safer to check for this. - DCHECK(!(restored_regs.has(at))); - // Restore the registers from the last output frame. - __ mov(at, a2); - for (int i = kNumberOfRegisters - 1; i >= 0; i--) { - int offset = (i * kPointerSize) + FrameDescription::registers_offset(); - if ((restored_regs.bits() & (1 << i)) != 0) { - __ lw(ToRegister(i), MemOperand(at, offset)); - } - } - - __ pop(at); // Get continuation, leave pc on stack. - __ pop(ra); - __ Jump(at); - __ stop(); -} - -} // namespace - -void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) { - Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager); -} - -void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) { - Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy); -} - -namespace { - -// Restarts execution either at the current or next (in execution order) -// bytecode. If there is baseline code on the shared function info, converts an -// interpreter frame into a baseline frame and continues execution in baseline -// code. Otherwise execution continues with bytecode. -void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm, - bool next_bytecode, - bool is_osr = false) { - Label start; - __ bind(&start); - - // Get function from the frame. - Register closure = a1; - __ Lw(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); - - // Get the Code object from the shared function info. - Register code_obj = s1; - __ Lw(code_obj, - FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); - __ Lw(code_obj, - FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset)); - - // Check if we have baseline code. For OSR entry it is safe to assume we - // always have baseline code. - if (!is_osr) { - Label start_with_baseline; - __ GetObjectType(code_obj, t6, t6); - __ Branch(&start_with_baseline, eq, t6, Operand(CODET_TYPE)); - - // Start with bytecode as there is no baseline code. - Builtin builtin_id = next_bytecode - ? Builtin::kInterpreterEnterAtNextBytecode - : Builtin::kInterpreterEnterAtBytecode; - __ Jump(masm->isolate()->builtins()->code_handle(builtin_id), - RelocInfo::CODE_TARGET); - - // Start with baseline code. 
- __ bind(&start_with_baseline); - } else if (v8_flags.debug_code) { - __ GetObjectType(code_obj, t6, t6); - __ Assert(eq, AbortReason::kExpectedBaselineData, t6, Operand(CODET_TYPE)); - } - - if (v8_flags.debug_code) { - AssertCodeIsBaseline(masm, code_obj, t2); - } - - // Replace BytecodeOffset with the feedback vector. - Register feedback_vector = a2; - __ Lw(feedback_vector, - FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); - __ Lw(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset)); - - Label install_baseline_code; - // Check if feedback vector is valid. If not, call prepare for baseline to - // allocate it. - __ GetObjectType(feedback_vector, t6, t6); - __ Branch(&install_baseline_code, ne, t6, Operand(FEEDBACK_VECTOR_TYPE)); - - // Save BytecodeOffset from the stack frame. - __ Lw(kInterpreterBytecodeOffsetRegister, - MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); - __ SmiUntag(kInterpreterBytecodeOffsetRegister); - // Replace BytecodeOffset with the feedback vector. - __ Sw(feedback_vector, - MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); - feedback_vector = no_reg; - - // Compute baseline pc for bytecode offset. - ExternalReference get_baseline_pc_extref; - if (next_bytecode || is_osr) { - get_baseline_pc_extref = - ExternalReference::baseline_pc_for_next_executed_bytecode(); - } else { - get_baseline_pc_extref = - ExternalReference::baseline_pc_for_bytecode_offset(); - } - - Register get_baseline_pc = a3; - __ li(get_baseline_pc, get_baseline_pc_extref); - - // If the code deoptimizes during the implicit function entry stack interrupt - // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is - // not a valid bytecode offset. - // TODO(pthier): Investigate if it is feasible to handle this special case - // in TurboFan instead of here. - Label valid_bytecode_offset, function_entry_bytecode; - if (!is_osr) { - __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister, - Operand(BytecodeArray::kHeaderSize - kHeapObjectTag + - kFunctionEntryBytecodeOffset)); - } - - __ Subu(kInterpreterBytecodeOffsetRegister, - kInterpreterBytecodeOffsetRegister, - (BytecodeArray::kHeaderSize - kHeapObjectTag)); - - __ bind(&valid_bytecode_offset); - // Get bytecode array from the stack frame. - __ Lw(kInterpreterBytecodeArrayRegister, - MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); - // Save the accumulator register, since it's clobbered by the below call. - __ Push(kInterpreterAccumulatorRegister); - { - Register arg_reg_1 = a0; - Register arg_reg_2 = a1; - Register arg_reg_3 = a2; - __ Move(arg_reg_1, code_obj); - __ Move(arg_reg_2, kInterpreterBytecodeOffsetRegister); - __ Move(arg_reg_3, kInterpreterBytecodeArrayRegister); - FrameScope scope(masm, StackFrame::INTERNAL); - __ PrepareCallCFunction(3, 0, t0); - __ CallCFunction(get_baseline_pc, 3, 0); - } - __ Addu(code_obj, code_obj, kReturnRegister0); - __ Pop(kInterpreterAccumulatorRegister); - - if (is_osr) { - // TODO(liuyu): Remove Ld as arm64 after register reallocation. - __ Lw(kInterpreterBytecodeArrayRegister, - MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); - ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister); - Generate_OSREntry(masm, code_obj, - Operand(Code::kHeaderSize - kHeapObjectTag)); - } else { - __ Addu(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag); - __ Jump(code_obj); - } - __ Trap(); // Unreachable. 
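// (What the sequence above computes, roughly -- a hedged sketch, not part
//  of the original patch; get_baseline_pc is the C function resolved via
//  ExternalReference earlier in this builtin:
//    Address entry = code_obj +
//        get_baseline_pc(code_obj, bytecode_offset, bytecode_array);
//    jump(entry + Code::kHeaderSize - kHeapObjectTag);
//  with the OSR variant entering through Generate_OSREntry instead of a
//  plain jump.)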
- - if (!is_osr) { - __ bind(&function_entry_bytecode); - // If the bytecode offset is kFunctionEntryOffset, get the start address of - // the first bytecode. - __ mov(kInterpreterBytecodeOffsetRegister, zero_reg); - if (next_bytecode) { - __ li(get_baseline_pc, - ExternalReference::baseline_pc_for_bytecode_offset()); - } - __ Branch(&valid_bytecode_offset); - } - - __ bind(&install_baseline_code); - { - FrameScope scope(masm, StackFrame::INTERNAL); - __ Push(kInterpreterAccumulatorRegister); - __ Push(closure); - __ CallRuntime(Runtime::kInstallBaselineCode, 1); - __ Pop(kInterpreterAccumulatorRegister); - } - // Retry from the start after installing baseline code. - __ Branch(&start); -} -} // namespace - -void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode( - MacroAssembler* masm) { - Generate_BaselineOrInterpreterEntry(masm, false); -} - -void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode( - MacroAssembler* masm) { - Generate_BaselineOrInterpreterEntry(masm, true); -} - -void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline( - MacroAssembler* masm) { - Generate_BaselineOrInterpreterEntry(masm, false, true); -} - -void Builtins::Generate_RestartFrameTrampoline(MacroAssembler* masm) { - // Frame is being dropped: - // - Look up current function on the frame. - // - Leave the frame. - // - Restart the frame by calling the function. - - __ lw(a1, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); - __ lw(a0, MemOperand(fp, StandardFrameConstants::kArgCOffset)); - - // Pop return address and frame. - __ LeaveFrame(StackFrame::INTERPRETED); - - __ li(a2, Operand(kDontAdaptArgumentsSentinel)); - - __ InvokeFunction(a1, a2, a0, InvokeType::kJump); -} - -#undef __ - -} // namespace internal -} // namespace v8 - -#endif // V8_TARGET_ARCH_MIPS diff --git a/src/codegen/assembler-arch.h b/src/codegen/assembler-arch.h index 9655c000ff..cb9f745610 100644 --- a/src/codegen/assembler-arch.h +++ b/src/codegen/assembler-arch.h @@ -17,8 +17,6 @@ #include "src/codegen/arm/assembler-arm.h" #elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 #include "src/codegen/ppc/assembler-ppc.h" -#elif V8_TARGET_ARCH_MIPS -#include "src/codegen/mips/assembler-mips.h" #elif V8_TARGET_ARCH_MIPS64 #include "src/codegen/mips64/assembler-mips64.h" #elif V8_TARGET_ARCH_LOONG64 diff --git a/src/codegen/assembler-inl.h b/src/codegen/assembler-inl.h index f76995b9e8..3618e243d5 100644 --- a/src/codegen/assembler-inl.h +++ b/src/codegen/assembler-inl.h @@ -17,8 +17,6 @@ #include "src/codegen/arm/assembler-arm-inl.h" #elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 #include "src/codegen/ppc/assembler-ppc-inl.h" -#elif V8_TARGET_ARCH_MIPS -#include "src/codegen/mips/assembler-mips-inl.h" #elif V8_TARGET_ARCH_MIPS64 #include "src/codegen/mips64/assembler-mips64-inl.h" #elif V8_TARGET_ARCH_LOONG64 diff --git a/src/codegen/assembler.h b/src/codegen/assembler.h index 49e3d3270e..8a8164d073 100644 --- a/src/codegen/assembler.h +++ b/src/codegen/assembler.h @@ -271,8 +271,7 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced { int pc_offset() const { return static_cast(pc_ - buffer_start_); } int pc_offset_for_safepoint() { -#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \ - defined(V8_TARGET_ARCH_LOONG64) +#if defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_LOONG64) // MIPS and LOONG need to use their own implementation to avoid trampoline's // influence. 
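// (Background, hedged: the MIPS-family assemblers can emit trampoline pools
//  in the middle of the instruction stream, so a call site's raw pc offset
//  need not match the offset the safepoint table was recorded against.
//  Those ports therefore provide their own implementation, and reaching
//  this default is an error.)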
UNREACHABLE(); diff --git a/src/codegen/constants-arch.h b/src/codegen/constants-arch.h index b5c6514331..1c8a0f545d 100644 --- a/src/codegen/constants-arch.h +++ b/src/codegen/constants-arch.h @@ -11,8 +11,6 @@ #include "src/codegen/arm64/constants-arm64.h" #elif V8_TARGET_ARCH_IA32 #include "src/codegen/ia32/constants-ia32.h" -#elif V8_TARGET_ARCH_MIPS -#include "src/codegen/mips/constants-mips.h" #elif V8_TARGET_ARCH_MIPS64 #include "src/codegen/mips64/constants-mips64.h" #elif V8_TARGET_ARCH_LOONG64 diff --git a/src/codegen/cpu-features.h b/src/codegen/cpu-features.h index 8b177826b6..dc087da1c9 100644 --- a/src/codegen/cpu-features.h +++ b/src/codegen/cpu-features.h @@ -44,7 +44,7 @@ enum CpuFeature { #elif V8_TARGET_ARCH_ARM64 JSCVT, -#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 +#elif V8_TARGET_ARCH_MIPS64 FPU, FP64FPU, MIPSr1, diff --git a/src/codegen/external-reference.cc b/src/codegen/external-reference.cc index ef1f184c5f..542be96400 100644 --- a/src/codegen/external-reference.cc +++ b/src/codegen/external-reference.cc @@ -766,8 +766,6 @@ ExternalReference ExternalReference::invoke_accessor_getter_callback() { #define re_stack_check_func RegExpMacroAssemblerARM::CheckStackGuardState #elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 #define re_stack_check_func RegExpMacroAssemblerPPC::CheckStackGuardState -#elif V8_TARGET_ARCH_MIPS -#define re_stack_check_func RegExpMacroAssemblerMIPS::CheckStackGuardState #elif V8_TARGET_ARCH_MIPS64 #define re_stack_check_func RegExpMacroAssemblerMIPS::CheckStackGuardState #elif V8_TARGET_ARCH_LOONG64 diff --git a/src/codegen/interface-descriptors-inl.h b/src/codegen/interface-descriptors-inl.h index 841f057513..bf2be9db13 100644 --- a/src/codegen/interface-descriptors-inl.h +++ b/src/codegen/interface-descriptors-inl.h @@ -25,8 +25,6 @@ #include "src/codegen/s390/interface-descriptors-s390-inl.h" #elif V8_TARGET_ARCH_MIPS64 #include "src/codegen/mips64/interface-descriptors-mips64-inl.h" -#elif V8_TARGET_ARCH_MIPS -#include "src/codegen/mips/interface-descriptors-mips-inl.h" #elif V8_TARGET_ARCH_LOONG64 #include "src/codegen/loong64/interface-descriptors-loong64-inl.h" #elif V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64 @@ -233,7 +231,7 @@ constexpr RegList WriteBarrierDescriptor::ComputeSavedRegisters( saved_registers.set(SlotAddressRegister()); } #elif V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_LOONG64 || \ - V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_MIPS + V8_TARGET_ARCH_MIPS64 if (object != ObjectRegister()) saved_registers.set(ObjectRegister()); // The slot address is always clobbered. saved_registers.set(SlotAddressRegister()); @@ -333,9 +331,9 @@ constexpr auto LoadWithReceiverBaselineDescriptor::registers() { // static constexpr auto BaselineOutOfLinePrologueDescriptor::registers() { // TODO(v8:11421): Implement on other platforms. 
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM || \ - V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390 || \ - V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_MIPS || \ +#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM || \ + V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390 || \ + V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \ V8_TARGET_ARCH_LOONG64 || V8_TARGET_ARCH_RISCV32 return RegisterArray( kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister, @@ -357,7 +355,7 @@ constexpr auto BaselineLeaveFrameDescriptor::registers() { #if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \ V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || \ V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \ - V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_LOONG64 || V8_TARGET_ARCH_RISCV32 + V8_TARGET_ARCH_LOONG64 || V8_TARGET_ARCH_RISCV32 return RegisterArray(ParamsSizeRegister(), WeightRegister()); #else return DefaultRegisterArray(); diff --git a/src/codegen/interface-descriptors.cc b/src/codegen/interface-descriptors.cc index 5f7d09fb2a..2d42960ba1 100644 --- a/src/codegen/interface-descriptors.cc +++ b/src/codegen/interface-descriptors.cc @@ -118,7 +118,7 @@ const char* CallInterfaceDescriptor::DebugName() const { } bool CallInterfaceDescriptor::IsValidFloatParameterRegister(Register reg) { -#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) +#if defined(V8_TARGET_ARCH_MIPS64) return reg.code() % 2 == 0; #else return true; diff --git a/src/codegen/macro-assembler.h b/src/codegen/macro-assembler.h index 3fb7b0ca58..aaf30dea62 100644 --- a/src/codegen/macro-assembler.h +++ b/src/codegen/macro-assembler.h @@ -51,9 +51,6 @@ enum class SmiCheck { kOmit, kInline }; #elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 #include "src/codegen/ppc/constants-ppc.h" #include "src/codegen/ppc/macro-assembler-ppc.h" -#elif V8_TARGET_ARCH_MIPS -#include "src/codegen/mips/constants-mips.h" -#include "src/codegen/mips/macro-assembler-mips.h" #elif V8_TARGET_ARCH_MIPS64 #include "src/codegen/mips64/constants-mips64.h" #include "src/codegen/mips64/macro-assembler-mips64.h" diff --git a/src/codegen/mips/assembler-mips-inl.h b/src/codegen/mips/assembler-mips-inl.h deleted file mode 100644 index 56e1bf2cfc..0000000000 --- a/src/codegen/mips/assembler-mips-inl.h +++ /dev/null @@ -1,355 +0,0 @@ - -// Copyright (c) 1994-2006 Sun Microsystems Inc. -// All Rights Reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// - Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// - Redistribution in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// - Neither the name of Sun Microsystems or the names of contributors may -// be used to endorse or promote products derived from this software without -// specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, -// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// The original source code covered by the above license above has been -// modified significantly by Google Inc. -// Copyright 2012 the V8 project authors. All rights reserved. - -#ifndef V8_CODEGEN_MIPS_ASSEMBLER_MIPS_INL_H_ -#define V8_CODEGEN_MIPS_ASSEMBLER_MIPS_INL_H_ - -#include "src/codegen/mips/assembler-mips.h" - -#include "src/codegen/assembler.h" -#include "src/debug/debug.h" -#include "src/objects/objects-inl.h" - -namespace v8 { -namespace internal { - -bool CpuFeatures::SupportsOptimizer() { return IsSupported(FPU); } - -// ----------------------------------------------------------------------------- -// Operand and MemOperand. - -bool Operand::is_reg() const { return rm_.is_valid(); } - -int32_t Operand::immediate() const { - DCHECK(!is_reg()); - DCHECK(!IsHeapObjectRequest()); - return value_.immediate; -} - -// ----------------------------------------------------------------------------- -// RelocInfo. - -void RelocInfo::apply(intptr_t delta) { - if (IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_)) { - // Absolute code pointer inside code object moves with the code object. - Assembler::RelocateInternalReference(rmode_, pc_, delta); - } else if (IsRelativeCodeTarget(rmode_)) { - Assembler::RelocateRelativeReference(rmode_, pc_, delta); - } -} - -Address RelocInfo::target_address() { - DCHECK(IsCodeTargetMode(rmode_) || IsRuntimeEntry(rmode_) || - IsWasmCall(rmode_)); - return Assembler::target_address_at(pc_, constant_pool_); -} - -Address RelocInfo::target_address_address() { - DCHECK(HasTargetAddressAddress()); - // Read the address of the word containing the target_address in an - // instruction stream. - // The only architecture-independent user of this function is the serializer. - // The serializer uses it to find out how many raw bytes of instruction to - // output before the next target. - // For an instruction like LUI/ORI where the target bits are mixed into the - // instruction bits, the size of the target will be zero, indicating that the - // serializer should not step forward in memory after a target is resolved - // and written. In this case the target_address_address function should - // return the end of the instructions to be patched, allowing the - // deserializer to deserialize the instructions as raw bytes and put them in - // place, ready to be patched with the target. After jump optimization, - // that is the address of the instruction that follows J/JAL/JR/JALR - // instruction. - if (IsMipsArchVariant(kMips32r6)) { - // On R6 we don't move to the end of the instructions to be patched, but one - // instruction before, because if these instructions are at the end of the - // code object it can cause errors in the deserializer. 
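// (Worked example, assuming kInstructionsFor32BitConstant == 2 (lui/ori)
//  and kInstrSize == 4 as on MIPS32 -- illustrative, not part of the
//  original patch: the r6 branch below returns pc_ + 4, one instruction
//  short of the patched pair, while the pre-r6 branch returns pc_ + 8,
//  the end of the pair.)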
- return pc_ + (Assembler::kInstructionsFor32BitConstant - 1) * kInstrSize; - } else { - return pc_ + Assembler::kInstructionsFor32BitConstant * kInstrSize; - } -} - -Address RelocInfo::constant_pool_entry_address() { UNREACHABLE(); } - -int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; } - -void Assembler::deserialization_set_special_target_at( - Address instruction_payload, Code code, Address target) { - set_target_address_at(instruction_payload, - !code.is_null() ? code.constant_pool() : kNullAddress, - target); -} - -int Assembler::deserialization_special_target_size( - Address instruction_payload) { - return kSpecialTargetSize; -} - -void Assembler::set_target_internal_reference_encoded_at(Address pc, - Address target) { - Instr instr1 = Assembler::instr_at(pc + 0 * kInstrSize); - Instr instr2 = Assembler::instr_at(pc + 1 * kInstrSize); - DCHECK(Assembler::IsLui(instr1)); - DCHECK(Assembler::IsOri(instr2) || Assembler::IsJicOrJialc(instr2)); - instr1 &= ~kImm16Mask; - instr2 &= ~kImm16Mask; - int32_t imm = static_cast(target); - DCHECK_EQ(imm & 3, 0); - if (Assembler::IsJicOrJialc(instr2)) { - // Encoded internal references are lui/jic load of 32-bit absolute address. - uint32_t lui_offset_u, jic_offset_u; - Assembler::UnpackTargetAddressUnsigned(imm, &lui_offset_u, &jic_offset_u); - - Assembler::instr_at_put(pc + 0 * kInstrSize, instr1 | lui_offset_u); - Assembler::instr_at_put(pc + 1 * kInstrSize, instr2 | jic_offset_u); - } else { - // Encoded internal references are lui/ori load of 32-bit absolute address. - PatchLuiOriImmediate(pc, imm, instr1, 0 * kInstrSize, instr2, - 1 * kInstrSize); - } - - // Currently used only by deserializer, and all code will be flushed - // after complete deserialization, no need to flush on each reference. -} - -void Assembler::deserialization_set_target_internal_reference_at( - Address pc, Address target, RelocInfo::Mode mode) { - if (RelocInfo::IsInternalReferenceEncoded(mode)) { - DCHECK(IsLui(instr_at(pc))); - set_target_internal_reference_encoded_at(pc, target); - } else { - DCHECK(RelocInfo::IsInternalReference(mode)); - Memory
<Address>
(pc) = target; - } -} - -HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) { - DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_) || - IsDataEmbeddedObject(rmode_)); - if (IsDataEmbeddedObject(rmode_)) { - return HeapObject::cast(Object(ReadUnalignedValue
<Address>
(pc_))); - } - return HeapObject::cast( - Object(Assembler::target_address_at(pc_, constant_pool_))); -} - -Handle RelocInfo::target_object_handle(Assembler* origin) { - if (IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_)) { - return Handle(reinterpret_cast( - Assembler::target_address_at(pc_, constant_pool_))); - } else if (IsDataEmbeddedObject(rmode_)) { - return Handle::cast(ReadUnalignedValue>(pc_)); - } - DCHECK(IsRelativeCodeTarget(rmode_)); - return origin->relative_code_target_object_handle_at(pc_); -} - -void RelocInfo::set_target_object(Heap* heap, HeapObject target, - WriteBarrierMode write_barrier_mode, - ICacheFlushMode icache_flush_mode) { - DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_) || - IsDataEmbeddedObject(rmode_)); - if (IsDataEmbeddedObject(rmode_)) { - WriteUnalignedValue(pc_, target.ptr()); - // No need to flush icache since no instructions were changed. - } else { - Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(), - icache_flush_mode); - } - if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() && - !v8_flags.disable_write_barriers) { - WriteBarrierForCode(host(), this, target); - } -} - -Address RelocInfo::target_external_reference() { - DCHECK(IsExternalReference(rmode_)); - return Assembler::target_address_at(pc_, constant_pool_); -} - -void RelocInfo::set_target_external_reference( - Address target, ICacheFlushMode icache_flush_mode) { - DCHECK(IsExternalReference(rmode_)); - Assembler::set_target_address_at(pc_, constant_pool_, target, - icache_flush_mode); -} - -Address RelocInfo::target_internal_reference() { - if (IsInternalReference(rmode_)) { - return Memory
<Address>
(pc_); - } else { - // Encoded internal references are lui/ori or lui/jic load of 32-bit - // absolute address. - DCHECK(IsInternalReferenceEncoded(rmode_)); - Instr instr1 = Assembler::instr_at(pc_ + 0 * kInstrSize); - Instr instr2 = Assembler::instr_at(pc_ + 1 * kInstrSize); - DCHECK(Assembler::IsLui(instr1)); - DCHECK(Assembler::IsOri(instr2) || Assembler::IsJicOrJialc(instr2)); - if (Assembler::IsJicOrJialc(instr2)) { - return static_cast
<Address>
( - Assembler::CreateTargetAddress(instr1, instr2)); - } - return static_cast
<Address>
(Assembler::GetLuiOriImmediate(instr1, instr2)); - } -} - -Address RelocInfo::target_internal_reference_address() { - DCHECK(IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_)); - return pc_; -} - -Builtin RelocInfo::target_builtin_at(Assembler* origin) { UNREACHABLE(); } - -Address RelocInfo::target_runtime_entry(Assembler* origin) { - DCHECK(IsRuntimeEntry(rmode_)); - return target_address(); -} - -void RelocInfo::set_target_runtime_entry(Address target, - WriteBarrierMode write_barrier_mode, - ICacheFlushMode icache_flush_mode) { - DCHECK(IsRuntimeEntry(rmode_)); - if (target_address() != target) - set_target_address(target, write_barrier_mode, icache_flush_mode); -} - -Address RelocInfo::target_off_heap_target() { - DCHECK(IsOffHeapTarget(rmode_)); - return Assembler::target_address_at(pc_, constant_pool_); -} - -void RelocInfo::WipeOut() { - DCHECK(IsFullEmbeddedObject(rmode_) || IsCodeTarget(rmode_) || - IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) || - IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_) || - IsOffHeapTarget(rmode_)); - if (IsInternalReference(rmode_)) { - Memory
<Address>
(pc_) = kNullAddress; - } else if (IsInternalReferenceEncoded(rmode_)) { - Assembler::set_target_internal_reference_encoded_at(pc_, kNullAddress); - } else { - Assembler::set_target_address_at(pc_, constant_pool_, kNullAddress); - } -} - -Handle Assembler::relative_code_target_object_handle_at( - Address pc) const { - Instr instr1 = instr_at(pc); - Instr instr2 = instr_at(pc + kInstrSize); - DCHECK(IsLui(instr1)); - DCHECK(IsOri(instr2) || IsNal(instr2)); - DCHECK(IsNal(instr2) || IsNal(instr_at(pc - kInstrSize))); - if (IsNal(instr2)) { - instr2 = instr_at(pc + 2 * kInstrSize); - } - // Interpret 2 instructions generated by li (lui/ori). - int code_target_index = GetLuiOriImmediate(instr1, instr2); - return GetCodeTarget(code_target_index); -} - -// ----------------------------------------------------------------------------- -// Assembler. - -void Assembler::CheckBuffer() { - if (buffer_space() <= kGap) { - GrowBuffer(); - } -} - -void Assembler::CheckForEmitInForbiddenSlot() { - if (!is_buffer_growth_blocked()) { - CheckBuffer(); - } - if (IsPrevInstrCompactBranch()) { - // Nop instruction to precede a CTI in forbidden slot: - Instr nop = SPECIAL | SLL; - *reinterpret_cast(pc_) = nop; - pc_ += kInstrSize; - - ClearCompactBranchState(); - } -} - -void Assembler::EmitHelper(Instr x, CompactBranchType is_compact_branch) { - if (IsPrevInstrCompactBranch()) { - if (Instruction::IsForbiddenAfterBranchInstr(x)) { - // Nop instruction to precede a CTI in forbidden slot: - Instr nop = SPECIAL | SLL; - *reinterpret_cast(pc_) = nop; - pc_ += kInstrSize; - } - ClearCompactBranchState(); - } - *reinterpret_cast(pc_) = x; - pc_ += kInstrSize; - if (is_compact_branch == CompactBranchType::COMPACT_BRANCH) { - EmittedCompactBranchInstruction(); - } - CheckTrampolinePoolQuick(); -} - -template <> -inline void Assembler::EmitHelper(uint8_t x); - -template -void Assembler::EmitHelper(T x) { - *reinterpret_cast(pc_) = x; - pc_ += sizeof(x); - CheckTrampolinePoolQuick(); -} - -template <> -void Assembler::EmitHelper(uint8_t x) { - *reinterpret_cast(pc_) = x; - pc_ += sizeof(x); - if (reinterpret_cast(pc_) % kInstrSize == 0) { - CheckTrampolinePoolQuick(); - } -} - -void Assembler::emit(Instr x, CompactBranchType is_compact_branch) { - if (!is_buffer_growth_blocked()) { - CheckBuffer(); - } - EmitHelper(x, is_compact_branch); -} - -EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); } - -} // namespace internal -} // namespace v8 - -#endif // V8_CODEGEN_MIPS_ASSEMBLER_MIPS_INL_H_ diff --git a/src/codegen/mips/assembler-mips.cc b/src/codegen/mips/assembler-mips.cc deleted file mode 100644 index 8bcc6a2e08..0000000000 --- a/src/codegen/mips/assembler-mips.cc +++ /dev/null @@ -1,3854 +0,0 @@ -// Copyright (c) 1994-2006 Sun Microsystems Inc. -// All Rights Reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// - Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// -// - Redistribution in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// -// - Neither the name of Sun Microsystems or the names of contributors may -// be used to endorse or promote products derived from this software without -// specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, -// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR -// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// The original source code covered by the above license above has been -// modified significantly by Google Inc. -// Copyright 2012 the V8 project authors. All rights reserved. - -#include "src/codegen/mips/assembler-mips.h" - -#if V8_TARGET_ARCH_MIPS - -#include "src/base/bits.h" -#include "src/base/cpu.h" -#include "src/codegen/flush-instruction-cache.h" -#include "src/codegen/mips/assembler-mips-inl.h" -#include "src/codegen/safepoint-table.h" -#include "src/codegen/string-constants.h" -#include "src/deoptimizer/deoptimizer.h" -#include "src/objects/heap-number-inl.h" - -namespace v8 { -namespace internal { - -// Get the CPU features enabled by the build. For cross compilation the -// preprocessor symbols CAN_USE_FPU_INSTRUCTIONS -// can be defined to enable FPU instructions when building the -// snapshot. -static unsigned CpuFeaturesImpliedByCompiler() { - unsigned answer = 0; -#ifdef CAN_USE_FPU_INSTRUCTIONS - answer |= 1u << FPU; -#endif // def CAN_USE_FPU_INSTRUCTIONS - - // If the compiler is allowed to use FPU then we can use FPU too in our code - // generation even when generating snapshots. This won't work for cross - // compilation. -#if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0 - answer |= 1u << FPU; -#endif - - return answer; -} - -bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(MIPS_SIMD); } - -void CpuFeatures::ProbeImpl(bool cross_compile) { - supported_ |= CpuFeaturesImpliedByCompiler(); - - // Only use statically determined features for cross compile (snapshot). - if (cross_compile) return; - - // If the compiler is allowed to use fpu then we can use fpu too in our - // code generation. -#ifndef __mips__ - // For the simulator build, use FPU. - supported_ |= 1u << FPU; -#if defined(_MIPS_ARCH_MIPS32R6) - // FP64 mode is implied on r6. - supported_ |= 1u << FP64FPU; -#if defined(_MIPS_MSA) - supported_ |= 1u << MIPS_SIMD; -#endif -#endif -#if defined(FPU_MODE_FP64) - supported_ |= 1u << FP64FPU; -#endif -#else - // Probe for additional features at runtime. 
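// (Typical downstream use once probing has run -- a hedged sketch of the
//  usual V8 pattern, not part of the original patch:
//    if (CpuFeatures::IsSupported(MIPS_SIMD)) {
//      CpuFeatureScope scope(&assm, MIPS_SIMD);
//      // ... emit MSA instructions ...
//    }
//  with a scalar fallback otherwise.)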
- base::CPU cpu; - if (cpu.has_fpu()) supported_ |= 1u << FPU; -#if defined(FPU_MODE_FPXX) - if (cpu.is_fp64_mode()) supported_ |= 1u << FP64FPU; -#elif defined(FPU_MODE_FP64) - supported_ |= 1u << FP64FPU; -#if defined(_MIPS_ARCH_MIPS32R6) -#if defined(_MIPS_MSA) - supported_ |= 1u << MIPS_SIMD; -#else - if (cpu.has_msa()) supported_ |= 1u << MIPS_SIMD; -#endif -#endif -#endif -#if defined(_MIPS_ARCH_MIPS32RX) - if (cpu.architecture() == 6) { - supported_ |= 1u << MIPSr6; - } else if (cpu.architecture() == 2) { - supported_ |= 1u << MIPSr1; - supported_ |= 1u << MIPSr2; - } else { - supported_ |= 1u << MIPSr1; - } -#endif -#endif - - // Set a static value on whether Simd is supported. - // This variable is only used for certain archs to query SupportWasmSimd128() - // at runtime in builtins using an extern ref. Other callers should use - // CpuFeatures::SupportWasmSimd128(). - CpuFeatures::supports_wasm_simd_128_ = CpuFeatures::SupportsWasmSimd128(); -} - -void CpuFeatures::PrintTarget() {} -void CpuFeatures::PrintFeatures() {} - -int ToNumber(Register reg) { - DCHECK(reg.is_valid()); - const int kNumbers[] = { - 0, // zero_reg - 1, // at - 2, // v0 - 3, // v1 - 4, // a0 - 5, // a1 - 6, // a2 - 7, // a3 - 8, // t0 - 9, // t1 - 10, // t2 - 11, // t3 - 12, // t4 - 13, // t5 - 14, // t6 - 15, // t7 - 16, // s0 - 17, // s1 - 18, // s2 - 19, // s3 - 20, // s4 - 21, // s5 - 22, // s6 - 23, // s7 - 24, // t8 - 25, // t9 - 26, // k0 - 27, // k1 - 28, // gp - 29, // sp - 30, // fp - 31, // ra - }; - return kNumbers[reg.code()]; -} - -Register ToRegister(int num) { - DCHECK(num >= 0 && num < kNumRegisters); - const Register kRegisters[] = { - zero_reg, at, v0, v1, a0, a1, a2, a3, t0, t1, t2, t3, t4, t5, t6, t7, - s0, s1, s2, s3, s4, s5, s6, s7, t8, t9, k0, k1, gp, sp, fp, ra}; - return kRegisters[num]; -} - -// ----------------------------------------------------------------------------- -// Implementation of RelocInfo. - -const int RelocInfo::kApplyMask = - RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) | - RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) | - RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET); - -bool RelocInfo::IsCodedSpecially() { - // The deserializer needs to know whether a pointer is specially coded. Being - // specially coded on MIPS means that it is a lui/ori instruction, and that is - // always the case inside code objects. - return true; -} - -bool RelocInfo::IsInConstantPool() { return false; } - -uint32_t RelocInfo::wasm_call_tag() const { - DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL); - return static_cast( - Assembler::target_address_at(pc_, constant_pool_)); -} - -// ----------------------------------------------------------------------------- -// Implementation of Operand and MemOperand. -// See assembler-mips-inl.h for inlined constructors. 
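// (The MemOperand(rm, unit, multiplier, offset_addend) constructor defined
//  below computes offset_ = unit * multiplier + offset_addend. Worked
//  example, assuming loadstore_chunk == 4 and offset_minus_one == -1:
//    MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one)
//  yields offset 1 * 4 + (-1) == 3, the last byte of the word, which is
//  what the unaligned lwl/lwr copy sequences rely on. Illustrative
//  arithmetic, not part of the original patch.)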
- -Operand::Operand(Handle handle) - : rm_(no_reg), rmode_(RelocInfo::FULL_EMBEDDED_OBJECT) { - value_.immediate = static_cast(handle.address()); -} - -Operand Operand::EmbeddedNumber(double value) { - int32_t smi; - if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi)); - Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT); - result.is_heap_object_request_ = true; - result.value_.heap_object_request = HeapObjectRequest(value); - return result; -} - -Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) { - Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT); - result.is_heap_object_request_ = true; - result.value_.heap_object_request = HeapObjectRequest(str); - return result; -} - -MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) { - offset_ = offset; -} - -MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier, - OffsetAddend offset_addend) - : Operand(rm) { - offset_ = unit * multiplier + offset_addend; -} - -void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) { - DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty()); - for (auto& request : heap_object_requests_) { - Handle object; - switch (request.kind()) { - case HeapObjectRequest::kHeapNumber: - object = isolate->factory()->NewHeapNumber( - request.heap_number()); - break; - case HeapObjectRequest::kStringConstant: - const StringConstantBase* str = request.string(); - CHECK_NOT_NULL(str); - object = str->AllocateStringConstant(isolate); - break; - } - Address pc = reinterpret_cast
<Address>
(buffer_start_) + request.offset(); - set_target_value_at(pc, reinterpret_cast(object.location())); - } -} - -// ----------------------------------------------------------------------------- -// Specific instructions, constants, and masks. - -static const int kNegOffset = 0x00008000; -// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r) -// operations as post-increment of sp. -const Instr kPopInstruction = ADDIU | (sp.code() << kRsShift) | - (sp.code() << kRtShift) | - (kPointerSize & kImm16Mask); -// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp. -const Instr kPushInstruction = ADDIU | (sp.code() << kRsShift) | - (sp.code() << kRtShift) | - (-kPointerSize & kImm16Mask); -// sw(r, MemOperand(sp, 0)) -const Instr kPushRegPattern = SW | (sp.code() << kRsShift) | (0 & kImm16Mask); -// lw(r, MemOperand(sp, 0)) -const Instr kPopRegPattern = LW | (sp.code() << kRsShift) | (0 & kImm16Mask); - -const Instr kLwRegFpOffsetPattern = - LW | (fp.code() << kRsShift) | (0 & kImm16Mask); - -const Instr kSwRegFpOffsetPattern = - SW | (fp.code() << kRsShift) | (0 & kImm16Mask); - -const Instr kLwRegFpNegOffsetPattern = - LW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask); - -const Instr kSwRegFpNegOffsetPattern = - SW | (fp.code() << kRsShift) | (kNegOffset & kImm16Mask); -// A mask for the Rt register for push, pop, lw, sw instructions. -const Instr kRtMask = kRtFieldMask; -const Instr kLwSwInstrTypeMask = 0xFFE00000; -const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask; -const Instr kLwSwOffsetMask = kImm16Mask; - -Assembler::Assembler(const AssemblerOptions& options, - std::unique_ptr buffer) - : AssemblerBase(options, std::move(buffer)), scratch_register_list_({at}) { - reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_); - - last_trampoline_pool_end_ = 0; - no_trampoline_pool_before_ = 0; - trampoline_pool_blocked_nesting_ = 0; - // We leave space (16 * kTrampolineSlotsSize) - // for BlockTrampolinePoolScope buffer. - next_buffer_check_ = v8_flags.force_long_branches - ? kMaxInt - : kMaxBranchOffset - kTrampolineSlotsSize * 16; - internal_trampoline_exception_ = false; - last_bound_pos_ = 0; - - trampoline_emitted_ = v8_flags.force_long_branches; - unbound_labels_count_ = 0; - block_buffer_growth_ = false; -} - -void Assembler::GetCode(Isolate* isolate, CodeDesc* desc, - SafepointTableBuilder* safepoint_table_builder, - int handler_table_offset) { - // As a crutch to avoid having to add manual Align calls wherever we use a - // raw workflow to create Code objects (mostly in tests), add another Align - // call here. It does no harm - the end of the Code object is aligned to the - // (larger) kCodeAlignment anyways. - // TODO(jgruber): Consider moving responsibility for proper alignment to - // metadata table builders (safepoint, handler, constant pool, code - // comments). - DataAlign(Code::kMetadataAlignment); - - EmitForbiddenSlotInstruction(); - - int code_comments_size = WriteCodeComments(); - - DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap. - - AllocateAndInstallRequestedHeapObjects(isolate); - - // Set up code descriptor. - // TODO(jgruber): Reconsider how these offsets and sizes are maintained up to - // this point to make CodeDesc initialization less fiddly. 
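// (Worked example of the offsets computed below -- illustrative numbers,
//  not part of the original patch: with pc_offset() == 1024 and
//  code_comments_size == 64,
//    code_comments_offset  == 1024 - 64 == 960
//    constant_pool_offset  == 960 - kConstantPoolSize == 960
//    handler_table_offset2 == 960   // when handler_table_offset is
//                                   // kNoHandlerTable
//  i.e. trailing metadata tables that are absent collapse onto the same
//  boundary.)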
- - static constexpr int kConstantPoolSize = 0; - const int instruction_size = pc_offset(); - const int code_comments_offset = instruction_size - code_comments_size; - const int constant_pool_offset = code_comments_offset - kConstantPoolSize; - const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable) - ? constant_pool_offset - : handler_table_offset; - const int safepoint_table_offset = - (safepoint_table_builder == kNoSafepointTable) - ? handler_table_offset2 - : safepoint_table_builder->safepoint_table_offset(); - const int reloc_info_offset = - static_cast(reloc_info_writer.pos() - buffer_->start()); - CodeDesc::Initialize(desc, this, safepoint_table_offset, - handler_table_offset2, constant_pool_offset, - code_comments_offset, reloc_info_offset); -} - -void Assembler::Align(int m) { - DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m)); - EmitForbiddenSlotInstruction(); - while ((pc_offset() & (m - 1)) != 0) { - nop(); - } -} - -void Assembler::CodeTargetAlign() { - // No advantage to aligning branch/call targets to more than - // single instruction, that I am aware of. - Align(4); -} - -Register Assembler::GetRtReg(Instr instr) { - return Register::from_code((instr & kRtFieldMask) >> kRtShift); -} - -Register Assembler::GetRsReg(Instr instr) { - return Register::from_code((instr & kRsFieldMask) >> kRsShift); -} - -Register Assembler::GetRdReg(Instr instr) { - return Register::from_code((instr & kRdFieldMask) >> kRdShift); -} - -uint32_t Assembler::GetRt(Instr instr) { - return (instr & kRtFieldMask) >> kRtShift; -} - -uint32_t Assembler::GetRtField(Instr instr) { return instr & kRtFieldMask; } - -uint32_t Assembler::GetRs(Instr instr) { - return (instr & kRsFieldMask) >> kRsShift; -} - -uint32_t Assembler::GetRsField(Instr instr) { return instr & kRsFieldMask; } - -uint32_t Assembler::GetRd(Instr instr) { - return (instr & kRdFieldMask) >> kRdShift; -} - -uint32_t Assembler::GetRdField(Instr instr) { return instr & kRdFieldMask; } - -uint32_t Assembler::GetSa(Instr instr) { - return (instr & kSaFieldMask) >> kSaShift; -} - -uint32_t Assembler::GetSaField(Instr instr) { return instr & kSaFieldMask; } - -uint32_t Assembler::GetOpcodeField(Instr instr) { return instr & kOpcodeMask; } - -uint32_t Assembler::GetFunction(Instr instr) { - return (instr & kFunctionFieldMask) >> kFunctionShift; -} - -uint32_t Assembler::GetFunctionField(Instr instr) { - return instr & kFunctionFieldMask; -} - -uint32_t Assembler::GetImmediate16(Instr instr) { return instr & kImm16Mask; } - -uint32_t Assembler::GetLabelConst(Instr instr) { return instr & ~kImm16Mask; } - -bool Assembler::IsPop(Instr instr) { - return (instr & ~kRtMask) == kPopRegPattern; -} - -bool Assembler::IsPush(Instr instr) { - return (instr & ~kRtMask) == kPushRegPattern; -} - -bool Assembler::IsSwRegFpOffset(Instr instr) { - return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern); -} - -bool Assembler::IsLwRegFpOffset(Instr instr) { - return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern); -} - -bool Assembler::IsSwRegFpNegOffset(Instr instr) { - return ((instr & (kLwSwInstrTypeMask | kNegOffset)) == - kSwRegFpNegOffsetPattern); -} - -bool Assembler::IsLwRegFpNegOffset(Instr instr) { - return ((instr & (kLwSwInstrTypeMask | kNegOffset)) == - kLwRegFpNegOffsetPattern); -} - -// Labels refer to positions in the (to be) generated code. -// There are bound, linked, and unused labels. -// -// Bound labels refer to known positions in the already -// generated code. pos() is the position the label refers to. 
-// -// Linked labels refer to unknown positions in the code -// to be generated; pos() is the position of the last -// instruction using the label. - -// The link chain is terminated by a value in the instruction of -1, -// which is an otherwise illegal value (branch -1 is inf loop). -// The instruction 16-bit offset field addresses 32-bit words, but in -// code is conv to an 18-bit value addressing bytes, hence the -4 value. - -const int kEndOfChain = -4; -// Determines the end of the Jump chain (a subset of the label link chain). -const int kEndOfJumpChain = 0; - -bool Assembler::IsMsaBranch(Instr instr) { - uint32_t opcode = GetOpcodeField(instr); - uint32_t rs_field = GetRsField(instr); - if (opcode == COP1) { - switch (rs_field) { - case BZ_V: - case BZ_B: - case BZ_H: - case BZ_W: - case BZ_D: - case BNZ_V: - case BNZ_B: - case BNZ_H: - case BNZ_W: - case BNZ_D: - return true; - default: - return false; - } - } else { - return false; - } -} - -bool Assembler::IsBranch(Instr instr) { - uint32_t opcode = GetOpcodeField(instr); - uint32_t rt_field = GetRtField(instr); - uint32_t rs_field = GetRsField(instr); - // Checks if the instruction is a branch. - bool isBranch = - opcode == BEQ || opcode == BNE || opcode == BLEZ || opcode == BGTZ || - opcode == BEQL || opcode == BNEL || opcode == BLEZL || opcode == BGTZL || - (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ || - rt_field == BLTZAL || rt_field == BGEZAL)) || - (opcode == COP1 && rs_field == BC1) || // Coprocessor branch. - (opcode == COP1 && rs_field == BC1EQZ) || - (opcode == COP1 && rs_field == BC1NEZ) || IsMsaBranch(instr); - if (!isBranch && IsMipsArchVariant(kMips32r6)) { - // All the 3 variants of POP10 (BOVC, BEQC, BEQZALC) and - // POP30 (BNVC, BNEC, BNEZALC) are branch ops. - isBranch |= opcode == POP10 || opcode == POP30 || opcode == BC || - opcode == BALC || - (opcode == POP66 && rs_field != 0) || // BEQZC - (opcode == POP76 && rs_field != 0); // BNEZC - } - return isBranch; -} - -bool Assembler::IsBc(Instr instr) { - uint32_t opcode = GetOpcodeField(instr); - // Checks if the instruction is a BC or BALC. - return opcode == BC || opcode == BALC; -} - -bool Assembler::IsNal(Instr instr) { - uint32_t opcode = GetOpcodeField(instr); - uint32_t rt_field = GetRtField(instr); - uint32_t rs_field = GetRsField(instr); - return opcode == REGIMM && rt_field == BLTZAL && rs_field == 0; -} - -bool Assembler::IsBzc(Instr instr) { - uint32_t opcode = GetOpcodeField(instr); - // Checks if the instruction is BEQZC or BNEZC. - return (opcode == POP66 && GetRsField(instr) != 0) || - (opcode == POP76 && GetRsField(instr) != 0); -} - -bool Assembler::IsEmittedConstant(Instr instr) { - uint32_t label_constant = GetLabelConst(instr); - return label_constant == 0; // Emitted label const in reg-exp engine. 
-} - -bool Assembler::IsBeq(Instr instr) { return GetOpcodeField(instr) == BEQ; } - -bool Assembler::IsBne(Instr instr) { return GetOpcodeField(instr) == BNE; } - -bool Assembler::IsBeqzc(Instr instr) { - uint32_t opcode = GetOpcodeField(instr); - return opcode == POP66 && GetRsField(instr) != 0; -} - -bool Assembler::IsBnezc(Instr instr) { - uint32_t opcode = GetOpcodeField(instr); - return opcode == POP76 && GetRsField(instr) != 0; -} - -bool Assembler::IsBeqc(Instr instr) { - uint32_t opcode = GetOpcodeField(instr); - uint32_t rs = GetRsField(instr); - uint32_t rt = GetRtField(instr); - return opcode == POP10 && rs != 0 && rs < rt; // && rt != 0 -} - -bool Assembler::IsBnec(Instr instr) { - uint32_t opcode = GetOpcodeField(instr); - uint32_t rs = GetRsField(instr); - uint32_t rt = GetRtField(instr); - return opcode == POP30 && rs != 0 && rs < rt; // && rt != 0 -} - -bool Assembler::IsJicOrJialc(Instr instr) { - uint32_t opcode = GetOpcodeField(instr); - uint32_t rs = GetRsField(instr); - return (opcode == POP66 || opcode == POP76) && rs == 0; -} - -bool Assembler::IsJump(Instr instr) { - uint32_t opcode = GetOpcodeField(instr); - uint32_t rt_field = GetRtField(instr); - uint32_t rd_field = GetRdField(instr); - uint32_t function_field = GetFunctionField(instr); - // Checks if the instruction is a jump. - return opcode == J || opcode == JAL || - (opcode == SPECIAL && rt_field == 0 && - ((function_field == JALR) || - (rd_field == 0 && (function_field == JR)))); -} - -bool Assembler::IsJ(Instr instr) { - uint32_t opcode = GetOpcodeField(instr); - // Checks if the instruction is a jump. - return opcode == J; -} - -bool Assembler::IsJal(Instr instr) { return GetOpcodeField(instr) == JAL; } - -bool Assembler::IsJr(Instr instr) { - if (!IsMipsArchVariant(kMips32r6)) { - return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR; - } else { - return GetOpcodeField(instr) == SPECIAL && GetRdField(instr) == 0 && - GetFunctionField(instr) == JALR; - } -} - -bool Assembler::IsJalr(Instr instr) { - return GetOpcodeField(instr) == SPECIAL && GetRdField(instr) != 0 && - GetFunctionField(instr) == JALR; -} - -bool Assembler::IsLui(Instr instr) { - uint32_t opcode = GetOpcodeField(instr); - // Checks if the instruction is a load upper immediate. - return opcode == LUI; -} - -bool Assembler::IsOri(Instr instr) { - uint32_t opcode = GetOpcodeField(instr); - // Checks if the instruction is a load upper immediate. - return opcode == ORI; -} - -bool Assembler::IsAddu(Instr instr, Register rd, Register rs, Register rt) { - uint32_t opcode = GetOpcodeField(instr); - uint32_t rd_field = GetRd(instr); - uint32_t rs_field = GetRs(instr); - uint32_t rt_field = GetRt(instr); - uint32_t sa_field = GetSaField(instr); - uint32_t rd_reg = static_cast(rd.code()); - uint32_t rs_reg = static_cast(rs.code()); - uint32_t rt_reg = static_cast(rt.code()); - uint32_t function_field = GetFunction(instr); - return opcode == SPECIAL && sa_field == 0 && function_field == ADDU && - rd_reg == rd_field && rs_reg == rs_field && rt_reg == rt_field; -} - -bool Assembler::IsMov(Instr instr, Register rd, Register rs) { - uint32_t opcode = GetOpcodeField(instr); - uint32_t rd_field = GetRd(instr); - uint32_t rs_field = GetRs(instr); - uint32_t rt_field = GetRt(instr); - uint32_t rd_reg = static_cast(rd.code()); - uint32_t rs_reg = static_cast(rs.code()); - uint32_t function_field = GetFunctionField(instr); - // Checks if the instruction is a OR with zero_reg argument (aka MOV). 
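// (Illustrative encoding, not part of the original patch: mov(t8, ra)
//  assembles to "or t8, ra, zero_reg", i.e.
//    SPECIAL | (ra.code() << kRsShift) | (zero_reg.code() << kRtShift) |
//        (t8.code() << kRdShift) | OR
//  and target_at() keys on IsMov(instr, t8, ra) to recognize the
//  long-branch marker sequence.)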
- bool res = opcode == SPECIAL && function_field == OR && rd_field == rd_reg && - rs_field == rs_reg && rt_field == 0; - return res; -} - -bool Assembler::IsNop(Instr instr, unsigned int type) { - // See Assembler::nop(type). - DCHECK_LT(type, 32); - uint32_t opcode = GetOpcodeField(instr); - uint32_t function = GetFunctionField(instr); - uint32_t rt = GetRt(instr); - uint32_t rd = GetRd(instr); - uint32_t sa = GetSa(instr); - - // Traditional mips nop == sll(zero_reg, zero_reg, 0) - // When marking non-zero type, use sll(zero_reg, at, type) - // to avoid use of mips ssnop and ehb special encodings - // of the sll instruction. - - Register nop_rt_reg = (type == 0) ? zero_reg : at; - bool ret = (opcode == SPECIAL && function == SLL && - rd == static_cast(ToNumber(zero_reg)) && - rt == static_cast(ToNumber(nop_rt_reg)) && sa == type); - - return ret; -} - -int32_t Assembler::GetBranchOffset(Instr instr) { - DCHECK(IsBranch(instr)); - return (static_cast(instr & kImm16Mask)) << 2; -} - -bool Assembler::IsLw(Instr instr) { - return (static_cast(instr & kOpcodeMask) == LW); -} - -int16_t Assembler::GetLwOffset(Instr instr) { - DCHECK(IsLw(instr)); - return ((instr & kImm16Mask)); -} - -Instr Assembler::SetLwOffset(Instr instr, int16_t offset) { - DCHECK(IsLw(instr)); - - // We actually create a new lw instruction based on the original one. - Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask) | - (offset & kImm16Mask); - - return temp_instr; -} - -bool Assembler::IsSw(Instr instr) { - return (static_cast(instr & kOpcodeMask) == SW); -} - -Instr Assembler::SetSwOffset(Instr instr, int16_t offset) { - DCHECK(IsSw(instr)); - return ((instr & ~kImm16Mask) | (offset & kImm16Mask)); -} - -bool Assembler::IsAddImmediate(Instr instr) { - return ((instr & kOpcodeMask) == ADDIU); -} - -Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) { - DCHECK(IsAddImmediate(instr)); - return ((instr & ~kImm16Mask) | (offset & kImm16Mask)); -} - -bool Assembler::IsAndImmediate(Instr instr) { - return GetOpcodeField(instr) == ANDI; -} - -static Assembler::OffsetSize OffsetSizeInBits(Instr instr) { - if (IsMipsArchVariant(kMips32r6)) { - if (Assembler::IsBc(instr)) { - return Assembler::OffsetSize::kOffset26; - } else if (Assembler::IsBzc(instr)) { - return Assembler::OffsetSize::kOffset21; - } - } - return Assembler::OffsetSize::kOffset16; -} - -static inline int32_t AddBranchOffset(int pos, Instr instr) { - int bits = OffsetSizeInBits(instr); - const int32_t mask = (1 << bits) - 1; - bits = 32 - bits; - - // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming - // the compiler uses arithmetic shifts for signed integers. - int32_t imm = ((instr & mask) << bits) >> (bits - 2); - - if (imm == kEndOfChain) { - // EndOfChain sentinel is returned directly, not relative to pc or pos. - return kEndOfChain; - } else { - return pos + Assembler::kBranchPCOffset + imm; - } -} - -uint32_t Assembler::CreateTargetAddress(Instr instr_lui, Instr instr_jic) { - DCHECK(IsLui(instr_lui) && IsJicOrJialc(instr_jic)); - int16_t jic_offset = GetImmediate16(instr_jic); - int16_t lui_offset = GetImmediate16(instr_lui); - - if (jic_offset < 0) { - lui_offset += kImm16Mask; - } - uint32_t lui_offset_u = (static_cast(lui_offset)) << kLuiShift; - uint32_t jic_offset_u = static_cast(jic_offset) & kImm16Mask; - - return lui_offset_u | jic_offset_u; -} - -// Use just lui and jic instructions. Insert lower part of the target address in -// jic offset part. 
Since jic sign-extends offset and then add it with register, -// before that addition, difference between upper part of the target address and -// upper part of the sign-extended offset (0xFFFF or 0x0000), will be inserted -// in jic register with lui instruction. -void Assembler::UnpackTargetAddress(uint32_t address, int16_t* lui_offset, - int16_t* jic_offset) { - *lui_offset = (address & kHiMask) >> kLuiShift; - *jic_offset = address & kLoMask; - - if (*jic_offset < 0) { - *lui_offset -= kImm16Mask; - } -} - -void Assembler::UnpackTargetAddressUnsigned(uint32_t address, - uint32_t* lui_offset, - uint32_t* jic_offset) { - int16_t lui_offset16 = (address & kHiMask) >> kLuiShift; - int16_t jic_offset16 = address & kLoMask; - - if (jic_offset16 < 0) { - lui_offset16 -= kImm16Mask; - } - *lui_offset = static_cast(lui_offset16) & kImm16Mask; - *jic_offset = static_cast(jic_offset16) & kImm16Mask; -} - -void Assembler::PatchLuiOriImmediate(int pc, int32_t imm, Instr instr_lui, - Address offset_lui, Instr instr_ori, - Address offset_ori) { - DCHECK(IsLui(instr_lui)); - DCHECK(IsOri(instr_ori)); - instr_at_put(static_cast(pc + offset_lui), - instr_lui | ((imm >> kLuiShift) & kImm16Mask)); - instr_at_put(static_cast(pc + offset_ori), - instr_ori | (imm & kImm16Mask)); -} - -void Assembler::PatchLuiOriImmediate(Address pc, int32_t imm, Instr instr_lui, - Address offset_lui, Instr instr_ori, - Address offset_ori) { - DCHECK(IsLui(instr_lui)); - DCHECK(IsOri(instr_ori)); - instr_at_put(pc + offset_lui, instr_lui | ((imm >> kLuiShift) & kImm16Mask)); - instr_at_put(pc + offset_ori, instr_ori | (imm & kImm16Mask)); -} - -int32_t Assembler::GetLuiOriImmediate(Instr instr_lui, Instr instr_ori) { - DCHECK(IsLui(instr_lui)); - DCHECK(IsOri(instr_ori)); - int32_t imm; - imm = (instr_lui & static_cast(kImm16Mask)) << kLuiShift; - imm |= (instr_ori & static_cast(kImm16Mask)); - return imm; -} - -int Assembler::target_at(int pos, bool is_internal) { - Instr instr = instr_at(pos); - if (is_internal) { - if (instr == 0) { - return kEndOfChain; - } else { - int32_t instr_address = reinterpret_cast(buffer_start_ + pos); - int delta = static_cast(instr_address - instr); - DCHECK(pos > delta); - return pos - delta; - } - } - if ((instr & ~kImm16Mask) == 0) { - // Emitted label constant, not part of a branch. - if (instr == 0) { - return kEndOfChain; - } else { - int32_t imm18 = ((instr & static_cast(kImm16Mask)) << 16) >> 14; - return (imm18 + pos); - } - } - // Check we have a branch or jump instruction. - DCHECK(IsBranch(instr) || IsLui(instr) || IsMov(instr, t8, ra)); - if (IsBranch(instr)) { - return AddBranchOffset(pos, instr); - } else if (IsMov(instr, t8, ra)) { - int32_t imm32; - Instr instr_lui = instr_at(pos + 2 * kInstrSize); - Instr instr_ori = instr_at(pos + 3 * kInstrSize); - imm32 = GetLuiOriImmediate(instr_lui, instr_ori); - if (imm32 == kEndOfJumpChain) { - // EndOfChain sentinel is returned directly, not relative to pc or pos. - return kEndOfChain; - } - return pos + Assembler::kLongBranchPCOffset + imm32; - } else { - DCHECK(IsLui(instr)); - if (IsNal(instr_at(pos + kInstrSize))) { - int32_t imm32; - Instr instr_lui = instr_at(pos + 0 * kInstrSize); - Instr instr_ori = instr_at(pos + 2 * kInstrSize); - imm32 = GetLuiOriImmediate(instr_lui, instr_ori); - if (imm32 == kEndOfJumpChain) { - // EndOfChain sentinel is returned directly, not relative to pc or pos. 
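// (Worked example of the lui/ori packing read back just above --
//  illustrative, not part of the original patch: for target 0x1234ABCD,
//  GetLuiOriImmediate reassembles (0x1234 << 16) | 0xABCD. The lui/jic
//  form must compensate for jic's sign-extension: the low half 0xABCD is
//  negative as an int16, so UnpackTargetAddress bumps the lui half to
//  0x1235, and 0x12350000 + int32_t(0xFFFFABCD) == 0x1234ABCD.)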
- return kEndOfChain; - } - return pos + Assembler::kLongBranchPCOffset + imm32; - } else { - Instr instr1 = instr_at(pos + 0 * kInstrSize); - Instr instr2 = instr_at(pos + 1 * kInstrSize); - DCHECK(IsOri(instr2) || IsJicOrJialc(instr2)); - int32_t imm; - if (IsJicOrJialc(instr2)) { - imm = CreateTargetAddress(instr1, instr2); - } else { - imm = GetLuiOriImmediate(instr1, instr2); - } - - if (imm == kEndOfJumpChain) { - // EndOfChain sentinel is returned directly, not relative to pc or pos. - return kEndOfChain; - } else { - uint32_t instr_address = reinterpret_cast(buffer_start_ + pos); - int32_t delta = instr_address - imm; - DCHECK(pos > delta); - return pos - delta; - } - } - } -} - -static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos, - Instr instr) { - int32_t bits = OffsetSizeInBits(instr); - int32_t imm = target_pos - (pos + Assembler::kBranchPCOffset); - DCHECK_EQ(imm & 3, 0); - imm >>= 2; - - const int32_t mask = (1 << bits) - 1; - instr &= ~mask; - DCHECK(is_intn(imm, bits)); - - return instr | (imm & mask); -} - -void Assembler::target_at_put(int32_t pos, int32_t target_pos, - bool is_internal) { - Instr instr = instr_at(pos); - - if (is_internal) { - uint32_t imm = reinterpret_cast(buffer_start_) + target_pos; - instr_at_put(pos, imm); - return; - } - if ((instr & ~kImm16Mask) == 0) { - DCHECK(target_pos == kEndOfChain || target_pos >= 0); - // Emitted label constant, not part of a branch. - // Make label relative to Code pointer of generated Code object. - instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag)); - return; - } - - DCHECK(IsBranch(instr) || IsLui(instr) || IsMov(instr, t8, ra)); - if (IsBranch(instr)) { - instr = SetBranchOffset(pos, target_pos, instr); - instr_at_put(pos, instr); - } else if (IsMov(instr, t8, ra)) { - Instr instr_lui = instr_at(pos + 2 * kInstrSize); - Instr instr_ori = instr_at(pos + 3 * kInstrSize); - DCHECK(IsLui(instr_lui)); - DCHECK(IsOri(instr_ori)); - - int32_t imm_short = target_pos - (pos + Assembler::kBranchPCOffset); - - if (is_int16(imm_short)) { - // Optimize by converting to regular branch with 16-bit - // offset - Instr instr_b = BEQ; - instr_b = SetBranchOffset(pos, target_pos, instr_b); - - Instr instr_j = instr_at(pos + 5 * kInstrSize); - Instr instr_branch_delay; - - if (IsJump(instr_j)) { - // Case when branch delay slot is protected. - instr_branch_delay = nopInstr; - } else { - // Case when branch delay slot is used. - instr_branch_delay = instr_at(pos + 7 * kInstrSize); - } - instr_at_put(pos + 0 * kInstrSize, instr_b); - instr_at_put(pos + 1 * kInstrSize, instr_branch_delay); - } else { - int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset); - DCHECK_EQ(imm & 3, 0); - - instr_lui &= ~kImm16Mask; - instr_ori &= ~kImm16Mask; - - PatchLuiOriImmediate(pos, imm, instr_lui, 2 * kInstrSize, instr_ori, - 3 * kInstrSize); - } - } else { - DCHECK(IsLui(instr)); - if (IsNal(instr_at(pos + kInstrSize))) { - Instr instr_lui = instr_at(pos + 0 * kInstrSize); - Instr instr_ori = instr_at(pos + 2 * kInstrSize); - DCHECK(IsLui(instr_lui)); - DCHECK(IsOri(instr_ori)); - int32_t imm = target_pos - (pos + Assembler::kLongBranchPCOffset); - DCHECK_EQ(imm & 3, 0); - if (is_int16(imm + Assembler::kLongBranchPCOffset - - Assembler::kBranchPCOffset)) { - // Optimize by converting to regular branch and link with 16-bit - // offset. - Instr instr_b = REGIMM | BGEZAL; // Branch and link. 
-
-void Assembler::print(const Label* L) {
- if (L->is_unused()) {
- PrintF("unused label\n");
- } else if (L->is_bound()) {
- PrintF("bound label to %d\n", L->pos());
- } else if (L->is_linked()) {
- Label l;
- l.link_to(L->pos());
- PrintF("unbound label");
- while (l.is_linked()) {
- PrintF("@ %d ", l.pos());
- Instr instr = instr_at(l.pos());
- if ((instr & ~kImm16Mask) == 0) {
- PrintF("value\n");
- } else {
- PrintF("%d\n", instr);
- }
- next(&l, is_internal_reference(&l));
- }
- } else {
- PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
- }
-}
-
-void Assembler::bind_to(Label* L, int pos) {
- DCHECK(0 <= pos && pos <= pc_offset()); // Must have valid binding position.
- int32_t trampoline_pos = kInvalidSlotPos;
- bool is_internal = false;
- if (L->is_linked() && !trampoline_emitted_) {
- unbound_labels_count_--;
- if (!is_internal_reference(L)) {
- next_buffer_check_ += kTrampolineSlotsSize;
- }
- }
-
- while (L->is_linked()) {
- int32_t fixup_pos = L->pos();
- int32_t dist = pos - fixup_pos;
- is_internal = is_internal_reference(L);
- next(L, is_internal); // Call next before overwriting link with target at
- // fixup_pos.
- Instr instr = instr_at(fixup_pos);
- if (is_internal) {
- target_at_put(fixup_pos, pos, is_internal);
- } else {
- if (IsBranch(instr)) {
- int branch_offset = BranchOffset(instr);
- if (dist > branch_offset) {
- if (trampoline_pos == kInvalidSlotPos) {
- trampoline_pos = get_trampoline_entry(fixup_pos);
- CHECK_NE(trampoline_pos, kInvalidSlotPos);
- }
- CHECK((trampoline_pos - fixup_pos) <= branch_offset);
- target_at_put(fixup_pos, trampoline_pos, false);
- fixup_pos = trampoline_pos;
- }
- target_at_put(fixup_pos, pos, false);
- } else {
- target_at_put(fixup_pos, pos, false);
- }
- }
- }
- L->bind_to(pos);
-
- // Keep track of the last bound label so we don't eliminate any instructions
- // before a bound label.
- if (pos > last_bound_pos_) last_bound_pos_ = pos;
-}
-
-void Assembler::bind(Label* L) {
- DCHECK(!L->is_bound()); // Label can only be bound once.
- bind_to(L, pc_offset()); -} - -void Assembler::next(Label* L, bool is_internal) { - DCHECK(L->is_linked()); - int link = target_at(L->pos(), is_internal); - if (link == kEndOfChain) { - L->Unuse(); - } else { - DCHECK_GE(link, 0); - L->link_to(link); - } -} - -bool Assembler::is_near(Label* L) { - DCHECK(L->is_bound()); - return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize; -} - -bool Assembler::is_near(Label* L, OffsetSize bits) { - if (L == nullptr || !L->is_bound()) return true; - return pc_offset() - L->pos() < (1 << (bits + 2 - 1)) - 1 - 5 * kInstrSize; -} - -bool Assembler::is_near_branch(Label* L) { - DCHECK(L->is_bound()); - return IsMipsArchVariant(kMips32r6) ? is_near_r6(L) : is_near_pre_r6(L); -} - -int Assembler::BranchOffset(Instr instr) { - // At pre-R6 and for other R6 branches the offset is 16 bits. - int bits = OffsetSize::kOffset16; - - if (IsMipsArchVariant(kMips32r6)) { - uint32_t opcode = GetOpcodeField(instr); - switch (opcode) { - // Checks BC or BALC. - case BC: - case BALC: - bits = OffsetSize::kOffset26; - break; - - // Checks BEQZC or BNEZC. - case POP66: - case POP76: - if (GetRsField(instr) != 0) bits = OffsetSize::kOffset21; - break; - default: - break; - } - } - - return (1 << (bits + 2 - 1)) - 1; -} - -// We have to use a temporary register for things that can be relocated even -// if they can be encoded in the MIPS's 16 bits of immediate-offset instruction -// space. There is no guarantee that the relocated location can be similarly -// encoded. -bool Assembler::MustUseReg(RelocInfo::Mode rmode) { - return !RelocInfo::IsNoInfo(rmode); -} - -void Assembler::GenInstrRegister(Opcode opcode, Register rs, Register rt, - Register rd, uint16_t sa, - SecondaryField func) { - DCHECK(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa)); - Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) | - (rd.code() << kRdShift) | (sa << kSaShift) | func; - emit(instr); -} - -void Assembler::GenInstrRegister(Opcode opcode, Register rs, Register rt, - uint16_t msb, uint16_t lsb, - SecondaryField func) { - DCHECK(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb)); - Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) | - (msb << kRdShift) | (lsb << kSaShift) | func; - emit(instr); -} - -void Assembler::GenInstrRegister(Opcode opcode, SecondaryField fmt, - FPURegister ft, FPURegister fs, FPURegister fd, - SecondaryField func) { - DCHECK(fd.is_valid() && fs.is_valid() && ft.is_valid()); - Instr instr = opcode | fmt | (ft.code() << kFtShift) | - (fs.code() << kFsShift) | (fd.code() << kFdShift) | func; - emit(instr); -} - -void Assembler::GenInstrRegister(Opcode opcode, FPURegister fr, FPURegister ft, - FPURegister fs, FPURegister fd, - SecondaryField func) { - DCHECK(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid()); - Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift) | - (fs.code() << kFsShift) | (fd.code() << kFdShift) | func; - emit(instr); -} - -void Assembler::GenInstrRegister(Opcode opcode, SecondaryField fmt, Register rt, - FPURegister fs, FPURegister fd, - SecondaryField func) { - DCHECK(fd.is_valid() && fs.is_valid() && rt.is_valid()); - Instr instr = opcode | fmt | (rt.code() << kRtShift) | - (fs.code() << kFsShift) | (fd.code() << kFdShift) | func; - emit(instr); -} - -void Assembler::GenInstrRegister(Opcode opcode, SecondaryField fmt, Register rt, - FPUControlRegister fs, SecondaryField func) { - DCHECK(fs.is_valid() && rt.is_valid()); - Instr instr 
= - opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func; - emit(instr); -} - -// Instructions with immediate value. -// Registers are in the order of the instruction encoding, from left to right. -void Assembler::GenInstrImmediate(Opcode opcode, Register rs, Register rt, - int32_t j, - CompactBranchType is_compact_branch) { - DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j))); - Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift) | - (j & kImm16Mask); - emit(instr, is_compact_branch); -} - -void Assembler::GenInstrImmediate(Opcode opcode, Register base, Register rt, - int32_t offset9, int bit6, - SecondaryField func) { - DCHECK(base.is_valid() && rt.is_valid() && is_int9(offset9) && - is_uint1(bit6)); - Instr instr = opcode | (base.code() << kBaseShift) | (rt.code() << kRtShift) | - ((offset9 << kImm9Shift) & kImm9Mask) | bit6 << kBit6Shift | - func; - emit(instr); -} - -void Assembler::GenInstrImmediate(Opcode opcode, Register rs, SecondaryField SF, - int32_t j, - CompactBranchType is_compact_branch) { - DCHECK(rs.is_valid() && (is_int16(j) || is_uint16(j))); - Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask); - emit(instr, is_compact_branch); -} - -void Assembler::GenInstrImmediate(Opcode opcode, Register rs, FPURegister ft, - int32_t j, - CompactBranchType is_compact_branch) { - DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j))); - Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift) | - (j & kImm16Mask); - emit(instr, is_compact_branch); -} - -void Assembler::GenInstrImmediate(Opcode opcode, Register rs, int32_t offset21, - CompactBranchType is_compact_branch) { - DCHECK(rs.is_valid() && (is_int21(offset21))); - Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask); - emit(instr, is_compact_branch); -} - -void Assembler::GenInstrImmediate(Opcode opcode, Register rs, - uint32_t offset21) { - DCHECK(rs.is_valid() && (is_uint21(offset21))); - Instr instr = opcode | (rs.code() << kRsShift) | (offset21 & kImm21Mask); - emit(instr); -} - -void Assembler::GenInstrImmediate(Opcode opcode, int32_t offset26, - CompactBranchType is_compact_branch) { - DCHECK(is_int26(offset26)); - Instr instr = opcode | (offset26 & kImm26Mask); - emit(instr, is_compact_branch); -} - -void Assembler::GenInstrJump(Opcode opcode, uint32_t address) { - BlockTrampolinePoolScope block_trampoline_pool(this); - DCHECK(is_uint26(address)); - Instr instr = opcode | address; - emit(instr); - BlockTrampolinePoolFor(1); // For associated delay slot. -} - -// MSA instructions -void Assembler::GenInstrMsaI8(SecondaryField operation, uint32_t imm8, - MSARegister ws, MSARegister wd) { - DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD)); - DCHECK(ws.is_valid() && wd.is_valid() && is_uint8(imm8)); - Instr instr = MSA | operation | ((imm8 & kImm8Mask) << kWtShift) | - (ws.code() << kWsShift) | (wd.code() << kWdShift); - emit(instr); -} - -void Assembler::GenInstrMsaI5(SecondaryField operation, SecondaryField df, - int32_t imm5, MSARegister ws, MSARegister wd) { - DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD)); - DCHECK(ws.is_valid() && wd.is_valid()); - DCHECK((operation == MAXI_S) || (operation == MINI_S) || - (operation == CEQI) || (operation == CLTI_S) || - (operation == CLEI_S) - ? 
is_int5(imm5)
- : is_uint5(imm5));
- Instr instr = MSA | operation | df | ((imm5 & kImm5Mask) << kWtShift) |
- (ws.code() << kWsShift) | (wd.code() << kWdShift);
- emit(instr);
-}
-
-void Assembler::GenInstrMsaBit(SecondaryField operation, SecondaryField df,
- uint32_t m, MSARegister ws, MSARegister wd) {
- DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
- DCHECK(ws.is_valid() && wd.is_valid() && is_valid_msa_df_m(df, m));
- Instr instr = MSA | operation | df | (m << kWtShift) |
- (ws.code() << kWsShift) | (wd.code() << kWdShift);
- emit(instr);
-}
-
-void Assembler::GenInstrMsaI10(SecondaryField operation, SecondaryField df,
- int32_t imm10, MSARegister wd) {
- DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
- DCHECK(wd.is_valid() && is_int10(imm10));
- Instr instr = MSA | operation | df | ((imm10 & kImm10Mask) << kWsShift) |
- (wd.code() << kWdShift);
- emit(instr);
-}
-
-template <typename RegType>
-void Assembler::GenInstrMsa3R(SecondaryField operation, SecondaryField df,
- RegType t, MSARegister ws, MSARegister wd) {
- DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
- DCHECK(t.is_valid() && ws.is_valid() && wd.is_valid());
- Instr instr = MSA | operation | df | (t.code() << kWtShift) |
- (ws.code() << kWsShift) | (wd.code() << kWdShift);
- emit(instr);
-}
-
-template <typename SrcType, typename DstType>
-void Assembler::GenInstrMsaElm(SecondaryField operation, SecondaryField df,
- uint32_t n, SrcType src, DstType dst) {
- DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
- DCHECK(src.is_valid() && dst.is_valid() && is_valid_msa_df_n(df, n));
- Instr instr = MSA | operation | df | (n << kWtShift) |
- (src.code() << kWsShift) | (dst.code() << kWdShift) |
- MSA_ELM_MINOR;
- emit(instr);
-}
-
-void Assembler::GenInstrMsa3RF(SecondaryField operation, uint32_t df,
- MSARegister wt, MSARegister ws, MSARegister wd) {
- DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
- DCHECK(wt.is_valid() && ws.is_valid() && wd.is_valid());
- DCHECK_LT(df, 2);
- Instr instr = MSA | operation | (df << 21) | (wt.code() << kWtShift) |
- (ws.code() << kWsShift) | (wd.code() << kWdShift);
- emit(instr);
-}
-
-void Assembler::GenInstrMsaVec(SecondaryField operation, MSARegister wt,
- MSARegister ws, MSARegister wd) {
- DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
- DCHECK(wt.is_valid() && ws.is_valid() && wd.is_valid());
- Instr instr = MSA | operation | (wt.code() << kWtShift) |
- (ws.code() << kWsShift) | (wd.code() << kWdShift) |
- MSA_VEC_2R_2RF_MINOR;
- emit(instr);
-}
-
-void Assembler::GenInstrMsaMI10(SecondaryField operation, int32_t s10,
- Register rs, MSARegister wd) {
- DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
- DCHECK(rs.is_valid() && wd.is_valid() && is_int10(s10));
- Instr instr = MSA | operation | ((s10 & kImm10Mask) << kWtShift) |
- (rs.code() << kWsShift) | (wd.code() << kWdShift);
- emit(instr);
-}
-
-void Assembler::GenInstrMsa2R(SecondaryField operation, SecondaryField df,
- MSARegister ws, MSARegister wd) {
- DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
- DCHECK(ws.is_valid() && wd.is_valid());
- Instr instr = MSA | MSA_2R_FORMAT | operation | df | (ws.code() << kWsShift) |
- (wd.code() << kWdShift) | MSA_VEC_2R_2RF_MINOR;
- emit(instr);
-}
-
-void Assembler::GenInstrMsa2RF(SecondaryField operation, SecondaryField df,
- MSARegister ws, MSARegister wd) {
- DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
- DCHECK(ws.is_valid() && wd.is_valid());
- Instr instr = MSA | MSA_2RF_FORMAT | operation | df |
- (ws.code() << kWsShift) | (wd.code() << kWdShift) |
- MSA_VEC_2R_2RF_MINOR;
- emit(instr);
-}
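
All of these emitters follow one pattern: OR together a major opcode, a data-format field, and register or immediate fields at fixed shifts. A minimal standalone sketch (the shift values mirror the kWd/kWs/kWt constants used above, but the opcode and field widths are illustrative, not the real MSA layout) shows the round trip:

#include <cassert>
#include <cstdint>

int main() {
  // Illustrative field layout: three 5-bit register fields.
  const uint32_t kWdShift = 6, kWsShift = 11, kWtShift = 16;

  uint32_t opcode = 0x1E << 26;  // pretend major opcode
  uint32_t wt = 3, ws = 7, wd = 12;
  uint32_t instr =
      opcode | (wt << kWtShift) | (ws << kWsShift) | (wd << kWdShift);

  // Fields come back out with the inverse shift-and-mask.
  assert(((instr >> kWtShift) & 0x1F) == wt);
  assert(((instr >> kWsShift) & 0x1F) == ws);
  assert(((instr >> kWdShift) & 0x1F) == wd);
  return 0;
}
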
-
-void Assembler::GenInstrMsaBranch(SecondaryField operation, MSARegister wt,
- int32_t offset16) {
- DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
- DCHECK(wt.is_valid() && is_int16(offset16));
- BlockTrampolinePoolScope block_trampoline_pool(this);
- Instr instr =
- COP1 | operation | (wt.code() << kWtShift) | (offset16 & kImm16Mask);
- emit(instr);
- BlockTrampolinePoolFor(1); // For associated delay slot.
-}
-
-// Returns the next free trampoline entry.
-int32_t Assembler::get_trampoline_entry(int32_t pos) {
- int32_t trampoline_entry = kInvalidSlotPos;
-
- if (!internal_trampoline_exception_) {
- if (trampoline_.start() > pos) {
- trampoline_entry = trampoline_.take_slot();
- }
-
- if (kInvalidSlotPos == trampoline_entry) {
- internal_trampoline_exception_ = true;
- }
- }
- return trampoline_entry;
-}
-
-uint32_t Assembler::jump_address(Label* L) {
- int32_t target_pos;
-
- if (L->is_bound()) {
- target_pos = L->pos();
- } else {
- if (L->is_linked()) {
- target_pos = L->pos(); // L's link.
- L->link_to(pc_offset());
- } else {
- L->link_to(pc_offset());
- return kEndOfJumpChain;
- }
- }
-
- uint32_t imm = reinterpret_cast<uint32_t>(buffer_start_) + target_pos;
- DCHECK_EQ(imm & 3, 0);
-
- return imm;
-}
-
-uint32_t Assembler::branch_long_offset(Label* L) {
- int32_t target_pos;
-
- if (L->is_bound()) {
- target_pos = L->pos();
- } else {
- if (L->is_linked()) {
- target_pos = L->pos(); // L's link.
- L->link_to(pc_offset());
- } else {
- L->link_to(pc_offset());
- return kEndOfJumpChain;
- }
- }
-
- DCHECK(is_int32(static_cast<int64_t>(target_pos) -
- static_cast<int64_t>(pc_offset() + kLongBranchPCOffset)));
- int32_t offset = target_pos - (pc_offset() + kLongBranchPCOffset);
- DCHECK_EQ(offset & 3, 0);
-
- return offset;
-}
-
-int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
- int32_t target_pos;
- int32_t pad = IsPrevInstrCompactBranch() ? kInstrSize : 0;
-
- if (L->is_bound()) {
- target_pos = L->pos();
- } else {
- if (L->is_linked()) {
- target_pos = L->pos();
- L->link_to(pc_offset() + pad);
- } else {
- L->link_to(pc_offset() + pad);
- if (!trampoline_emitted_) {
- unbound_labels_count_++;
- next_buffer_check_ -= kTrampolineSlotsSize;
- }
- return kEndOfChain;
- }
- }
-
- int32_t offset = target_pos - (pc_offset() + kBranchPCOffset + pad);
- DCHECK(is_intn(offset, bits + 2));
- DCHECK_EQ(offset & 3, 0);
-
- return offset;
-}
-
-void Assembler::label_at_put(Label* L, int at_offset) {
- int target_pos;
- if (L->is_bound()) {
- target_pos = L->pos();
- instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
- } else {
- if (L->is_linked()) {
- target_pos = L->pos(); // L's link.
- int32_t imm18 = target_pos - at_offset; - DCHECK_EQ(imm18 & 3, 0); - int32_t imm16 = imm18 >> 2; - DCHECK(is_int16(imm16)); - instr_at_put(at_offset, (imm16 & kImm16Mask)); - } else { - target_pos = kEndOfChain; - instr_at_put(at_offset, 0); - if (!trampoline_emitted_) { - unbound_labels_count_++; - next_buffer_check_ -= kTrampolineSlotsSize; - } - } - L->link_to(at_offset); - } -} - -//------- Branch and jump instructions -------- - -void Assembler::b(int16_t offset) { beq(zero_reg, zero_reg, offset); } - -void Assembler::bal(int16_t offset) { bgezal(zero_reg, offset); } - -void Assembler::bc(int32_t offset) { - DCHECK(IsMipsArchVariant(kMips32r6)); - GenInstrImmediate(BC, offset, CompactBranchType::COMPACT_BRANCH); -} - -void Assembler::balc(int32_t offset) { - DCHECK(IsMipsArchVariant(kMips32r6)); - GenInstrImmediate(BALC, offset, CompactBranchType::COMPACT_BRANCH); -} - -void Assembler::beq(Register rs, Register rt, int16_t offset) { - BlockTrampolinePoolScope block_trampoline_pool(this); - GenInstrImmediate(BEQ, rs, rt, offset); - BlockTrampolinePoolFor(1); // For associated delay slot. -} - -void Assembler::bgez(Register rs, int16_t offset) { - BlockTrampolinePoolScope block_trampoline_pool(this); - GenInstrImmediate(REGIMM, rs, BGEZ, offset); - BlockTrampolinePoolFor(1); // For associated delay slot. -} - -void Assembler::bgezc(Register rt, int16_t offset) { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK(rt != zero_reg); - GenInstrImmediate(BLEZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH); -} - -void Assembler::bgeuc(Register rs, Register rt, int16_t offset) { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK(rs != zero_reg); - DCHECK(rt != zero_reg); - DCHECK(rs.code() != rt.code()); - GenInstrImmediate(BLEZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH); -} - -void Assembler::bgec(Register rs, Register rt, int16_t offset) { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK(rs != zero_reg); - DCHECK(rt != zero_reg); - DCHECK(rs.code() != rt.code()); - GenInstrImmediate(BLEZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH); -} - -void Assembler::bgezal(Register rs, int16_t offset) { - DCHECK(!IsMipsArchVariant(kMips32r6) || rs == zero_reg); - DCHECK(rs != ra); - BlockTrampolinePoolScope block_trampoline_pool(this); - GenInstrImmediate(REGIMM, rs, BGEZAL, offset); - BlockTrampolinePoolFor(1); // For associated delay slot. -} - -void Assembler::bgtz(Register rs, int16_t offset) { - BlockTrampolinePoolScope block_trampoline_pool(this); - GenInstrImmediate(BGTZ, rs, zero_reg, offset); - BlockTrampolinePoolFor(1); // For associated delay slot. -} - -void Assembler::bgtzc(Register rt, int16_t offset) { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK(rt != zero_reg); - GenInstrImmediate(BGTZL, zero_reg, rt, offset, - CompactBranchType::COMPACT_BRANCH); -} - -void Assembler::blez(Register rs, int16_t offset) { - BlockTrampolinePoolScope block_trampoline_pool(this); - GenInstrImmediate(BLEZ, rs, zero_reg, offset); - BlockTrampolinePoolFor(1); // For associated delay slot. 
-} - -void Assembler::blezc(Register rt, int16_t offset) { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK(rt != zero_reg); - GenInstrImmediate(BLEZL, zero_reg, rt, offset, - CompactBranchType::COMPACT_BRANCH); -} - -void Assembler::bltzc(Register rt, int16_t offset) { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK(rt != zero_reg); - GenInstrImmediate(BGTZL, rt, rt, offset, CompactBranchType::COMPACT_BRANCH); -} - -void Assembler::bltuc(Register rs, Register rt, int16_t offset) { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK(rs != zero_reg); - DCHECK(rt != zero_reg); - DCHECK(rs.code() != rt.code()); - GenInstrImmediate(BGTZ, rs, rt, offset, CompactBranchType::COMPACT_BRANCH); -} - -void Assembler::bltc(Register rs, Register rt, int16_t offset) { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK(rs != zero_reg); - DCHECK(rt != zero_reg); - DCHECK(rs.code() != rt.code()); - GenInstrImmediate(BGTZL, rs, rt, offset, CompactBranchType::COMPACT_BRANCH); -} - -void Assembler::bltz(Register rs, int16_t offset) { - BlockTrampolinePoolScope block_trampoline_pool(this); - GenInstrImmediate(REGIMM, rs, BLTZ, offset); - BlockTrampolinePoolFor(1); // For associated delay slot. -} - -void Assembler::bltzal(Register rs, int16_t offset) { - DCHECK(!IsMipsArchVariant(kMips32r6) || rs == zero_reg); - DCHECK(rs != ra); - BlockTrampolinePoolScope block_trampoline_pool(this); - GenInstrImmediate(REGIMM, rs, BLTZAL, offset); - BlockTrampolinePoolFor(1); // For associated delay slot. -} - -void Assembler::bne(Register rs, Register rt, int16_t offset) { - BlockTrampolinePoolScope block_trampoline_pool(this); - GenInstrImmediate(BNE, rs, rt, offset); - BlockTrampolinePoolFor(1); // For associated delay slot. -} - -void Assembler::bovc(Register rs, Register rt, int16_t offset) { - DCHECK(IsMipsArchVariant(kMips32r6)); - if (rs.code() >= rt.code()) { - GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH); - } else { - GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH); - } -} - -void Assembler::bnvc(Register rs, Register rt, int16_t offset) { - DCHECK(IsMipsArchVariant(kMips32r6)); - if (rs.code() >= rt.code()) { - GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH); - } else { - GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH); - } -} - -void Assembler::blezalc(Register rt, int16_t offset) { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK(rt != zero_reg); - DCHECK(rt != ra); - GenInstrImmediate(BLEZ, zero_reg, rt, offset, - CompactBranchType::COMPACT_BRANCH); -} - -void Assembler::bgezalc(Register rt, int16_t offset) { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK(rt != zero_reg); - DCHECK(rt != ra); - GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH); -} - -void Assembler::bgezall(Register rs, int16_t offset) { - DCHECK(!IsMipsArchVariant(kMips32r6)); - DCHECK(rs != zero_reg); - DCHECK(rs != ra); - BlockTrampolinePoolScope block_trampoline_pool(this); - GenInstrImmediate(REGIMM, rs, BGEZALL, offset); - BlockTrampolinePoolFor(1); // For associated delay slot. 
-}
-
-void Assembler::bltzalc(Register rt, int16_t offset) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(rt != zero_reg);
- DCHECK(rt != ra);
- GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
-}
-
-void Assembler::bgtzalc(Register rt, int16_t offset) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(rt != zero_reg);
- DCHECK(rt != ra);
- GenInstrImmediate(BGTZ, zero_reg, rt, offset,
- CompactBranchType::COMPACT_BRANCH);
-}
-
-void Assembler::beqzalc(Register rt, int16_t offset) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(rt != zero_reg);
- DCHECK(rt != ra);
- GenInstrImmediate(ADDI, zero_reg, rt, offset,
- CompactBranchType::COMPACT_BRANCH);
-}
-
-void Assembler::bnezalc(Register rt, int16_t offset) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(rt != zero_reg);
- DCHECK(rt != ra);
- GenInstrImmediate(DADDI, zero_reg, rt, offset,
- CompactBranchType::COMPACT_BRANCH);
-}
-
-void Assembler::beqc(Register rs, Register rt, int16_t offset) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
- if (rs.code() < rt.code()) {
- GenInstrImmediate(ADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
- } else {
- GenInstrImmediate(ADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
- }
-}
-
-void Assembler::beqzc(Register rs, int32_t offset) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(rs != zero_reg);
- GenInstrImmediate(POP66, rs, offset, CompactBranchType::COMPACT_BRANCH);
-}
-
-void Assembler::bnec(Register rs, Register rt, int16_t offset) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(rs.code() != rt.code() && rs.code() != 0 && rt.code() != 0);
- if (rs.code() < rt.code()) {
- GenInstrImmediate(DADDI, rs, rt, offset, CompactBranchType::COMPACT_BRANCH);
- } else {
- GenInstrImmediate(DADDI, rt, rs, offset, CompactBranchType::COMPACT_BRANCH);
- }
-}
-
-void Assembler::bnezc(Register rs, int32_t offset) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(rs != zero_reg);
- GenInstrImmediate(POP76, rs, offset, CompactBranchType::COMPACT_BRANCH);
-}
-
-void Assembler::j(int32_t target) {
-#ifdef DEBUG
- // Get pc of delay slot.
- uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
- bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
- (kImm26Bits + kImmFieldShift)) == 0;
- DCHECK(in_range && ((target & 3) == 0));
-#endif
- BlockTrampolinePoolScope block_trampoline_pool(this);
- GenInstrJump(J, (target >> 2) & kImm26Mask);
- BlockTrampolinePoolFor(1); // For associated delay slot.
-}
-
-void Assembler::jr(Register rs) {
- if (!IsMipsArchVariant(kMips32r6)) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
- BlockTrampolinePoolFor(1); // For associated delay slot.
- } else {
- jalr(rs, zero_reg);
- }
-}
-
-void Assembler::jal(int32_t target) {
-#ifdef DEBUG
- // Get pc of delay slot.
- uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
- bool in_range = ((ipc ^ static_cast<uint32_t>(target)) >>
- (kImm26Bits + kImmFieldShift)) == 0;
- DCHECK(in_range && ((target & 3) == 0));
-#endif
- BlockTrampolinePoolScope block_trampoline_pool(this);
- GenInstrJump(JAL, (target >> 2) & kImm26Mask);
- BlockTrampolinePoolFor(1); // For associated delay slot.
-}
-
-void Assembler::jalr(Register rs, Register rd) {
- DCHECK(rs.code() != rd.code());
- BlockTrampolinePoolScope block_trampoline_pool(this);
- GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
- BlockTrampolinePoolFor(1); // For associated delay slot.
-} - -void Assembler::jic(Register rt, int16_t offset) { - DCHECK(IsMipsArchVariant(kMips32r6)); - GenInstrImmediate(POP66, zero_reg, rt, offset); -} - -void Assembler::jialc(Register rt, int16_t offset) { - DCHECK(IsMipsArchVariant(kMips32r6)); - GenInstrImmediate(POP76, zero_reg, rt, offset); -} - -// -------Data-processing-instructions--------- - -// Arithmetic. - -void Assembler::addu(Register rd, Register rs, Register rt) { - GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU); -} - -void Assembler::addiu(Register rd, Register rs, int32_t j) { - GenInstrImmediate(ADDIU, rs, rd, j); -} - -void Assembler::subu(Register rd, Register rs, Register rt) { - GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU); -} - -void Assembler::mul(Register rd, Register rs, Register rt) { - if (!IsMipsArchVariant(kMips32r6)) { - GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL); - } else { - GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH); - } -} - -void Assembler::mulu(Register rd, Register rs, Register rt) { - DCHECK(IsMipsArchVariant(kMips32r6)); - GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U); -} - -void Assembler::muh(Register rd, Register rs, Register rt) { - DCHECK(IsMipsArchVariant(kMips32r6)); - GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH); -} - -void Assembler::muhu(Register rd, Register rs, Register rt) { - DCHECK(IsMipsArchVariant(kMips32r6)); - GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U); -} - -void Assembler::mod(Register rd, Register rs, Register rt) { - DCHECK(IsMipsArchVariant(kMips32r6)); - GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD); -} - -void Assembler::modu(Register rd, Register rs, Register rt) { - DCHECK(IsMipsArchVariant(kMips32r6)); - GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U); -} - -void Assembler::mult(Register rs, Register rt) { - GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT); -} - -void Assembler::multu(Register rs, Register rt) { - GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU); -} - -void Assembler::div(Register rs, Register rt) { - GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV); -} - -void Assembler::div(Register rd, Register rs, Register rt) { - DCHECK(IsMipsArchVariant(kMips32r6)); - GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD); -} - -void Assembler::divu(Register rs, Register rt) { - GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU); -} - -void Assembler::divu(Register rd, Register rs, Register rt) { - DCHECK(IsMipsArchVariant(kMips32r6)); - GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U); -} - -// Logical. - -void Assembler::and_(Register rd, Register rs, Register rt) { - GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND); -} - -void Assembler::andi(Register rt, Register rs, int32_t j) { - DCHECK(is_uint16(j)); - GenInstrImmediate(ANDI, rs, rt, j); -} - -void Assembler::or_(Register rd, Register rs, Register rt) { - GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR); -} - -void Assembler::ori(Register rt, Register rs, int32_t j) { - DCHECK(is_uint16(j)); - GenInstrImmediate(ORI, rs, rt, j); -} - -void Assembler::xor_(Register rd, Register rs, Register rt) { - GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR); -} - -void Assembler::xori(Register rt, Register rs, int32_t j) { - DCHECK(is_uint16(j)); - GenInstrImmediate(XORI, rs, rt, j); -} - -void Assembler::nor(Register rd, Register rs, Register rt) { - GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR); -} - -// Shifts. 
-void Assembler::sll(Register rd, Register rt, uint16_t sa,
- bool coming_from_nop) {
- // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
- // generated using the sll instruction. They must be generated using
- // nop(int/NopMarkerTypes).
- DCHECK(coming_from_nop || !(rd == zero_reg && rt == zero_reg));
- GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SLL);
-}
-
-void Assembler::sllv(Register rd, Register rt, Register rs) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
-}
-
-void Assembler::srl(Register rd, Register rt, uint16_t sa) {
- GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRL);
-}
-
-void Assembler::srlv(Register rd, Register rt, Register rs) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
-}
-
-void Assembler::sra(Register rd, Register rt, uint16_t sa) {
- GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa & 0x1F, SRA);
-}
-
-void Assembler::srav(Register rd, Register rt, Register rs) {
- GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
-}
-
-void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
- // Should be called via MacroAssembler::Ror.
- DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
- DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
- Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift) |
- (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
- emit(instr);
-}
-
-void Assembler::rotrv(Register rd, Register rt, Register rs) {
- // Should be called via MacroAssembler::Ror.
- DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
- DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
- Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift) |
- (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
- emit(instr);
-}
-
-void Assembler::lsa(Register rd, Register rt, Register rs, uint8_t sa) {
- DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
- DCHECK_LE(sa, 3);
- DCHECK(IsMipsArchVariant(kMips32r6));
- Instr instr = SPECIAL | rs.code() << kRsShift | rt.code() << kRtShift |
- rd.code() << kRdShift | sa << kSaShift | LSA;
- emit(instr);
-}
-
-// ------------Memory-instructions-------------
-
-void Assembler::AdjustBaseAndOffset(MemOperand* src,
- OffsetAccessType access_type,
- int second_access_add_to_offset) {
- // This method is used to adjust the base register and offset pair
- // for a load/store when the offset doesn't fit into int16_t.
- // It is assumed that 'base + offset' is sufficiently aligned for memory
- // operands that are machine word in size or smaller. For doubleword-sized
- // operands it's assumed that 'base' is a multiple of 8, while 'offset'
- // may be a multiple of 4 (e.g. 4-byte-aligned long and double arguments
- // and spilled variables on the stack accessed relative to the stack
- // pointer register).
- // We preserve the "alignment" of 'offset' by adjusting it by a multiple of 8.
-
- bool doubleword_aligned = (src->offset() & (kDoubleSize - 1)) == 0;
- bool two_accesses = static_cast<bool>(access_type) || !doubleword_aligned;
- DCHECK_LE(second_access_add_to_offset, 7); // Must be <= 7.
-
- // is_int16 must be passed a signed value, hence the static cast below.
- if (is_int16(src->offset()) &&
- (!two_accesses || is_int16(static_cast<int32_t>(
- src->offset() + second_access_add_to_offset)))) {
- // Nothing to do: 'offset' (and, if needed, 'offset + 4', or other specified
- // value) fits into int16_t.
- return;
- }
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- DCHECK(src->rm() != scratch); // Must not overwrite the register 'base'
- // while loading 'offset'.
-
-#ifdef DEBUG
- // Remember the "(mis)alignment" of 'offset', it will be checked at the end.
- uint32_t misalignment = src->offset() & (kDoubleSize - 1);
-#endif
-
- // Do not load the whole 32-bit 'offset' if it can be represented as
- // a sum of two 16-bit signed offsets. This can save an instruction or two.
- // To simplify matters, only do this for a symmetric range of offsets from
- // about -64KB to about +64KB, allowing further addition of 4 when accessing
- // 64-bit variables with two 32-bit accesses.
- constexpr int32_t kMinOffsetForSimpleAdjustment =
- 0x7FF8; // Max int16_t that's a multiple of 8.
- constexpr int32_t kMaxOffsetForSimpleAdjustment =
- 2 * kMinOffsetForSimpleAdjustment;
- if (0 <= src->offset() && src->offset() <= kMaxOffsetForSimpleAdjustment) {
- addiu(scratch, src->rm(), kMinOffsetForSimpleAdjustment);
- src->offset_ -= kMinOffsetForSimpleAdjustment;
- } else if (-kMaxOffsetForSimpleAdjustment <= src->offset() &&
- src->offset() < 0) {
- addiu(scratch, src->rm(), -kMinOffsetForSimpleAdjustment);
- src->offset_ += kMinOffsetForSimpleAdjustment;
- } else if (IsMipsArchVariant(kMips32r6)) {
- // On r6 take advantage of the aui instruction, e.g.:
- // aui at, base, offset_high
- // lw reg_lo, offset_low(at)
- // lw reg_hi, (offset_low+4)(at)
- // or when offset_low+4 overflows int16_t:
- // aui at, base, offset_high
- // addiu at, at, 8
- // lw reg_lo, (offset_low-8)(at)
- // lw reg_hi, (offset_low-4)(at)
- int16_t offset_high = static_cast<int16_t>(src->offset() >> 16);
- int16_t offset_low = static_cast<int16_t>(src->offset());
- offset_high += (offset_low < 0)
- ? 1
- : 0; // Account for offset sign extension in load/store.
- aui(scratch, src->rm(), static_cast<uint16_t>(offset_high));
- if (two_accesses && !is_int16(static_cast<int32_t>(
- offset_low + second_access_add_to_offset))) {
- // Avoid overflow in the 16-bit offset of the load/store instruction when
- // adding 4.
- addiu(scratch, scratch, kDoubleSize);
- offset_low -= kDoubleSize;
- }
- src->offset_ = offset_low;
- } else {
- // Do not load the whole 32-bit 'offset' if it can be represented as
- // a sum of three 16-bit signed offsets. This can save an instruction.
- // To simplify matters, only do this for a symmetric range of offsets from
- // about -96KB to about +96KB, allowing further addition of 4 when accessing
- // 64-bit variables with two 32-bit accesses.
- constexpr int32_t kMinOffsetForMediumAdjustment =
- 2 * kMinOffsetForSimpleAdjustment;
- constexpr int32_t kMaxOffsetForMediumAdjustment =
- 3 * kMinOffsetForSimpleAdjustment;
- if (0 <= src->offset() && src->offset() <= kMaxOffsetForMediumAdjustment) {
- addiu(scratch, src->rm(), kMinOffsetForMediumAdjustment / 2);
- addiu(scratch, scratch, kMinOffsetForMediumAdjustment / 2);
- src->offset_ -= kMinOffsetForMediumAdjustment;
- } else if (-kMaxOffsetForMediumAdjustment <= src->offset() &&
- src->offset() < 0) {
- addiu(scratch, src->rm(), -kMinOffsetForMediumAdjustment / 2);
- addiu(scratch, scratch, -kMinOffsetForMediumAdjustment / 2);
- src->offset_ += kMinOffsetForMediumAdjustment;
- } else {
- // Now that all shorter options have been exhausted, load the full 32-bit
- // offset.
- int32_t loaded_offset = RoundDown(src->offset(), kDoubleSize);
- lui(scratch, (loaded_offset >> kLuiShift) & kImm16Mask);
- ori(scratch, scratch, loaded_offset & kImm16Mask); // Load 32-bit offset.
- addu(scratch, scratch, src->rm());
- src->offset_ -= loaded_offset;
- }
- }
- src->rm_ = scratch;
-
- DCHECK(is_int16(src->offset()));
- if (two_accesses) {
- DCHECK(is_int16(
- static_cast<int32_t>(src->offset() + second_access_add_to_offset)));
- }
- DCHECK(misalignment == (src->offset() & (kDoubleSize - 1)));
-}
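
Stripped of the emitted instructions, the simple-adjustment path above is plain integer splitting: rewrite base + offset as (base + chunk) + residual with the residual (and residual + 4 for a second word) back in int16_t range. A minimal standalone check (kMinOffsetForSimpleAdjustment copied from the code above; everything else illustrative):

#include <cassert>
#include <cstdint>

// Does v fit in a signed 16-bit immediate?
static bool fits_int16(int32_t v) { return v >= -32768 && v <= 32767; }

int main() {
  const int32_t kMinOffsetForSimpleAdjustment = 0x7FF8;  // max int16 mult. of 8

  int32_t offset = 0x9000;  // too large for one int16 immediate
  assert(!fits_int16(offset));

  // Fold a fixed chunk into the base register (one addiu), leaving a
  // residual that the load/store immediate can carry.
  int32_t chunk = kMinOffsetForSimpleAdjustment;
  int32_t residual = offset - chunk;  // 0x9000 - 0x7FF8 = 0x1008

  assert(fits_int16(residual));
  assert(fits_int16(residual + 4));        // second access of a doubleword
  assert(chunk + residual == offset);      // semantics preserved
  assert((residual & 7) == (offset & 7));  // "alignment" of offset preserved
  return 0;
}
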
-
-void Assembler::lb(Register rd, const MemOperand& rs) {
- MemOperand source = rs;
- AdjustBaseAndOffset(&source);
- GenInstrImmediate(LB, source.rm(), rd, source.offset());
-}
-
-void Assembler::lbu(Register rd, const MemOperand& rs) {
- MemOperand source = rs;
- AdjustBaseAndOffset(&source);
- GenInstrImmediate(LBU, source.rm(), rd, source.offset());
-}
-
-void Assembler::lh(Register rd, const MemOperand& rs) {
- MemOperand source = rs;
- AdjustBaseAndOffset(&source);
- GenInstrImmediate(LH, source.rm(), rd, source.offset());
-}
-
-void Assembler::lhu(Register rd, const MemOperand& rs) {
- MemOperand source = rs;
- AdjustBaseAndOffset(&source);
- GenInstrImmediate(LHU, source.rm(), rd, source.offset());
-}
-
-void Assembler::lw(Register rd, const MemOperand& rs) {
- MemOperand source = rs;
- AdjustBaseAndOffset(&source);
- GenInstrImmediate(LW, source.rm(), rd, source.offset());
-}
-
-void Assembler::lwl(Register rd, const MemOperand& rs) {
- DCHECK(is_int16(rs.offset_));
- DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
- IsMipsArchVariant(kMips32r2));
- GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
-}
-
-void Assembler::lwr(Register rd, const MemOperand& rs) {
- DCHECK(is_int16(rs.offset_));
- DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
- IsMipsArchVariant(kMips32r2));
- GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
-}
-
-void Assembler::sb(Register rd, const MemOperand& rs) {
- MemOperand source = rs;
- AdjustBaseAndOffset(&source);
- GenInstrImmediate(SB, source.rm(), rd, source.offset());
-}
-
-void Assembler::sh(Register rd, const MemOperand& rs) {
- MemOperand source = rs;
- AdjustBaseAndOffset(&source);
- GenInstrImmediate(SH, source.rm(), rd, source.offset());
-}
-
-void Assembler::sw(Register rd, const MemOperand& rs) {
- MemOperand source = rs;
- AdjustBaseAndOffset(&source);
- GenInstrImmediate(SW, source.rm(), rd, source.offset());
-}
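
The loads and stores above delegate any oversized offset to AdjustBaseAndOffset. Its r6 aui path splits the offset into 16-bit halves and pre-increments the high half when the low half will be sign-extended as negative by the load/store; that arithmetic can be checked standalone (illustrative values, not V8 code):

#include <cassert>
#include <cstdint>

int main() {
  int32_t offset = 0x0001F000;  // low half 0xF000 is negative as int16_t

  int16_t offset_high = static_cast<int16_t>(offset >> 16);  // 0x0001
  int16_t offset_low = static_cast<int16_t>(offset);         // -0x1000
  offset_high += (offset_low < 0) ? 1 : 0;  // account for sign extension

  // aui adds (offset_high << 16); the load/store adds sign-extended offset_low.
  int32_t rebuilt = (static_cast<int32_t>(offset_high) << 16) + offset_low;
  assert(rebuilt == offset);
  return 0;
}
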
-
-void Assembler::swl(Register rd, const MemOperand& rs) {
- DCHECK(is_int16(rs.offset_));
- DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
- IsMipsArchVariant(kMips32r2));
- GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
-}
-
-void Assembler::swr(Register rd, const MemOperand& rs) {
- DCHECK(is_int16(rs.offset_));
- DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
- IsMipsArchVariant(kMips32r2));
- GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
-}
-
-void Assembler::ll(Register rd, const MemOperand& rs) {
- if (IsMipsArchVariant(kMips32r6)) {
- DCHECK(is_int9(rs.offset_));
- GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 0, LL_R6);
- } else {
- DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
- IsMipsArchVariant(kMips32r2));
- DCHECK(is_int16(rs.offset_));
- GenInstrImmediate(LL, rs.rm(), rd, rs.offset_);
- }
-}
-
-void Assembler::sc(Register rd, const MemOperand& rs) {
- if (IsMipsArchVariant(kMips32r6)) {
- DCHECK(is_int9(rs.offset_));
- GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 0, SC_R6);
- } else {
- DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) ||
- IsMipsArchVariant(kMips32r2));
- GenInstrImmediate(SC, rs.rm(), rd, rs.offset_);
- }
-}
-
-void Assembler::llx(Register 
rd, const MemOperand& rs) { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK(is_int9(rs.offset_)); - GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 1, LL_R6); -} - -void Assembler::scx(Register rd, const MemOperand& rs) { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK(is_int9(rs.offset_)); - GenInstrImmediate(SPECIAL3, rs.rm(), rd, rs.offset_, 1, SC_R6); -} - -void Assembler::lui(Register rd, int32_t j) { - DCHECK(is_uint16(j) || is_int16(j)); - GenInstrImmediate(LUI, zero_reg, rd, j); -} - -void Assembler::aui(Register rt, Register rs, int32_t j) { - // This instruction uses same opcode as 'lui'. The difference in encoding is - // 'lui' has zero reg. for rs field. - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK(rs != zero_reg); - DCHECK(is_uint16(j)); - GenInstrImmediate(LUI, rs, rt, j); -} - -// ---------PC-Relative instructions----------- - -void Assembler::addiupc(Register rs, int32_t imm19) { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK(rs.is_valid() && is_int19(imm19)); - uint32_t imm21 = ADDIUPC << kImm19Bits | (imm19 & kImm19Mask); - GenInstrImmediate(PCREL, rs, imm21); -} - -void Assembler::lwpc(Register rs, int32_t offset19) { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK(rs.is_valid() && is_int19(offset19)); - uint32_t imm21 = LWPC << kImm19Bits | (offset19 & kImm19Mask); - GenInstrImmediate(PCREL, rs, imm21); -} - -void Assembler::auipc(Register rs, int16_t imm16) { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK(rs.is_valid()); - uint32_t imm21 = AUIPC << kImm16Bits | (imm16 & kImm16Mask); - GenInstrImmediate(PCREL, rs, imm21); -} - -void Assembler::aluipc(Register rs, int16_t imm16) { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK(rs.is_valid()); - uint32_t imm21 = ALUIPC << kImm16Bits | (imm16 & kImm16Mask); - GenInstrImmediate(PCREL, rs, imm21); -} - -// -------------Misc-instructions-------------- - -// Break / Trap instructions. -void Assembler::break_(uint32_t code, bool break_as_stop) { - DCHECK_EQ(code & ~0xFFFFF, 0); - // We need to invalidate breaks that could be stops as well because the - // simulator expects a char pointer after the stop instruction. - // See constants-mips.h for explanation. 
- DCHECK( - (break_as_stop && code <= kMaxStopCode && code > kMaxWatchpointCode) || - (!break_as_stop && (code > kMaxStopCode || code <= kMaxWatchpointCode))); - Instr break_instr = SPECIAL | BREAK | (code << 6); - emit(break_instr); -} - -void Assembler::stop(uint32_t code) { - DCHECK_GT(code, kMaxWatchpointCode); - DCHECK_LE(code, kMaxStopCode); -#if V8_HOST_ARCH_MIPS - break_(0x54321); -#else // V8_HOST_ARCH_MIPS - break_(code, true); -#endif -} - -void Assembler::tge(Register rs, Register rt, uint16_t code) { - DCHECK(is_uint10(code)); - Instr instr = - SPECIAL | TGE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6; - emit(instr); -} - -void Assembler::tgeu(Register rs, Register rt, uint16_t code) { - DCHECK(is_uint10(code)); - Instr instr = SPECIAL | TGEU | rs.code() << kRsShift | rt.code() << kRtShift | - code << 6; - emit(instr); -} - -void Assembler::tlt(Register rs, Register rt, uint16_t code) { - DCHECK(is_uint10(code)); - Instr instr = - SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6; - emit(instr); -} - -void Assembler::tltu(Register rs, Register rt, uint16_t code) { - DCHECK(is_uint10(code)); - Instr instr = SPECIAL | TLTU | rs.code() << kRsShift | rt.code() << kRtShift | - code << 6; - emit(instr); -} - -void Assembler::teq(Register rs, Register rt, uint16_t code) { - DCHECK(is_uint10(code)); - Instr instr = - SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6; - emit(instr); -} - -void Assembler::tne(Register rs, Register rt, uint16_t code) { - DCHECK(is_uint10(code)); - Instr instr = - SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6; - emit(instr); -} - -void Assembler::sync() { - Instr sync_instr = SPECIAL | SYNC; - emit(sync_instr); -} - -// Move from HI/LO register. - -void Assembler::mfhi(Register rd) { - GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI); -} - -void Assembler::mflo(Register rd) { - GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO); -} - -// Set on less than instructions. -void Assembler::slt(Register rd, Register rs, Register rt) { - GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT); -} - -void Assembler::sltu(Register rd, Register rs, Register rt) { - GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU); -} - -void Assembler::slti(Register rt, Register rs, int32_t j) { - GenInstrImmediate(SLTI, rs, rt, j); -} - -void Assembler::sltiu(Register rt, Register rs, int32_t j) { - GenInstrImmediate(SLTIU, rs, rt, j); -} - -// Conditional move. -void Assembler::movz(Register rd, Register rs, Register rt) { - GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ); -} - -void Assembler::movn(Register rd, Register rs, Register rt) { - GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN); -} - -void Assembler::movt(Register rd, Register rs, uint16_t cc) { - Register rt = Register::from_code((cc & 0x0007) << 2 | 1); - GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI); -} - -void Assembler::movf(Register rd, Register rs, uint16_t cc) { - Register rt = Register::from_code((cc & 0x0007) << 2 | 0); - GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI); -} - -void Assembler::seleqz(Register rd, Register rs, Register rt) { - DCHECK(IsMipsArchVariant(kMips32r6)); - GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELEQZ_S); -} - -// Bit twiddling. -void Assembler::clz(Register rd, Register rs) { - if (!IsMipsArchVariant(kMips32r6)) { - // Clz instr requires same GPR number in 'rd' and 'rt' fields. 
- GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ); - } else { - GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6); - } -} - -void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) { - // Should be called via MacroAssembler::Ins. - // Ins instr has 'rt' field as dest, and two uint5: msb, lsb. - DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); - GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS); -} - -void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) { - // Should be called via MacroAssembler::Ext. - // Ext instr has 'rt' field as dest, and two uint5: msb, lsb. - DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); - GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT); -} - -void Assembler::bitswap(Register rd, Register rt) { - DCHECK(IsMipsArchVariant(kMips32r6)); - GenInstrRegister(SPECIAL3, zero_reg, rt, rd, 0, BSHFL); -} - -void Assembler::pref(int32_t hint, const MemOperand& rs) { - DCHECK(!IsMipsArchVariant(kLoongson)); - DCHECK(is_uint5(hint) && is_uint16(rs.offset_)); - Instr instr = - PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift) | (rs.offset_); - emit(instr); -} - -void Assembler::align(Register rd, Register rs, Register rt, uint8_t bp) { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK(is_uint3(bp)); - uint16_t sa = (ALIGN << kBp2Bits) | bp; - GenInstrRegister(SPECIAL3, rs, rt, rd, sa, BSHFL); -} - -// Byte swap. -void Assembler::wsbh(Register rd, Register rt) { - DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); - GenInstrRegister(SPECIAL3, zero_reg, rt, rd, WSBH, BSHFL); -} - -void Assembler::seh(Register rd, Register rt) { - DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); - GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEH, BSHFL); -} - -void Assembler::seb(Register rd, Register rt) { - DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); - GenInstrRegister(SPECIAL3, zero_reg, rt, rd, SEB, BSHFL); -} - -// --------Coprocessor-instructions---------------- - -// Load, store, move. 
-void Assembler::lwc1(FPURegister fd, const MemOperand& src) { - MemOperand tmp = src; - AdjustBaseAndOffset(&tmp); - GenInstrImmediate(LWC1, tmp.rm(), fd, tmp.offset()); -} - -void Assembler::swc1(FPURegister fd, const MemOperand& src) { - MemOperand tmp = src; - AdjustBaseAndOffset(&tmp); - GenInstrImmediate(SWC1, tmp.rm(), fd, tmp.offset()); -} - -void Assembler::mtc1(Register rt, FPURegister fs) { - GenInstrRegister(COP1, MTC1, rt, fs, f0); -} - -void Assembler::mthc1(Register rt, FPURegister fs) { - GenInstrRegister(COP1, MTHC1, rt, fs, f0); -} - -void Assembler::mfc1(Register rt, FPURegister fs) { - GenInstrRegister(COP1, MFC1, rt, fs, f0); -} - -void Assembler::mfhc1(Register rt, FPURegister fs) { - GenInstrRegister(COP1, MFHC1, rt, fs, f0); -} - -void Assembler::ctc1(Register rt, FPUControlRegister fs) { - GenInstrRegister(COP1, CTC1, rt, fs); -} - -void Assembler::cfc1(Register rt, FPUControlRegister fs) { - GenInstrRegister(COP1, CFC1, rt, fs); -} - -void Assembler::movn_s(FPURegister fd, FPURegister fs, Register rt) { - DCHECK(!IsMipsArchVariant(kMips32r6)); - GenInstrRegister(COP1, S, rt, fs, fd, MOVN_C); -} - -void Assembler::movn_d(FPURegister fd, FPURegister fs, Register rt) { - DCHECK(!IsMipsArchVariant(kMips32r6)); - GenInstrRegister(COP1, D, rt, fs, fd, MOVN_C); -} - -void Assembler::sel(SecondaryField fmt, FPURegister fd, FPURegister fs, - FPURegister ft) { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK((fmt == D) || (fmt == S)); - - GenInstrRegister(COP1, fmt, ft, fs, fd, SEL); -} - -void Assembler::sel_s(FPURegister fd, FPURegister fs, FPURegister ft) { - sel(S, fd, fs, ft); -} - -void Assembler::sel_d(FPURegister fd, FPURegister fs, FPURegister ft) { - sel(D, fd, fs, ft); -} - -void Assembler::seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs, - FPURegister ft) { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK((fmt == D) || (fmt == S)); - GenInstrRegister(COP1, fmt, ft, fs, fd, SELEQZ_C); -} - -void Assembler::selnez(Register rd, Register rs, Register rt) { - DCHECK(IsMipsArchVariant(kMips32r6)); - GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S); -} - -void Assembler::selnez(SecondaryField fmt, FPURegister fd, FPURegister fs, - FPURegister ft) { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK((fmt == D) || (fmt == S)); - GenInstrRegister(COP1, fmt, ft, fs, fd, SELNEZ_C); -} - -void Assembler::seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft) { - seleqz(D, fd, fs, ft); -} - -void Assembler::seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft) { - seleqz(S, fd, fs, ft); -} - -void Assembler::selnez_d(FPURegister fd, FPURegister fs, FPURegister ft) { - selnez(D, fd, fs, ft); -} - -void Assembler::selnez_s(FPURegister fd, FPURegister fs, FPURegister ft) { - selnez(S, fd, fs, ft); -} - -void Assembler::movz_s(FPURegister fd, FPURegister fs, Register rt) { - DCHECK(!IsMipsArchVariant(kMips32r6)); - GenInstrRegister(COP1, S, rt, fs, fd, MOVZ_C); -} - -void Assembler::movz_d(FPURegister fd, FPURegister fs, Register rt) { - DCHECK(!IsMipsArchVariant(kMips32r6)); - GenInstrRegister(COP1, D, rt, fs, fd, MOVZ_C); -} - -void Assembler::movt_s(FPURegister fd, FPURegister fs, uint16_t cc) { - DCHECK(!IsMipsArchVariant(kMips32r6)); - FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 1); - GenInstrRegister(COP1, S, ft, fs, fd, MOVF); -} - -void Assembler::movt_d(FPURegister fd, FPURegister fs, uint16_t cc) { - DCHECK(!IsMipsArchVariant(kMips32r6)); - FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 1); - GenInstrRegister(COP1, D, ft, 
fs, fd, MOVF); -} - -void Assembler::movf_s(FPURegister fd, FPURegister fs, uint16_t cc) { - DCHECK(!IsMipsArchVariant(kMips32r6)); - FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 0); - GenInstrRegister(COP1, S, ft, fs, fd, MOVF); -} - -void Assembler::movf_d(FPURegister fd, FPURegister fs, uint16_t cc) { - DCHECK(!IsMipsArchVariant(kMips32r6)); - FPURegister ft = FPURegister::from_code((cc & 0x0007) << 2 | 0); - GenInstrRegister(COP1, D, ft, fs, fd, MOVF); -} - -// Arithmetic. - -void Assembler::add_s(FPURegister fd, FPURegister fs, FPURegister ft) { - GenInstrRegister(COP1, S, ft, fs, fd, ADD_S); -} - -void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) { - GenInstrRegister(COP1, D, ft, fs, fd, ADD_D); -} - -void Assembler::sub_s(FPURegister fd, FPURegister fs, FPURegister ft) { - GenInstrRegister(COP1, S, ft, fs, fd, SUB_S); -} - -void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) { - GenInstrRegister(COP1, D, ft, fs, fd, SUB_D); -} - -void Assembler::mul_s(FPURegister fd, FPURegister fs, FPURegister ft) { - GenInstrRegister(COP1, S, ft, fs, fd, MUL_S); -} - -void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) { - GenInstrRegister(COP1, D, ft, fs, fd, MUL_D); -} - -void Assembler::madd_s(FPURegister fd, FPURegister fr, FPURegister fs, - FPURegister ft) { - DCHECK(IsMipsArchVariant(kMips32r2)); - GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_S); -} - -void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs, - FPURegister ft) { - DCHECK(IsMipsArchVariant(kMips32r2)); - GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D); -} - -void Assembler::msub_s(FPURegister fd, FPURegister fr, FPURegister fs, - FPURegister ft) { - DCHECK(IsMipsArchVariant(kMips32r2)); - GenInstrRegister(COP1X, fr, ft, fs, fd, MSUB_S); -} - -void Assembler::msub_d(FPURegister fd, FPURegister fr, FPURegister fs, - FPURegister ft) { - DCHECK(IsMipsArchVariant(kMips32r2)); - GenInstrRegister(COP1X, fr, ft, fs, fd, MSUB_D); -} - -void Assembler::maddf_s(FPURegister fd, FPURegister fs, FPURegister ft) { - DCHECK(IsMipsArchVariant(kMips32r6)); - GenInstrRegister(COP1, S, ft, fs, fd, MADDF_S); -} - -void Assembler::maddf_d(FPURegister fd, FPURegister fs, FPURegister ft) { - DCHECK(IsMipsArchVariant(kMips32r6)); - GenInstrRegister(COP1, D, ft, fs, fd, MADDF_D); -} - -void Assembler::msubf_s(FPURegister fd, FPURegister fs, FPURegister ft) { - DCHECK(IsMipsArchVariant(kMips32r6)); - GenInstrRegister(COP1, S, ft, fs, fd, MSUBF_S); -} - -void Assembler::msubf_d(FPURegister fd, FPURegister fs, FPURegister ft) { - DCHECK(IsMipsArchVariant(kMips32r6)); - GenInstrRegister(COP1, D, ft, fs, fd, MSUBF_D); -} - -void Assembler::div_s(FPURegister fd, FPURegister fs, FPURegister ft) { - GenInstrRegister(COP1, S, ft, fs, fd, DIV_S); -} - -void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) { - GenInstrRegister(COP1, D, ft, fs, fd, DIV_D); -} - -void Assembler::abs_s(FPURegister fd, FPURegister fs) { - GenInstrRegister(COP1, S, f0, fs, fd, ABS_S); -} - -void Assembler::abs_d(FPURegister fd, FPURegister fs) { - GenInstrRegister(COP1, D, f0, fs, fd, ABS_D); -} - -void Assembler::mov_d(FPURegister fd, FPURegister fs) { - GenInstrRegister(COP1, D, f0, fs, fd, MOV_D); -} - -void Assembler::mov_s(FPURegister fd, FPURegister fs) { - GenInstrRegister(COP1, S, f0, fs, fd, MOV_S); -} - -void Assembler::neg_s(FPURegister fd, FPURegister fs) { - GenInstrRegister(COP1, S, f0, fs, fd, NEG_S); -} - -void Assembler::neg_d(FPURegister fd, FPURegister fs) { - 
GenInstrRegister(COP1, D, f0, fs, fd, NEG_D); -} - -void Assembler::sqrt_s(FPURegister fd, FPURegister fs) { - GenInstrRegister(COP1, S, f0, fs, fd, SQRT_S); -} - -void Assembler::sqrt_d(FPURegister fd, FPURegister fs) { - GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D); -} - -void Assembler::rsqrt_s(FPURegister fd, FPURegister fs) { - DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); - GenInstrRegister(COP1, S, f0, fs, fd, RSQRT_S); -} - -void Assembler::rsqrt_d(FPURegister fd, FPURegister fs) { - DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); - GenInstrRegister(COP1, D, f0, fs, fd, RSQRT_D); -} - -void Assembler::recip_d(FPURegister fd, FPURegister fs) { - DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); - GenInstrRegister(COP1, D, f0, fs, fd, RECIP_D); -} - -void Assembler::recip_s(FPURegister fd, FPURegister fs) { - DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); - GenInstrRegister(COP1, S, f0, fs, fd, RECIP_S); -} - -// Conversions. - -void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) { - GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S); -} - -void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) { - GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D); -} - -void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) { - GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S); -} - -void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) { - GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D); -} - -void Assembler::round_w_s(FPURegister fd, FPURegister fs) { - GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S); -} - -void Assembler::round_w_d(FPURegister fd, FPURegister fs) { - GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D); -} - -void Assembler::floor_w_s(FPURegister fd, FPURegister fs) { - GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S); -} - -void Assembler::floor_w_d(FPURegister fd, FPURegister fs) { - GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D); -} - -void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) { - GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S); -} - -void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) { - GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D); -} - -void Assembler::rint_s(FPURegister fd, FPURegister fs) { rint(S, fd, fs); } - -void Assembler::rint(SecondaryField fmt, FPURegister fd, FPURegister fs) { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK((fmt == D) || (fmt == S)); - GenInstrRegister(COP1, fmt, f0, fs, fd, RINT); -} - -void Assembler::rint_d(FPURegister fd, FPURegister fs) { rint(D, fd, fs); } - -void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) { - DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && - IsFp64Mode()); - GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S); -} - -void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) { - DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && - IsFp64Mode()); - GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D); -} - -void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) { - DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && - IsFp64Mode()); - GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S); -} - -void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) { - DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && - IsFp64Mode()); - GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D); -} - -void Assembler::round_l_s(FPURegister fd, FPURegister fs) { - DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && - IsFp64Mode()); 
- GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S); -} - -void Assembler::round_l_d(FPURegister fd, FPURegister fs) { - DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && - IsFp64Mode()); - GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D); -} - -void Assembler::floor_l_s(FPURegister fd, FPURegister fs) { - DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && - IsFp64Mode()); - GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S); -} - -void Assembler::floor_l_d(FPURegister fd, FPURegister fs) { - DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && - IsFp64Mode()); - GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D); -} - -void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) { - DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && - IsFp64Mode()); - GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S); -} - -void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) { - DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && - IsFp64Mode()); - GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D); -} - -void Assembler::class_s(FPURegister fd, FPURegister fs) { - DCHECK(IsMipsArchVariant(kMips32r6)); - GenInstrRegister(COP1, S, f0, fs, fd, CLASS_S); -} - -void Assembler::class_d(FPURegister fd, FPURegister fs) { - DCHECK(IsMipsArchVariant(kMips32r6)); - GenInstrRegister(COP1, D, f0, fs, fd, CLASS_D); -} - -void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister fs, - FPURegister ft) { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK((fmt == D) || (fmt == S)); - GenInstrRegister(COP1, fmt, ft, fs, fd, MIN); -} - -void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister fs, - FPURegister ft) { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK((fmt == D) || (fmt == S)); - GenInstrRegister(COP1, fmt, ft, fs, fd, MINA); -} - -void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister fs, - FPURegister ft) { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK((fmt == D) || (fmt == S)); - GenInstrRegister(COP1, fmt, ft, fs, fd, MAX); -} - -void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister fs, - FPURegister ft) { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK((fmt == D) || (fmt == S)); - GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA); -} - -void Assembler::min_s(FPURegister fd, FPURegister fs, FPURegister ft) { - min(S, fd, fs, ft); -} - -void Assembler::min_d(FPURegister fd, FPURegister fs, FPURegister ft) { - min(D, fd, fs, ft); -} - -void Assembler::max_s(FPURegister fd, FPURegister fs, FPURegister ft) { - max(S, fd, fs, ft); -} - -void Assembler::max_d(FPURegister fd, FPURegister fs, FPURegister ft) { - max(D, fd, fs, ft); -} - -void Assembler::mina_s(FPURegister fd, FPURegister fs, FPURegister ft) { - mina(S, fd, fs, ft); -} - -void Assembler::mina_d(FPURegister fd, FPURegister fs, FPURegister ft) { - mina(D, fd, fs, ft); -} - -void Assembler::maxa_s(FPURegister fd, FPURegister fs, FPURegister ft) { - maxa(S, fd, fs, ft); -} - -void Assembler::maxa_d(FPURegister fd, FPURegister fs, FPURegister ft) { - maxa(D, fd, fs, ft); -} - -void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) { - GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W); -} - -void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) { - DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && - IsFp64Mode()); - GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L); -} - -void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) { - GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D); -} - -void 
Assembler::cvt_d_w(FPURegister fd, FPURegister fs) { - GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W); -} - -void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) { - DCHECK((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && - IsFp64Mode()); - GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L); -} - -void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) { - GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S); -} - -// Conditions for >= MIPSr6. -void Assembler::cmp(FPUCondition cond, SecondaryField fmt, FPURegister fd, - FPURegister fs, FPURegister ft) { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK_EQ(fmt & ~(31 << kRsShift), 0); - Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift | - fd.code() << kFdShift | (0 << 5) | cond; - emit(instr); -} - -void Assembler::cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs, - FPURegister ft) { - cmp(cond, W, fd, fs, ft); -} - -void Assembler::cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs, - FPURegister ft) { - cmp(cond, L, fd, fs, ft); -} - -void Assembler::bc1eqz(int16_t offset, FPURegister ft) { - DCHECK(IsMipsArchVariant(kMips32r6)); - BlockTrampolinePoolScope block_trampoline_pool(this); - Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask); - emit(instr); - BlockTrampolinePoolFor(1); // For associated delay slot. -} - -void Assembler::bc1nez(int16_t offset, FPURegister ft) { - DCHECK(IsMipsArchVariant(kMips32r6)); - BlockTrampolinePoolScope block_trampoline_pool(this); - Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask); - emit(instr); - BlockTrampolinePoolFor(1); // For associated delay slot. -} - -// Conditions for < MIPSr6. -void Assembler::c(FPUCondition cond, SecondaryField fmt, FPURegister fs, - FPURegister ft, uint16_t cc) { - DCHECK(is_uint3(cc)); - DCHECK(fmt == S || fmt == D); - DCHECK_EQ(fmt & ~(31 << kRsShift), 0); - Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift | cc << 8 | - 3 << 4 | cond; - emit(instr); -} - -void Assembler::c_s(FPUCondition cond, FPURegister fs, FPURegister ft, - uint16_t cc) { - c(cond, S, fs, ft, cc); -} - -void Assembler::c_d(FPUCondition cond, FPURegister fs, FPURegister ft, - uint16_t cc) { - c(cond, D, fs, ft, cc); -} - -void Assembler::fcmp(FPURegister src1, const double src2, FPUCondition cond) { - DCHECK_EQ(src2, 0.0); - mtc1(zero_reg, f14); - cvt_d_w(f14, f14); - c(cond, D, src1, f14, 0); -} - -void Assembler::bc1f(int16_t offset, uint16_t cc) { - BlockTrampolinePoolScope block_trampoline_pool(this); - DCHECK(is_uint3(cc)); - Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask); - emit(instr); - BlockTrampolinePoolFor(1); // For associated delay slot. -} - -void Assembler::bc1t(int16_t offset, uint16_t cc) { - BlockTrampolinePoolScope block_trampoline_pool(this); - DCHECK(is_uint3(cc)); - Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask); - emit(instr); - BlockTrampolinePoolFor(1); // For associated delay slot. 
-} - -// ---------- MSA instructions ------------ -#define MSA_BRANCH_LIST(V) \ - V(bz_v, BZ_V) \ - V(bz_b, BZ_B) \ - V(bz_h, BZ_H) \ - V(bz_w, BZ_W) \ - V(bz_d, BZ_D) \ - V(bnz_v, BNZ_V) \ - V(bnz_b, BNZ_B) \ - V(bnz_h, BNZ_H) \ - V(bnz_w, BNZ_W) \ - V(bnz_d, BNZ_D) - -#define MSA_BRANCH(name, opcode) \ - void Assembler::name(MSARegister wt, int16_t offset) { \ - GenInstrMsaBranch(opcode, wt, offset); \ - } - -MSA_BRANCH_LIST(MSA_BRANCH) -#undef MSA_BRANCH -#undef MSA_BRANCH_LIST - -#define MSA_LD_ST_LIST(V) \ - V(ld_b, LD_B) \ - V(ld_h, LD_H) \ - V(ld_w, LD_W) \ - V(ld_d, LD_D) \ - V(st_b, ST_B) \ - V(st_h, ST_H) \ - V(st_w, ST_W) \ - V(st_d, ST_D) - -#define MSA_LD_ST(name, opcode) \ - void Assembler::name(MSARegister wd, const MemOperand& rs) { \ - MemOperand source = rs; \ - AdjustBaseAndOffset(&source); \ - if (is_int10(source.offset())) { \ - GenInstrMsaMI10(opcode, source.offset(), source.rm(), wd); \ - } else { \ - UseScratchRegisterScope temps(this); \ - Register scratch = temps.Acquire(); \ - DCHECK(rs.rm() != scratch); \ - addiu(scratch, source.rm(), source.offset()); \ - GenInstrMsaMI10(opcode, 0, scratch, wd); \ - } \ - } - -MSA_LD_ST_LIST(MSA_LD_ST) -#undef MSA_LD_ST -#undef MSA_LD_ST_LIST - -#define MSA_I10_LIST(V) \ - V(ldi_b, I5_DF_b) \ - V(ldi_h, I5_DF_h) \ - V(ldi_w, I5_DF_w) \ - V(ldi_d, I5_DF_d) - -#define MSA_I10(name, format) \ - void Assembler::name(MSARegister wd, int32_t imm10) { \ - GenInstrMsaI10(LDI, format, imm10, wd); \ - } -MSA_I10_LIST(MSA_I10) -#undef MSA_I10 -#undef MSA_I10_LIST - -#define MSA_I5_LIST(V) \ - V(addvi, ADDVI) \ - V(subvi, SUBVI) \ - V(maxi_s, MAXI_S) \ - V(maxi_u, MAXI_U) \ - V(mini_s, MINI_S) \ - V(mini_u, MINI_U) \ - V(ceqi, CEQI) \ - V(clti_s, CLTI_S) \ - V(clti_u, CLTI_U) \ - V(clei_s, CLEI_S) \ - V(clei_u, CLEI_U) - -#define MSA_I5_FORMAT(name, opcode, format) \ - void Assembler::name##_##format(MSARegister wd, MSARegister ws, \ - uint32_t imm5) { \ - GenInstrMsaI5(opcode, I5_DF_##format, imm5, ws, wd); \ - } - -#define MSA_I5(name, opcode) \ - MSA_I5_FORMAT(name, opcode, b) \ - MSA_I5_FORMAT(name, opcode, h) \ - MSA_I5_FORMAT(name, opcode, w) \ - MSA_I5_FORMAT(name, opcode, d) - -MSA_I5_LIST(MSA_I5) -#undef MSA_I5 -#undef MSA_I5_FORMAT -#undef MSA_I5_LIST - -#define MSA_I8_LIST(V) \ - V(andi_b, ANDI_B) \ - V(ori_b, ORI_B) \ - V(nori_b, NORI_B) \ - V(xori_b, XORI_B) \ - V(bmnzi_b, BMNZI_B) \ - V(bmzi_b, BMZI_B) \ - V(bseli_b, BSELI_B) \ - V(shf_b, SHF_B) \ - V(shf_h, SHF_H) \ - V(shf_w, SHF_W) - -#define MSA_I8(name, opcode) \ - void Assembler::name(MSARegister wd, MSARegister ws, uint32_t imm8) { \ - GenInstrMsaI8(opcode, imm8, ws, wd); \ - } - -MSA_I8_LIST(MSA_I8) -#undef MSA_I8 -#undef MSA_I8_LIST - -#define MSA_VEC_LIST(V) \ - V(and_v, AND_V) \ - V(or_v, OR_V) \ - V(nor_v, NOR_V) \ - V(xor_v, XOR_V) \ - V(bmnz_v, BMNZ_V) \ - V(bmz_v, BMZ_V) \ - V(bsel_v, BSEL_V) - -#define MSA_VEC(name, opcode) \ - void Assembler::name(MSARegister wd, MSARegister ws, MSARegister wt) { \ - GenInstrMsaVec(opcode, wt, ws, wd); \ - } - -MSA_VEC_LIST(MSA_VEC) -#undef MSA_VEC -#undef MSA_VEC_LIST - -#define MSA_2R_LIST(V) \ - V(pcnt, PCNT) \ - V(nloc, NLOC) \ - V(nlzc, NLZC) - -#define MSA_2R_FORMAT(name, opcode, format) \ - void Assembler::name##_##format(MSARegister wd, MSARegister ws) { \ - GenInstrMsa2R(opcode, MSA_2R_DF_##format, ws, wd); \ - } - -#define MSA_2R(name, opcode) \ - MSA_2R_FORMAT(name, opcode, b) \ - MSA_2R_FORMAT(name, opcode, h) \ - MSA_2R_FORMAT(name, opcode, w) \ - MSA_2R_FORMAT(name, opcode, d) - -MSA_2R_LIST(MSA_2R) -#undef 
MSA_2R -#undef MSA_2R_FORMAT -#undef MSA_2R_LIST - -#define MSA_FILL(format) \ - void Assembler::fill_##format(MSARegister wd, Register rs) { \ - DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD)); \ - DCHECK(rs.is_valid() && wd.is_valid()); \ - Instr instr = MSA | MSA_2R_FORMAT | FILL | MSA_2R_DF_##format | \ - (rs.code() << kWsShift) | (wd.code() << kWdShift) | \ - MSA_VEC_2R_2RF_MINOR; \ - emit(instr); \ - } - -MSA_FILL(b) -MSA_FILL(h) -MSA_FILL(w) -#undef MSA_FILL - -#define MSA_2RF_LIST(V) \ - V(fclass, FCLASS) \ - V(ftrunc_s, FTRUNC_S) \ - V(ftrunc_u, FTRUNC_U) \ - V(fsqrt, FSQRT) \ - V(frsqrt, FRSQRT) \ - V(frcp, FRCP) \ - V(frint, FRINT) \ - V(flog2, FLOG2) \ - V(fexupl, FEXUPL) \ - V(fexupr, FEXUPR) \ - V(ffql, FFQL) \ - V(ffqr, FFQR) \ - V(ftint_s, FTINT_S) \ - V(ftint_u, FTINT_U) \ - V(ffint_s, FFINT_S) \ - V(ffint_u, FFINT_U) - -#define MSA_2RF_FORMAT(name, opcode, format) \ - void Assembler::name##_##format(MSARegister wd, MSARegister ws) { \ - GenInstrMsa2RF(opcode, MSA_2RF_DF_##format, ws, wd); \ - } - -#define MSA_2RF(name, opcode) \ - MSA_2RF_FORMAT(name, opcode, w) \ - MSA_2RF_FORMAT(name, opcode, d) - -MSA_2RF_LIST(MSA_2RF) -#undef MSA_2RF -#undef MSA_2RF_FORMAT -#undef MSA_2RF_LIST - -#define MSA_3R_LIST(V) \ - V(sll, SLL_MSA) \ - V(sra, SRA_MSA) \ - V(srl, SRL_MSA) \ - V(bclr, BCLR) \ - V(bset, BSET) \ - V(bneg, BNEG) \ - V(binsl, BINSL) \ - V(binsr, BINSR) \ - V(addv, ADDV) \ - V(subv, SUBV) \ - V(max_s, MAX_S) \ - V(max_u, MAX_U) \ - V(min_s, MIN_S) \ - V(min_u, MIN_U) \ - V(max_a, MAX_A) \ - V(min_a, MIN_A) \ - V(ceq, CEQ) \ - V(clt_s, CLT_S) \ - V(clt_u, CLT_U) \ - V(cle_s, CLE_S) \ - V(cle_u, CLE_U) \ - V(add_a, ADD_A) \ - V(adds_a, ADDS_A) \ - V(adds_s, ADDS_S) \ - V(adds_u, ADDS_U) \ - V(ave_s, AVE_S) \ - V(ave_u, AVE_U) \ - V(aver_s, AVER_S) \ - V(aver_u, AVER_U) \ - V(subs_s, SUBS_S) \ - V(subs_u, SUBS_U) \ - V(subsus_u, SUBSUS_U) \ - V(subsuu_s, SUBSUU_S) \ - V(asub_s, ASUB_S) \ - V(asub_u, ASUB_U) \ - V(mulv, MULV) \ - V(maddv, MADDV) \ - V(msubv, MSUBV) \ - V(div_s, DIV_S_MSA) \ - V(div_u, DIV_U) \ - V(mod_s, MOD_S) \ - V(mod_u, MOD_U) \ - V(dotp_s, DOTP_S) \ - V(dotp_u, DOTP_U) \ - V(dpadd_s, DPADD_S) \ - V(dpadd_u, DPADD_U) \ - V(dpsub_s, DPSUB_S) \ - V(dpsub_u, DPSUB_U) \ - V(pckev, PCKEV) \ - V(pckod, PCKOD) \ - V(ilvl, ILVL) \ - V(ilvr, ILVR) \ - V(ilvev, ILVEV) \ - V(ilvod, ILVOD) \ - V(vshf, VSHF) \ - V(srar, SRAR) \ - V(srlr, SRLR) \ - V(hadd_s, HADD_S) \ - V(hadd_u, HADD_U) \ - V(hsub_s, HSUB_S) \ - V(hsub_u, HSUB_U) - -#define MSA_3R_FORMAT(name, opcode, format) \ - void Assembler::name##_##format(MSARegister wd, MSARegister ws, \ - MSARegister wt) { \ - GenInstrMsa3R(opcode, MSA_3R_DF_##format, wt, ws, wd); \ - } - -#define MSA_3R_FORMAT_SLD_SPLAT(name, opcode, format) \ - void Assembler::name##_##format(MSARegister wd, MSARegister ws, \ - Register rt) { \ - GenInstrMsa3R(opcode, MSA_3R_DF_##format, rt, ws, wd); \ - } - -#define MSA_3R(name, opcode) \ - MSA_3R_FORMAT(name, opcode, b) \ - MSA_3R_FORMAT(name, opcode, h) \ - MSA_3R_FORMAT(name, opcode, w) \ - MSA_3R_FORMAT(name, opcode, d) - -#define MSA_3R_SLD_SPLAT(name, opcode) \ - MSA_3R_FORMAT_SLD_SPLAT(name, opcode, b) \ - MSA_3R_FORMAT_SLD_SPLAT(name, opcode, h) \ - MSA_3R_FORMAT_SLD_SPLAT(name, opcode, w) \ - MSA_3R_FORMAT_SLD_SPLAT(name, opcode, d) - -MSA_3R_LIST(MSA_3R) -MSA_3R_SLD_SPLAT(sld, SLD) -MSA_3R_SLD_SPLAT(splat, SPLAT) - -#undef MSA_3R -#undef MSA_3R_FORMAT -#undef MSA_3R_FORMAT_SLD_SPLAT -#undef MSA_3R_SLD_SPLAT -#undef MSA_3R_LIST - -#define MSA_3RF_LIST1(V) \ - 
V(fcaf, FCAF) \ - V(fcun, FCUN) \ - V(fceq, FCEQ) \ - V(fcueq, FCUEQ) \ - V(fclt, FCLT) \ - V(fcult, FCULT) \ - V(fcle, FCLE) \ - V(fcule, FCULE) \ - V(fsaf, FSAF) \ - V(fsun, FSUN) \ - V(fseq, FSEQ) \ - V(fsueq, FSUEQ) \ - V(fslt, FSLT) \ - V(fsult, FSULT) \ - V(fsle, FSLE) \ - V(fsule, FSULE) \ - V(fadd, FADD) \ - V(fsub, FSUB) \ - V(fmul, FMUL) \ - V(fdiv, FDIV) \ - V(fmadd, FMADD) \ - V(fmsub, FMSUB) \ - V(fexp2, FEXP2) \ - V(fmin, FMIN) \ - V(fmin_a, FMIN_A) \ - V(fmax, FMAX) \ - V(fmax_a, FMAX_A) \ - V(fcor, FCOR) \ - V(fcune, FCUNE) \ - V(fcne, FCNE) \ - V(fsor, FSOR) \ - V(fsune, FSUNE) \ - V(fsne, FSNE) - -#define MSA_3RF_LIST2(V) \ - V(fexdo, FEXDO) \ - V(ftq, FTQ) \ - V(mul_q, MUL_Q) \ - V(madd_q, MADD_Q) \ - V(msub_q, MSUB_Q) \ - V(mulr_q, MULR_Q) \ - V(maddr_q, MADDR_Q) \ - V(msubr_q, MSUBR_Q) - -#define MSA_3RF_FORMAT(name, opcode, df, df_c) \ - void Assembler::name##_##df(MSARegister wd, MSARegister ws, \ - MSARegister wt) { \ - GenInstrMsa3RF(opcode, df_c, wt, ws, wd); \ - } - -#define MSA_3RF_1(name, opcode) \ - MSA_3RF_FORMAT(name, opcode, w, 0) \ - MSA_3RF_FORMAT(name, opcode, d, 1) - -#define MSA_3RF_2(name, opcode) \ - MSA_3RF_FORMAT(name, opcode, h, 0) \ - MSA_3RF_FORMAT(name, opcode, w, 1) - -MSA_3RF_LIST1(MSA_3RF_1) -MSA_3RF_LIST2(MSA_3RF_2) -#undef MSA_3RF_1 -#undef MSA_3RF_2 -#undef MSA_3RF_FORMAT -#undef MSA_3RF_LIST1 -#undef MSA_3RF_LIST2 - -void Assembler::sldi_b(MSARegister wd, MSARegister ws, uint32_t n) { - GenInstrMsaElm(SLDI, ELM_DF_B, n, ws, wd); -} - -void Assembler::sldi_h(MSARegister wd, MSARegister ws, uint32_t n) { - GenInstrMsaElm(SLDI, ELM_DF_H, n, ws, wd); -} - -void Assembler::sldi_w(MSARegister wd, MSARegister ws, uint32_t n) { - GenInstrMsaElm(SLDI, ELM_DF_W, n, ws, wd); -} - -void Assembler::sldi_d(MSARegister wd, MSARegister ws, uint32_t n) { - GenInstrMsaElm(SLDI, ELM_DF_D, n, ws, wd); -} - -void Assembler::splati_b(MSARegister wd, MSARegister ws, uint32_t n) { - GenInstrMsaElm(SPLATI, ELM_DF_B, n, ws, wd); -} - -void Assembler::splati_h(MSARegister wd, MSARegister ws, uint32_t n) { - GenInstrMsaElm(SPLATI, ELM_DF_H, n, ws, wd); -} - -void Assembler::splati_w(MSARegister wd, MSARegister ws, uint32_t n) { - GenInstrMsaElm(SPLATI, ELM_DF_W, n, ws, wd); -} - -void Assembler::splati_d(MSARegister wd, MSARegister ws, uint32_t n) { - GenInstrMsaElm(SPLATI, ELM_DF_D, n, ws, wd); -} - -void Assembler::copy_s_b(Register rd, MSARegister ws, uint32_t n) { - GenInstrMsaElm(COPY_S, ELM_DF_B, n, ws, rd); -} - -void Assembler::copy_s_h(Register rd, MSARegister ws, uint32_t n) { - GenInstrMsaElm(COPY_S, ELM_DF_H, n, ws, rd); -} - -void Assembler::copy_s_w(Register rd, MSARegister ws, uint32_t n) { - GenInstrMsaElm(COPY_S, ELM_DF_W, n, ws, rd); -} - -void Assembler::copy_u_b(Register rd, MSARegister ws, uint32_t n) { - GenInstrMsaElm(COPY_U, ELM_DF_B, n, ws, rd); -} - -void Assembler::copy_u_h(Register rd, MSARegister ws, uint32_t n) { - GenInstrMsaElm(COPY_U, ELM_DF_H, n, ws, rd); -} - -void Assembler::copy_u_w(Register rd, MSARegister ws, uint32_t n) { - GenInstrMsaElm(COPY_U, ELM_DF_W, n, ws, rd); -} - -void Assembler::insert_b(MSARegister wd, uint32_t n, Register rs) { - GenInstrMsaElm(INSERT, ELM_DF_B, n, rs, wd); -} - -void Assembler::insert_h(MSARegister wd, uint32_t n, Register rs) { - GenInstrMsaElm(INSERT, ELM_DF_H, n, rs, wd); -} - -void Assembler::insert_w(MSARegister wd, uint32_t n, Register rs) { - GenInstrMsaElm(INSERT, ELM_DF_W, n, rs, wd); -} - -void Assembler::insve_b(MSARegister wd, uint32_t n, MSARegister ws) { - GenInstrMsaElm(INSVE, 
ELM_DF_B, n, ws, wd);
-}
-
-void Assembler::insve_h(MSARegister wd, uint32_t n, MSARegister ws) {
-  GenInstrMsaElm(INSVE, ELM_DF_H, n, ws, wd);
-}
-
-void Assembler::insve_w(MSARegister wd, uint32_t n, MSARegister ws) {
-  GenInstrMsaElm(INSVE, ELM_DF_W, n, ws, wd);
-}
-
-void Assembler::insve_d(MSARegister wd, uint32_t n, MSARegister ws) {
-  GenInstrMsaElm(INSVE, ELM_DF_D, n, ws, wd);
-}
-
-void Assembler::move_v(MSARegister wd, MSARegister ws) {
-  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
-  DCHECK(ws.is_valid() && wd.is_valid());
-  Instr instr = MSA | MOVE_V | (ws.code() << kWsShift) |
-                (wd.code() << kWdShift) | MSA_ELM_MINOR;
-  emit(instr);
-}
-
-void Assembler::ctcmsa(MSAControlRegister cd, Register rs) {
-  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
-  DCHECK(cd.is_valid() && rs.is_valid());
-  Instr instr = MSA | CTCMSA | (rs.code() << kWsShift) |
-                (cd.code() << kWdShift) | MSA_ELM_MINOR;
-  emit(instr);
-}
-
-void Assembler::cfcmsa(Register rd, MSAControlRegister cs) {
-  DCHECK(IsMipsArchVariant(kMips32r6) && IsEnabled(MIPS_SIMD));
-  DCHECK(rd.is_valid() && cs.is_valid());
-  Instr instr = MSA | CFCMSA | (cs.code() << kWsShift) |
-                (rd.code() << kWdShift) | MSA_ELM_MINOR;
-  emit(instr);
-}
-
-#define MSA_BIT_LIST(V) \
-  V(slli, SLLI)         \
-  V(srai, SRAI)         \
-  V(srli, SRLI)         \
-  V(bclri, BCLRI)       \
-  V(bseti, BSETI)       \
-  V(bnegi, BNEGI)       \
-  V(binsli, BINSLI)     \
-  V(binsri, BINSRI)     \
-  V(sat_s, SAT_S)       \
-  V(sat_u, SAT_U)       \
-  V(srari, SRARI)       \
-  V(srlri, SRLRI)
-
-#define MSA_BIT_FORMAT(name, opcode, format)                       \
-  void Assembler::name##_##format(MSARegister wd, MSARegister ws,  \
-                                  uint32_t m) {                    \
-    GenInstrMsaBit(opcode, BIT_DF_##format, m, ws, wd);            \
-  }
-
-#define MSA_BIT(name, opcode)     \
-  MSA_BIT_FORMAT(name, opcode, b) \
-  MSA_BIT_FORMAT(name, opcode, h) \
-  MSA_BIT_FORMAT(name, opcode, w) \
-  MSA_BIT_FORMAT(name, opcode, d)
-
-MSA_BIT_LIST(MSA_BIT)
-#undef MSA_BIT
-#undef MSA_BIT_FORMAT
-#undef MSA_BIT_LIST
-
-int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
-                                         intptr_t pc_delta) {
-  Instr instr = instr_at(pc);
-
-  if (RelocInfo::IsInternalReference(rmode)) {
-    int32_t* p = reinterpret_cast<int32_t*>(pc);
-    if (*p == 0) {
-      return 0;  // Number of instructions patched.
-    }
-    *p += pc_delta;
-    return 1;  // Number of instructions patched.
-  } else {
-    DCHECK(RelocInfo::IsInternalReferenceEncoded(rmode));
-    if (IsLui(instr)) {
-      Instr instr1 = instr_at(pc + 0 * kInstrSize);
-      Instr instr2 = instr_at(pc + 1 * kInstrSize);
-      DCHECK(IsOri(instr2) || IsJicOrJialc(instr2));
-      int32_t imm;
-      if (IsJicOrJialc(instr2)) {
-        imm = CreateTargetAddress(instr1, instr2);
-      } else {
-        imm = GetLuiOriImmediate(instr1, instr2);
-      }
-
-      if (imm == kEndOfJumpChain) {
-        return 0;  // Number of instructions patched.
-      }
-      imm += pc_delta;
-      DCHECK_EQ(imm & 3, 0);
-      instr1 &= ~kImm16Mask;
-      instr2 &= ~kImm16Mask;
-
-      if (IsJicOrJialc(instr2)) {
-        uint32_t lui_offset_u, jic_offset_u;
-        Assembler::UnpackTargetAddressUnsigned(imm,
-                                               &lui_offset_u, &jic_offset_u);
-        instr_at_put(pc + 0 * kInstrSize, instr1 | lui_offset_u);
-        instr_at_put(pc + 1 * kInstrSize, instr2 | jic_offset_u);
-      } else {
-        PatchLuiOriImmediate(pc, imm, instr1, 0 * kInstrSize, instr2,
-                             1 * kInstrSize);
-      }
-      return 2;  // Number of instructions patched.
-    } else {
-      UNREACHABLE();
-    }
-  }
-}
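For reference, the lui/ori patching performed above amounts to splitting a 32-bit value into two 16-bit instruction payloads and reassembling it when the code moves. A rough standalone sketch of that arithmetic (helper names are illustrative, not the V8 API):

```cpp
// Sketch: a 32-bit immediate split across a lui/ori pair and reassembled
// during relocation, mirroring GetLuiOriImmediate/PatchLuiOriImmediate.
#include <cassert>
#include <cstdint>

constexpr uint32_t kImm16Mask = 0xFFFF;

// lui loads the upper 16 bits; ori fills in the lower 16 bits.
void SplitImmediate(uint32_t imm, uint32_t* lui_imm, uint32_t* ori_imm) {
  *lui_imm = (imm >> 16) & kImm16Mask;  // payload of the lui instruction
  *ori_imm = imm & kImm16Mask;          // payload of the ori instruction
}

uint32_t ReassembleImmediate(uint32_t lui_imm, uint32_t ori_imm) {
  return (lui_imm << 16) | ori_imm;
}

int main() {
  uint32_t lui_imm, ori_imm;
  SplitImmediate(0x12345678, &lui_imm, &ori_imm);
  // Relocation adds pc_delta to the reassembled value, then re-splits it.
  uint32_t imm = ReassembleImmediate(lui_imm, ori_imm) + 0x1000 /* pc_delta */;
  SplitImmediate(imm, &lui_imm, &ori_imm);
  assert(ReassembleImmediate(lui_imm, ori_imm) == 0x12346678);
}
```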
-
-void Assembler::RelocateRelativeReference(RelocInfo::Mode rmode, Address pc,
-                                          intptr_t pc_delta) {
-  Instr instr = instr_at(pc);
-
-  DCHECK(RelocInfo::IsRelativeCodeTarget(rmode));
-  if (IsLui(instr)) {
-    Instr instr1 = instr_at(pc + 0 * kInstrSize);
-    Instr instr2 = instr_at(pc + 1 * kInstrSize);
-    Instr instr3 = instr_at(pc + 2 * kInstrSize);
-    int32_t imm;
-    Address ori_offset;
-    if (IsNal(instr2)) {
-      instr2 = instr3;
-      ori_offset = 2 * kInstrSize;
-    } else {
-      ori_offset = 1 * kInstrSize;
-    }
-    DCHECK(IsOri(instr2));
-    imm = GetLuiOriImmediate(instr1, instr2);
-    instr1 &= ~kImm16Mask;
-    instr2 &= ~kImm16Mask;
-
-    if (imm == kEndOfJumpChain) {
-      return;
-    }
-    imm -= pc_delta;
-    DCHECK_EQ(imm & 3, 0);
-    PatchLuiOriImmediate(pc, imm, instr1, 0 * kInstrSize, instr2, ori_offset);
-    return;
-  } else {
-    UNREACHABLE();
-  }
-}
-
-void Assembler::GrowBuffer() {
-  // Compute new buffer size.
-  int old_size = buffer_->size();
-  int new_size = std::min(2 * old_size, old_size + 1 * MB);
-
-  // Some internal data structures overflow for very large buffers,
-  // they must ensure that kMaximalBufferSize is not too large.
-  if (new_size > kMaximalBufferSize) {
-    V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
-  }
-
-  // Set up new buffer.
-  std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
-  DCHECK_EQ(new_size, new_buffer->size());
-  byte* new_start = new_buffer->start();
-
-  // Copy the data.
-  int pc_delta = new_start - buffer_start_;
-  int rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
-  size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
-  MemMove(new_start, buffer_start_, pc_offset());
-  MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
-          reloc_size);
-
-  // Switch buffers.
-  buffer_ = std::move(new_buffer);
-  buffer_start_ = new_start;
-  pc_ += pc_delta;
-  pc_for_safepoint_ += pc_delta;
-  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
-                               reloc_info_writer.last_pc() + pc_delta);
-
-  // Relocate runtime entries.
-  base::Vector<byte> instructions{buffer_start_,
-                                  static_cast<size_t>(pc_offset())};
-  base::Vector<const byte> reloc_info{reloc_info_writer.pos(), reloc_size};
-  for (RelocIterator it(instructions, reloc_info, 0); !it.done(); it.next()) {
-    RelocInfo::Mode rmode = it.rinfo()->rmode();
-    if (rmode == RelocInfo::INTERNAL_REFERENCE_ENCODED ||
-        rmode == RelocInfo::INTERNAL_REFERENCE) {
-      RelocateInternalReference(rmode, it.rinfo()->pc(), pc_delta);
-    }
-  }
-
-  DCHECK(!overflow());
-}
-
-void Assembler::db(uint8_t data) {
-  CheckForEmitInForbiddenSlot();
-  *reinterpret_cast<uint8_t*>(pc_) = data;
-  pc_ += sizeof(uint8_t);
-}
-
-void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
-  CheckForEmitInForbiddenSlot();
-  if (!RelocInfo::IsNoInfo(rmode)) {
-    DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
-           RelocInfo::IsLiteralConstant(rmode));
-    RecordRelocInfo(rmode);
-  }
-  *reinterpret_cast<uint32_t*>(pc_) = data;
-  pc_ += sizeof(uint32_t);
-}
-
-void Assembler::dq(uint64_t data, RelocInfo::Mode rmode) {
-  CheckForEmitInForbiddenSlot();
-  if (!RelocInfo::IsNoInfo(rmode)) {
-    DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
-           RelocInfo::IsLiteralConstant(rmode));
-    RecordRelocInfo(rmode);
-  }
-  *reinterpret_cast<uint64_t*>(pc_) = data;
-  pc_ += sizeof(uint64_t);
-}
-
-void Assembler::dd(Label* label) {
-  uint32_t data;
-  CheckForEmitInForbiddenSlot();
-  if (label->is_bound()) {
-    data = reinterpret_cast<uint32_t>(buffer_start_ + label->pos());
-  } else {
-    data = jump_address(label);
-    unbound_labels_count_++;
-    internal_reference_positions_.insert(label->pos());
-  }
-  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
-  EmitHelper(data);
-}
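The pointer fix-ups in GrowBuffer above reduce to two deltas: code is anchored to the buffer start, relocation info to the buffer end. A minimal sketch of that arithmetic and of the growth policy (names and addresses are illustrative only):

```cpp
// Sketch: GrowBuffer's size policy and delta arithmetic, not V8 code.
#include <algorithm>
#include <cstdint>

constexpr int MB = 1 << 20;

// Double small buffers, but cap growth at 1 MB per step so very large
// code objects do not over-allocate.
int NextBufferSize(int old_size) {
  return std::min(2 * old_size, old_size + 1 * MB);
}

// Code grows from the buffer start, relocation info from the buffer end,
// so the two regions move by different deltas on reallocation.
void Deltas(std::uintptr_t old_start, int old_size, std::uintptr_t new_start,
            int new_size, std::intptr_t* pc_delta, std::intptr_t* rc_delta) {
  *pc_delta = new_start - old_start;
  *rc_delta = (new_start + new_size) - (old_start + old_size);
}

int main() {
  std::intptr_t pc_delta, rc_delta;
  Deltas(0x1000, 64, 0x8000, NextBufferSize(64), &pc_delta, &rc_delta);
  // pc_delta = 0x7000; rc_delta = 0x7000 + (128 - 64) = 0x7040.
  return pc_delta == 0x7000 && rc_delta == 0x7040 ? 0 : 1;
}
```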
-
-void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
-  if (!ShouldRecordRelocInfo(rmode)) return;
-  // We do not try to reuse pool constants.
-  RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
-  DCHECK_GE(buffer_space(), kMaxRelocSize);  // Too late to grow buffer here.
-  reloc_info_writer.Write(&rinfo);
-}
-
-void Assembler::BlockTrampolinePoolFor(int instructions) {
-  CheckTrampolinePoolQuick(instructions);
-  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
-}
-
-void Assembler::CheckTrampolinePool() {
-  // Some small sequences of instructions must not be broken up by the
-  // insertion of a trampoline pool; such sequences are protected by setting
-  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
-  // which are both checked here. Also, recursive calls to CheckTrampolinePool
-  // are blocked by trampoline_pool_blocked_nesting_.
-  if ((trampoline_pool_blocked_nesting_ > 0) ||
-      (pc_offset() < no_trampoline_pool_before_)) {
-    // Emission is currently blocked; make sure we try again as soon as
-    // possible.
-    if (trampoline_pool_blocked_nesting_ > 0) {
-      next_buffer_check_ = pc_offset() + kInstrSize;
-    } else {
-      next_buffer_check_ = no_trampoline_pool_before_;
-    }
-    return;
-  }
-
-  DCHECK(!trampoline_emitted_);
-  DCHECK_GE(unbound_labels_count_, 0);
-  if (unbound_labels_count_ > 0) {
-    // First we emit jump (2 instructions), then we emit trampoline pool.
-    {
-      BlockTrampolinePoolScope block_trampoline_pool(this);
-      Label after_pool;
-      if (IsMipsArchVariant(kMips32r6)) {
-        bc(&after_pool);
-      } else {
-        b(&after_pool);
-      }
-      nop();
-
-      int pool_start = pc_offset();
-      for (int i = 0; i < unbound_labels_count_; i++) {
-        {
-          if (IsMipsArchVariant(kMips32r6)) {
-            bc(&after_pool);
-            nop();
-          } else {
-            GenPCRelativeJump(t8, t9, 0, RelocInfo::NO_INFO,
-                              BranchDelaySlot::PROTECT);
-          }
-        }
-      }
-      // If unbound_labels_count_ is big enough, label after_pool will
-      // need a trampoline too, so we must create the trampoline before
-      // the bind operation to make sure function 'bind' can get this
-      // information.
-      trampoline_ = Trampoline(pool_start, unbound_labels_count_);
-      bind(&after_pool);
-
-      trampoline_emitted_ = true;
-      // As we are only going to emit trampoline once, we need to prevent any
-      // further emission.
-      next_buffer_check_ = kMaxInt;
-    }
-  } else {
-    // Number of branches to unbound label at this point is zero, so we can
-    // move next buffer check to maximum.
-    next_buffer_check_ =
-        pc_offset() + kMaxBranchOffset - kTrampolineSlotsSize * 16;
-  }
-  return;
-}
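The next_buffer_check_ arithmetic above deliberately stops short of the maximum branch reach: every unbound label may eventually need a trampoline slot, so the check point leaves headroom for a batch of slots. A sketch with the pre-r6 constants (names are illustrative, not the V8 API):

```cpp
// Sketch: why the trampoline check fires before branches go out of range.
#include <cstdio>

constexpr int kInstrSize = 4;
constexpr int kMaxBranchOffset = (1 << 17) - 1;       // 16-bit field, <<2
constexpr int kTrampolineSlotsSize = 7 * kInstrSize;  // pre-r6 slot size

int NextCheck(int pc_offset) {
  // Schedule the next pool check early enough that every branch emitted
  // before it can still reach a pool of up to 16 trampoline slots.
  return pc_offset + kMaxBranchOffset - kTrampolineSlotsSize * 16;
}

int main() { std::printf("next check at pc offset %d\n", NextCheck(0)); }
```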
-
-Address Assembler::target_address_at(Address pc) {
-  Instr instr1 = instr_at(pc);
-  Instr instr2 = instr_at(pc + kInstrSize);
-  Instr instr3 = instr_at(pc + 2 * kInstrSize);
-  // Interpret 2 instructions generated by li (lui/ori) or optimized pairs
-  // lui/jic, aui/jic or lui/jialc.
-  if (IsLui(instr1)) {
-    if (IsOri(instr2)) {
-      Address target_address;
-      // Assemble the 32 bit value.
-      target_address = GetLuiOriImmediate(instr1, instr2);
-      if (IsAddu(instr3, t9, ra, t9)) {
-        target_address += pc + kRelativeJumpForBuiltinsOffset;
-      }
-      return target_address;
-    } else if (IsJicOrJialc(instr2)) {
-      // Assemble the 32 bit value.
-      return static_cast<Address>(CreateTargetAddress(instr1, instr2));
-    } else if (IsNal(instr2)) {
-      DCHECK(IsOri(instr3));
-      Address target_address;
-      target_address = GetLuiOriImmediate(instr1, instr3);
-      return target_address + pc + kRelativeCallForBuiltinsOffset;
-    }
-  }
-
-  // We should never get here, force a bad address if we do.
-  UNREACHABLE();
-}
-
-// On Mips, a target address is stored in a lui/ori instruction pair, each
-// of which loads 16 bits of the 32-bit address into a register.
-// Patching the address must replace both instructions and flush the i-cache.
-// On r6, the target address is stored in a lui/jic pair, and both
-// instructions have to be patched.
-void Assembler::set_target_value_at(Address pc, uint32_t target,
-                                    ICacheFlushMode icache_flush_mode) {
-  Instr instr1 = instr_at(pc);
-  Instr instr2 = instr_at(pc + kInstrSize);
-
-#ifdef DEBUG
-  // Check we have the result from a li macro-instruction, using instr pair.
-  DCHECK(IsLui(instr1) &&
-         (IsOri(instr2) || IsJicOrJialc(instr2) || IsNal(instr2)));
-#endif
-
-  if (IsJicOrJialc(instr2)) {
-    // Must use 2 instructions to ensure patchable code => use lui and jic.
-    uint32_t lui_offset, jic_offset;
-    Assembler::UnpackTargetAddressUnsigned(target, &lui_offset, &jic_offset);
-
-    instr1 &= ~kImm16Mask;
-    instr2 &= ~kImm16Mask;
-
-    instr1 |= lui_offset;
-    instr2 |= jic_offset;
-
-    instr_at_put(pc, instr1);
-    instr_at_put(pc + kInstrSize, instr2);
-  } else {
-    Instr instr3 = instr_at(pc + 2 * kInstrSize);
-    // If we are using relative calls/jumps for builtins.
-    if (IsNal(instr2)) {
-      target -= pc + kRelativeCallForBuiltinsOffset;
-    }
-    if (IsAddu(instr3, t9, ra, t9)) {
-      target -= pc + kRelativeJumpForBuiltinsOffset;
-    }
-    // Must use 2 instructions to ensure patchable code => just use lui and
-    // ori:
-    //   lui rt, upper-16.
-    //   ori rt, rt, lower-16.
-    if (IsNal(instr2)) {
-      instr1 &= ~kImm16Mask;
-      instr3 &= ~kImm16Mask;
-      PatchLuiOriImmediate(pc, target, instr1, 0 * kInstrSize, instr3,
-                           2 * kInstrSize);
-    } else {
-      instr1 &= ~kImm16Mask;
-      instr2 &= ~kImm16Mask;
-      PatchLuiOriImmediate(pc, target, instr1, 0 * kInstrSize, instr2,
-                           1 * kInstrSize);
-    }
-  }
-
-  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
-    FlushInstructionCache(pc, 2 * sizeof(int32_t));
-  }
-}
-
-void Assembler::GenPCRelativeJump(Register tf, Register ts, int32_t imm32,
-                                  RelocInfo::Mode rmode,
-                                  BranchDelaySlot bdslot) {
-  // Order of these instructions is relied upon when patching them
-  // or when changing imm32 that the lui/ori pair loads.
-  or_(tf, ra, zero_reg);
-  nal();  // Relative place of nal instruction determines kLongBranchPCOffset.
-  if (!RelocInfo::IsNoInfo(rmode)) {
-    RecordRelocInfo(rmode);
-  }
-  lui(ts, (imm32 & kHiMask) >> kLuiShift);
-  ori(ts, ts, (imm32 & kImm16Mask));
-  addu(ts, ra, ts);
-  if (bdslot == USE_DELAY_SLOT) {
-    or_(ra, tf, zero_reg);
-  }
-  jr(ts);
-  if (bdslot == PROTECT) {
-    or_(ra, tf, zero_reg);
-  }
-}
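The comment on nal() above alludes to a fixed distance between the start of the jump sequence and the value ra receives: nal() deposits the address after its own delay slot into ra, and the lui/ori pair loads a precomputed delta. A sketch of that arithmetic, assuming kLongBranchPCOffset = 3 * kInstrSize as declared in the header below:

```cpp
// Sketch: PC-relative long branch arithmetic; addresses are made up.
#include <cassert>
#include <cstdint>

constexpr int kInstrSize = 4;
constexpr int kLongBranchPCOffset = 3 * kInstrSize;

int main() {
  uint32_t sequence_start = 0x1000;  // address of the first instruction
  uint32_t target = 0x2000;          // absolute branch target
  // nal() is the second instruction; ra = addr(nal) + 8 = start + 12.
  uint32_t ra = sequence_start + kLongBranchPCOffset;
  uint32_t imm32 = target - ra;      // delta loaded by the lui/ori pair
  assert(ra + imm32 == target);      // addu(ts, ra, ts) lands on the target
}
```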
-
-void Assembler::GenPCRelativeJumpAndLink(Register t, int32_t imm32,
-                                         RelocInfo::Mode rmode,
-                                         BranchDelaySlot bdslot) {
-  if (!RelocInfo::IsNoInfo(rmode)) {
-    RecordRelocInfo(rmode);
-  }
-  // Order of these instructions is relied upon when patching them
-  // or when changing imm32 that the lui/ori pair loads.
-  lui(t, (imm32 & kHiMask) >> kLuiShift);
-  nal();  // Relative place of nal instruction determines kLongBranchPCOffset.
-  ori(t, t, (imm32 & kImm16Mask));
-  addu(t, ra, t);
-  jalr(t);
-  if (bdslot == PROTECT) nop();
-  set_pc_for_safepoint();
-}
-
-UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
-    : available_(assembler->GetScratchRegisterList()),
-      old_available_(*available_) {}
-
-UseScratchRegisterScope::~UseScratchRegisterScope() {
-  *available_ = old_available_;
-}
-
-Register UseScratchRegisterScope::Acquire() {
-  DCHECK_NOT_NULL(available_);
-  return available_->PopFirst();
-}
-
-bool UseScratchRegisterScope::hasAvailable() const {
-  return !available_->is_empty();
-}
-
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/codegen/mips/assembler-mips.h b/src/codegen/mips/assembler-mips.h
deleted file mode 100644
index 9bac809f47..0000000000
--- a/src/codegen/mips/assembler-mips.h
+++ /dev/null
@@ -1,1923 +0,0 @@
-// Copyright (c) 1994-2006 Sun Microsystems Inc.
-// All Rights Reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// - Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// - Redistribution in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-//
-// - Neither the name of Sun Microsystems or the names of contributors may
-// be used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
-// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
-// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// The original source code covered by the above license above has been
-// modified significantly by Google Inc.
-// Copyright 2012 the V8 project authors. All rights reserved.
-
-#ifndef V8_CODEGEN_MIPS_ASSEMBLER_MIPS_H_
-#define V8_CODEGEN_MIPS_ASSEMBLER_MIPS_H_
-
-#include <stdio.h>
-#include <stdlib.h>
-
-#include <set>
-
-#include "src/codegen/assembler.h"
-#include "src/codegen/external-reference.h"
-#include "src/codegen/label.h"
-#include "src/codegen/mips/constants-mips.h"
-#include "src/codegen/mips/register-mips.h"
-#include "src/objects/smi.h"
-
-namespace v8 {
-namespace internal {
-
-class SafepointTableBuilder;
-
-// Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls.
-enum BranchDelaySlot { USE_DELAY_SLOT, PROTECT };
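The USE_DELAY_SLOT/PROTECT distinction just declared is easiest to see on a concrete emission sequence. A minimal sketch of what the flag controls (Emit and JumpRegister are stand-ins for the real emitter, not V8 functions):

```cpp
// Sketch: the delay-slot instruction executes before a MIPS branch takes
// effect. With PROTECT the assembler pads the slot with a nop; with
// USE_DELAY_SLOT the caller promises to fill it with useful work.
#include <cstdio>

enum BranchDelaySlot { USE_DELAY_SLOT, PROTECT };

void Emit(const char* insn) { std::puts(insn); }

void JumpRegister(BranchDelaySlot bdslot) {
  Emit("jr    t9");  // the instruction after jr still executes
  if (bdslot == PROTECT) {
    Emit("nop");     // keep the delay slot harmless
  }                  // else: the caller emits the delay-slot instruction
}

int main() {
  JumpRegister(PROTECT);
  JumpRegister(USE_DELAY_SLOT);
}
```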
-
-// -----------------------------------------------------------------------------
-// Machine instruction Operands.
-
-// Class Operand represents a shifter operand in data processing instructions.
-class Operand {
- public:
-  // Immediate.
-  V8_INLINE explicit Operand(int32_t immediate,
-                             RelocInfo::Mode rmode = RelocInfo::NO_INFO)
-      : rm_(no_reg), rmode_(rmode) {
-    value_.immediate = immediate;
-  }
-  V8_INLINE explicit Operand(const ExternalReference& f)
-      : rm_(no_reg), rmode_(RelocInfo::EXTERNAL_REFERENCE) {
-    value_.immediate = static_cast<int32_t>(f.address());
-  }
-  explicit Operand(Handle<HeapObject> handle);
-  V8_INLINE explicit Operand(Smi value)
-      : rm_(no_reg), rmode_(RelocInfo::NO_INFO) {
-    value_.immediate = static_cast<int32_t>(value.ptr());
-  }
-
-  static Operand EmbeddedNumber(double number);  // Smi or HeapNumber.
-  static Operand EmbeddedStringConstant(const StringConstantBase* str);
-
-  // Register.
-  V8_INLINE explicit Operand(Register rm) : rm_(rm) {}
-
-  // Return true if this is a register operand.
-  V8_INLINE bool is_reg() const;
-
-  inline int32_t immediate() const;
-
-  bool IsImmediate() const { return !rm_.is_valid(); }
-
-  HeapObjectRequest heap_object_request() const {
-    DCHECK(IsHeapObjectRequest());
-    return value_.heap_object_request;
-  }
-
-  bool IsHeapObjectRequest() const {
-    DCHECK_IMPLIES(is_heap_object_request_, IsImmediate());
-    DCHECK_IMPLIES(is_heap_object_request_,
-                   rmode_ == RelocInfo::FULL_EMBEDDED_OBJECT ||
-                       rmode_ == RelocInfo::CODE_TARGET);
-    return is_heap_object_request_;
-  }
-
-  Register rm() const { return rm_; }
-
-  RelocInfo::Mode rmode() const { return rmode_; }
-
- private:
-  Register rm_;
-  union Value {
-    Value() {}
-    HeapObjectRequest heap_object_request;  // if is_heap_object_request_
-    int32_t immediate;                      // otherwise
-  } value_;                                 // valid if rm_ == no_reg
-  bool is_heap_object_request_ = false;
-  RelocInfo::Mode rmode_;
-
-  friend class Assembler;
-  // friend class MacroAssembler;
-};
-
-// On MIPS we have only one addressing mode with base_reg + offset.
-// Class MemOperand represents a memory operand in load and store instructions.
-class V8_EXPORT_PRIVATE MemOperand : public Operand {
- public:
-  // Immediate value attached to offset.
-  enum OffsetAddend { offset_minus_one = -1, offset_zero = 0 };
-
-  explicit MemOperand(Register rn, int32_t offset = 0);
-  explicit MemOperand(Register rn, int32_t unit, int32_t multiplier,
-                      OffsetAddend offset_addend = offset_zero);
-  int32_t offset() const { return offset_; }
-
-  bool OffsetIsInt16Encodable() const { return is_int16(offset_); }
-
- private:
-  int32_t offset_;
-
-  friend class Assembler;
-};
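A usage sketch for the two operand classes above: a register Operand forwards to the register form of an instruction, an immediate Operand may need a scratch register, and a MemOperand is always base register plus 32-bit offset, with only small offsets directly encodable. The types below are simplified stand-ins, not the V8 declarations:

```cpp
// Sketch: how call sites distinguish the operand kinds.
#include <cassert>
#include <cstdint>

struct Register { int code; bool valid; };
constexpr Register no_reg{-1, false};

struct Operand {
  explicit Operand(Register r) : rm(r), immediate(0) {}
  explicit Operand(int32_t imm) : rm(no_reg), immediate(imm) {}
  bool is_reg() const { return rm.valid; }
  Register rm;
  int32_t immediate;
};

struct MemOperand {  // base register + 32-bit offset
  Register base;
  int32_t offset;
  bool OffsetIsInt16Encodable() const {
    return offset >= -32768 && offset <= 32767;  // fits lw/sw's 16-bit field
  }
};

int main() {
  Register t0{8, true};
  assert(Operand(t0).is_reg());
  assert(!Operand(int32_t{42}).is_reg());
  assert(MemOperand{t0, 100}.OffsetIsInt16Encodable());
  assert(!MemOperand{t0, 1 << 20}.OffsetIsInt16Encodable());  // needs addiu
}
```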
-
-class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
- public:
-  // Create an assembler. Instructions and relocation information are emitted
-  // into a buffer, with the instructions starting from the beginning and the
-  // relocation information starting from the end of the buffer. See CodeDesc
-  // for a detailed comment on the layout (globals.h).
-  //
-  // If the provided buffer is nullptr, the assembler allocates and grows its
-  // own buffer. Otherwise it takes ownership of the provided buffer.
-  explicit Assembler(const AssemblerOptions&,
-                     std::unique_ptr<AssemblerBuffer> = {});
-
-  virtual ~Assembler() {}
-
-  // GetCode emits any pending (non-emitted) code and fills the descriptor
-  // desc.
-  static constexpr int kNoHandlerTable = 0;
-  static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr;
-  void GetCode(Isolate* isolate, CodeDesc* desc,
-               SafepointTableBuilder* safepoint_table_builder,
-               int handler_table_offset);
-
-  // Convenience wrapper for code without safepoint or handler tables.
-  void GetCode(Isolate* isolate, CodeDesc* desc) {
-    GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable);
-  }
-
-  // Unused on this architecture.
-  void MaybeEmitOutOfLineConstantPool() {}
-
-  // Mips uses BlockTrampolinePool to prevent generating a trampoline inside a
-  // continuous instruction block. For a Call instruction, it prevents
-  // generating a trampoline between the jalr and its delay slot instruction.
-  // The destructor of BlockTrampolinePool must check whether it needs to
-  // generate a trampoline immediately; if it did not, the branch range could
-  // exceed the max branch offset, meaning the pc_offset after calling
-  // CheckTrampolinePool may have changed. So we use pc_for_safepoint_ here
-  // for the safepoint record.
-  int pc_offset_for_safepoint() {
-    return static_cast<int>(pc_for_safepoint_ - buffer_start_);
-  }
-
-  // Label operations & relative jumps (PPUM Appendix D).
-  //
-  // Takes a branch opcode (cc) and a label (L) and generates
-  // either a backward branch or a forward branch and links it
-  // to the label fixup chain. Usage:
-  //
-  // Label L;    // unbound label
-  // j(cc, &L);  // forward branch to unbound label
-  // bind(&L);   // bind label to the current pc
-  // j(cc, &L);  // backward branch to bound label
-  // bind(&L);   // illegal: a label may be bound only once
-  //
-  // Note: The same Label can be used for forward and backward branches
-  // but it may be bound only once.
-  void bind(Label* L);  // Binds an unbound label L to current code position.
-
-  enum OffsetSize : int { kOffset26 = 26, kOffset21 = 21, kOffset16 = 16 };
-
-  // Determines if Label is bound and near enough so that branch instruction
-  // can be used to reach it, instead of jump instruction.
-  bool is_near(Label* L);
-  bool is_near(Label* L, OffsetSize bits);
-  bool is_near_branch(Label* L);
-  inline bool is_near_pre_r6(Label* L) {
-    DCHECK(!IsMipsArchVariant(kMips32r6));
-    return pc_offset() - L->pos() < kMaxBranchOffset - 4 * kInstrSize;
-  }
-  inline bool is_near_r6(Label* L) {
-    DCHECK(IsMipsArchVariant(kMips32r6));
-    return pc_offset() - L->pos() < kMaxCompactBranchOffset - 4 * kInstrSize;
-  }
-
-  int BranchOffset(Instr instr);
-
-  // Returns the branch offset to the given label from the current code
-  // position. Links the label to the current position if it is still unbound.
-  // Manages the jump elimination optimization if the second parameter is
-  // true.
-  int32_t branch_offset_helper(Label* L, OffsetSize bits);
-  inline int32_t branch_offset(Label* L) {
-    return branch_offset_helper(L, OffsetSize::kOffset16);
-  }
-  inline int32_t branch_offset21(Label* L) {
-    return branch_offset_helper(L, OffsetSize::kOffset21);
-  }
-  inline int32_t branch_offset26(Label* L) {
-    return branch_offset_helper(L, OffsetSize::kOffset26);
-  }
-  inline int32_t shifted_branch_offset(Label* L) {
-    return branch_offset(L) >> 2;
-  }
-  inline int32_t shifted_branch_offset21(Label* L) {
-    return branch_offset21(L) >> 2;
-  }
-  inline int32_t shifted_branch_offset26(Label* L) {
-    return branch_offset26(L) >> 2;
-  }
-  uint32_t jump_address(Label* L);
-  uint32_t branch_long_offset(Label* L);
-
-  // Puts a label's target address at the given position.
-  // The high 8 bits are set to zero.
-  void label_at_put(Label* L, int at_offset);
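The shifted_branch_offset helpers above encode the word distance rather than the byte distance: the offset is measured from pc + 4 (the delay slot) and divided by the 4-byte instruction size before being packed into the 16-bit field. A sketch of that encoding (hypothetical helper, not the V8 API):

```cpp
// Sketch: branch offsets are stored in instruction words, not bytes.
#include <cassert>
#include <cstdint>

constexpr int kInstrSize = 4;
constexpr int kBranchPCOffset = kInstrSize;  // offset is relative to pc + 4

int32_t ShiftedBranchOffset(uint32_t branch_pc, uint32_t target_pos) {
  int32_t byte_offset = static_cast<int32_t>(target_pos) -
                        static_cast<int32_t>(branch_pc + kBranchPCOffset);
  assert((byte_offset & 3) == 0);  // targets are instruction-aligned
  return byte_offset >> 2;         // word offset stored in the instruction
}

int main() {
  // A branch at 0x100 targeting 0x110 encodes (0x110 - 0x104) / 4 = 3.
  assert(ShiftedBranchOffset(0x100, 0x110) == 3);
}
```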
-
-  // Read/Modify the code target address in the branch/call instruction at pc.
-  // The isolate argument is unused (and may be nullptr) when skipping
-  // flushing.
-  static Address target_address_at(Address pc);
-  V8_INLINE static void set_target_address_at(
-      Address pc, Address target,
-      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
-    set_target_value_at(pc, static_cast<uint32_t>(target), icache_flush_mode);
-  }
-  // On MIPS there is no Constant Pool so we skip that parameter.
-  V8_INLINE static Address target_address_at(Address pc,
-                                             Address constant_pool) {
-    return target_address_at(pc);
-  }
-  V8_INLINE static void set_target_address_at(
-      Address pc, Address constant_pool, Address target,
-      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
-    set_target_address_at(pc, target, icache_flush_mode);
-  }
-
-  static void set_target_value_at(
-      Address pc, uint32_t target,
-      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
-
-  // This sets the branch destination (which gets loaded at the call address).
-  // This is for calls and branches within generated code. The serializer
-  // has already deserialized the lui/ori instructions etc.
-  inline static void deserialization_set_special_target_at(
-      Address instruction_payload, Code code, Address target);
-
-  // Get the size of the special target encoded at 'instruction_payload'.
-  inline static int deserialization_special_target_size(
-      Address instruction_payload);
-
-  // This sets the internal reference at the pc.
-  inline static void deserialization_set_target_internal_reference_at(
-      Address pc, Address target,
-      RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
-
-  // Difference between address of current opcode and target address offset.
-  static constexpr int kBranchPCOffset = kInstrSize;
-
-  // Difference between address of current opcode and target address offset,
-  // when we are generating a sequence of instructions for long relative PC
-  // branches. It is the distance between the address of the first instruction
-  // in the jump sequence and the value that ra gets after calling nal().
-  static constexpr int kLongBranchPCOffset = 3 * kInstrSize;
-
-  // Adjusts the ra register in the branch delay slot of a bal instruction in
-  // order to skip instructions not needed after the PIC optimization in
-  // TurboAssembler::BranchAndLink.
-  static constexpr int kOptimizedBranchAndLinkLongReturnOffset = 3 * kInstrSize;
-
-  // Offset of the target relative address in calls/jumps for builtins. It is
-  // the distance between the instruction that is placed just after calling
-  // RecordRelocInfo, and the value that ra gets after calling nal().
-  static constexpr int kRelativeJumpForBuiltinsOffset = 1 * kInstrSize;
-  // Relative target address of jumps for builtins when we use the lui, ori,
-  // dsll, ori sequence to load an address that cannot fit into 32 bits.
-  static constexpr int kRelativeCallForBuiltinsOffset = 3 * kInstrSize;
-
-  // Here we are patching the address in the LUI/ORI instruction pair.
-  // These values are used in the serialization process and must be zero for
-  // MIPS platform, as Code, Embedded Object or External-reference pointers
-  // are split across two consecutive instructions and don't exist separately
-  // in the code, so the serializer should not step forwards in memory after
-  // a target is resolved and written.
-
-  static constexpr int kSpecialTargetSize = 0;
-
-  // Number of consecutive instructions used to store a 32-bit constant. This
-  // constant is used in the RelocInfo::target_address_address() function to
-  // tell the serializer the address of the instruction that follows the
-  // LUI/ORI instruction pair.
-  static constexpr int kInstructionsFor32BitConstant = 2;
-
-  // Max offset for instructions with a 16-bit offset field.
-  static constexpr int kMaxBranchOffset = (1 << (18 - 1)) - 1;
-
-  // Max offset for compact branch instructions with a 26-bit offset field.
-  static constexpr int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1;
-
-  static constexpr int kTrampolineSlotsSize =
-      IsMipsArchVariant(kMips32r6) ? 2 * kInstrSize : 7 * kInstrSize;
-
-  RegList* GetScratchRegisterList() { return &scratch_register_list_; }
-
-  // ---------------------------------------------------------------------------
-  // Code generation.
-
-  // Insert the smallest number of nop instructions
-  // possible to align the pc offset to a multiple
-  // of m. m must be a power of 2 (>= 4).
-  void Align(int m);
-  // Insert the smallest number of zero bytes possible to align the pc offset
-  // to a multiple of m. m must be a power of 2 (>= 2).
-  void DataAlign(int m);
-  // Aligns code to something that's optimal for a jump target for the
-  // platform.
-  void CodeTargetAlign();
-  void LoopHeaderAlign() { CodeTargetAlign(); }
-
-  // Different nop operations are used by the code generator to detect certain
-  // states of the generated code.
-  enum NopMarkerTypes {
-    NON_MARKING_NOP = 0,
-    DEBUG_BREAK_NOP,
-    // IC markers.
-    PROPERTY_ACCESS_INLINED,
-    PROPERTY_ACCESS_INLINED_CONTEXT,
-    PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
-    // Helper values.
-    LAST_CODE_MARKER,
-    FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED,
-  };
-
-  // Type == 0 is the default non-marking nop. For mips this is a
-  // sll(zero_reg, zero_reg, 0). We use rt_reg == at for non-zero
-  // marking, to avoid conflict with ssnop and ehb instructions.
-  void nop(unsigned int type = 0) {
-    DCHECK_LT(type, 32);
-    Register nop_rt_reg = (type == 0) ? zero_reg : at;
-    sll(zero_reg, nop_rt_reg, type, true);
-  }
-
-  // --------Branch-and-jump-instructions----------
-  // We don't use the likely variants of these instructions.
- void b(int16_t offset); - inline void b(Label* L) { b(shifted_branch_offset(L)); } - void bal(int16_t offset); - inline void bal(Label* L) { bal(shifted_branch_offset(L)); } - void bc(int32_t offset); - inline void bc(Label* L) { bc(shifted_branch_offset26(L)); } - void balc(int32_t offset); - inline void balc(Label* L) { balc(shifted_branch_offset26(L)); } - - void beq(Register rs, Register rt, int16_t offset); - inline void beq(Register rs, Register rt, Label* L) { - beq(rs, rt, shifted_branch_offset(L)); - } - void bgez(Register rs, int16_t offset); - void bgezc(Register rt, int16_t offset); - inline void bgezc(Register rt, Label* L) { - bgezc(rt, shifted_branch_offset(L)); - } - void bgeuc(Register rs, Register rt, int16_t offset); - inline void bgeuc(Register rs, Register rt, Label* L) { - bgeuc(rs, rt, shifted_branch_offset(L)); - } - void bgec(Register rs, Register rt, int16_t offset); - inline void bgec(Register rs, Register rt, Label* L) { - bgec(rs, rt, shifted_branch_offset(L)); - } - void bgezal(Register rs, int16_t offset); - void bgezalc(Register rt, int16_t offset); - inline void bgezalc(Register rt, Label* L) { - bgezalc(rt, shifted_branch_offset(L)); - } - void bgezall(Register rs, int16_t offset); - inline void bgezall(Register rs, Label* L) { - bgezall(rs, branch_offset(L) >> 2); - } - void bgtz(Register rs, int16_t offset); - void bgtzc(Register rt, int16_t offset); - inline void bgtzc(Register rt, Label* L) { - bgtzc(rt, shifted_branch_offset(L)); - } - void blez(Register rs, int16_t offset); - void blezc(Register rt, int16_t offset); - inline void blezc(Register rt, Label* L) { - blezc(rt, shifted_branch_offset(L)); - } - void bltz(Register rs, int16_t offset); - void bltzc(Register rt, int16_t offset); - inline void bltzc(Register rt, Label* L) { - bltzc(rt, shifted_branch_offset(L)); - } - void bltuc(Register rs, Register rt, int16_t offset); - inline void bltuc(Register rs, Register rt, Label* L) { - bltuc(rs, rt, shifted_branch_offset(L)); - } - void bltc(Register rs, Register rt, int16_t offset); - inline void bltc(Register rs, Register rt, Label* L) { - bltc(rs, rt, shifted_branch_offset(L)); - } - void bltzal(Register rs, int16_t offset); - void nal() { bltzal(zero_reg, 0); } - void blezalc(Register rt, int16_t offset); - inline void blezalc(Register rt, Label* L) { - blezalc(rt, shifted_branch_offset(L)); - } - void bltzalc(Register rt, int16_t offset); - inline void bltzalc(Register rt, Label* L) { - bltzalc(rt, shifted_branch_offset(L)); - } - void bgtzalc(Register rt, int16_t offset); - inline void bgtzalc(Register rt, Label* L) { - bgtzalc(rt, shifted_branch_offset(L)); - } - void beqzalc(Register rt, int16_t offset); - inline void beqzalc(Register rt, Label* L) { - beqzalc(rt, shifted_branch_offset(L)); - } - void beqc(Register rs, Register rt, int16_t offset); - inline void beqc(Register rs, Register rt, Label* L) { - beqc(rs, rt, shifted_branch_offset(L)); - } - void beqzc(Register rs, int32_t offset); - inline void beqzc(Register rs, Label* L) { - beqzc(rs, shifted_branch_offset21(L)); - } - void bnezalc(Register rt, int16_t offset); - inline void bnezalc(Register rt, Label* L) { - bnezalc(rt, shifted_branch_offset(L)); - } - void bnec(Register rs, Register rt, int16_t offset); - inline void bnec(Register rs, Register rt, Label* L) { - bnec(rs, rt, shifted_branch_offset(L)); - } - void bnezc(Register rt, int32_t offset); - inline void bnezc(Register rt, Label* L) { - bnezc(rt, shifted_branch_offset21(L)); - } - void bne(Register rs, Register rt, 
int16_t offset); - inline void bne(Register rs, Register rt, Label* L) { - bne(rs, rt, shifted_branch_offset(L)); - } - void bovc(Register rs, Register rt, int16_t offset); - inline void bovc(Register rs, Register rt, Label* L) { - bovc(rs, rt, shifted_branch_offset(L)); - } - void bnvc(Register rs, Register rt, int16_t offset); - inline void bnvc(Register rs, Register rt, Label* L) { - bnvc(rs, rt, shifted_branch_offset(L)); - } - - // Never use the int16_t b(l)cond version with a branch offset - // instead of using the Label* version. - - // Jump targets must be in the current 256 MB-aligned region. i.e. 28 bits. - void j(int32_t target); - void jal(int32_t target); - void jalr(Register rs, Register rd = ra); - void jr(Register target); - void jic(Register rt, int16_t offset); - void jialc(Register rt, int16_t offset); - - // -------Data-processing-instructions--------- - - // Arithmetic. - void addu(Register rd, Register rs, Register rt); - void subu(Register rd, Register rs, Register rt); - void mult(Register rs, Register rt); - void multu(Register rs, Register rt); - void div(Register rs, Register rt); - void divu(Register rs, Register rt); - void div(Register rd, Register rs, Register rt); - void divu(Register rd, Register rs, Register rt); - void mod(Register rd, Register rs, Register rt); - void modu(Register rd, Register rs, Register rt); - void mul(Register rd, Register rs, Register rt); - void muh(Register rd, Register rs, Register rt); - void mulu(Register rd, Register rs, Register rt); - void muhu(Register rd, Register rs, Register rt); - - void addiu(Register rd, Register rs, int32_t j); - - // Logical. - void and_(Register rd, Register rs, Register rt); - void or_(Register rd, Register rs, Register rt); - void xor_(Register rd, Register rs, Register rt); - void nor(Register rd, Register rs, Register rt); - - void andi(Register rd, Register rs, int32_t j); - void ori(Register rd, Register rs, int32_t j); - void xori(Register rd, Register rs, int32_t j); - void lui(Register rd, int32_t j); - void aui(Register rs, Register rt, int32_t j); - - // Shifts. - // Please note: sll(zero_reg, zero_reg, x) instructions are reserved as nop - // and may cause problems in normal code. coming_from_nop makes sure this - // doesn't happen. 
- void sll(Register rd, Register rt, uint16_t sa, bool coming_from_nop = false); - void sllv(Register rd, Register rt, Register rs); - void srl(Register rd, Register rt, uint16_t sa); - void srlv(Register rd, Register rt, Register rs); - void sra(Register rt, Register rd, uint16_t sa); - void srav(Register rt, Register rd, Register rs); - void rotr(Register rd, Register rt, uint16_t sa); - void rotrv(Register rd, Register rt, Register rs); - - // ------------Memory-instructions------------- - - void lb(Register rd, const MemOperand& rs); - void lbu(Register rd, const MemOperand& rs); - void lh(Register rd, const MemOperand& rs); - void lhu(Register rd, const MemOperand& rs); - void lw(Register rd, const MemOperand& rs); - void lwl(Register rd, const MemOperand& rs); - void lwr(Register rd, const MemOperand& rs); - void sb(Register rd, const MemOperand& rs); - void sh(Register rd, const MemOperand& rs); - void sw(Register rd, const MemOperand& rs); - void swl(Register rd, const MemOperand& rs); - void swr(Register rd, const MemOperand& rs); - - // ----------Atomic instructions-------------- - - void ll(Register rd, const MemOperand& rs); - void sc(Register rd, const MemOperand& rs); - void llx(Register rd, const MemOperand& rs); - void scx(Register rd, const MemOperand& rs); - - // ---------PC-Relative-instructions----------- - - void addiupc(Register rs, int32_t imm19); - void lwpc(Register rs, int32_t offset19); - void auipc(Register rs, int16_t imm16); - void aluipc(Register rs, int16_t imm16); - - // ----------------Prefetch-------------------- - - void pref(int32_t hint, const MemOperand& rs); - - // -------------Misc-instructions-------------- - - // Break / Trap instructions. - void break_(uint32_t code, bool break_as_stop = false); - void stop(uint32_t code = kMaxStopCode); - void tge(Register rs, Register rt, uint16_t code); - void tgeu(Register rs, Register rt, uint16_t code); - void tlt(Register rs, Register rt, uint16_t code); - void tltu(Register rs, Register rt, uint16_t code); - void teq(Register rs, Register rt, uint16_t code); - void tne(Register rs, Register rt, uint16_t code); - - // Memory barrier instruction. - void sync(); - - // Move from HI/LO register. - void mfhi(Register rd); - void mflo(Register rd); - - // Set on less than. - void slt(Register rd, Register rs, Register rt); - void sltu(Register rd, Register rs, Register rt); - void slti(Register rd, Register rs, int32_t j); - void sltiu(Register rd, Register rs, int32_t j); - - // Conditional move. 
- void movz(Register rd, Register rs, Register rt); - void movn(Register rd, Register rs, Register rt); - void movt(Register rd, Register rs, uint16_t cc = 0); - void movf(Register rd, Register rs, uint16_t cc = 0); - - void sel(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft); - void sel_s(FPURegister fd, FPURegister fs, FPURegister ft); - void sel_d(FPURegister fd, FPURegister fs, FPURegister ft); - void seleqz(Register rd, Register rs, Register rt); - void seleqz(SecondaryField fmt, FPURegister fd, FPURegister fs, - FPURegister ft); - void selnez(Register rd, Register rs, Register rt); - void selnez(SecondaryField fmt, FPURegister fd, FPURegister fs, - FPURegister ft); - void seleqz_d(FPURegister fd, FPURegister fs, FPURegister ft); - void seleqz_s(FPURegister fd, FPURegister fs, FPURegister ft); - void selnez_d(FPURegister fd, FPURegister fs, FPURegister ft); - void selnez_s(FPURegister fd, FPURegister fs, FPURegister ft); - - void movz_s(FPURegister fd, FPURegister fs, Register rt); - void movz_d(FPURegister fd, FPURegister fs, Register rt); - void movt_s(FPURegister fd, FPURegister fs, uint16_t cc = 0); - void movt_d(FPURegister fd, FPURegister fs, uint16_t cc = 0); - void movf_s(FPURegister fd, FPURegister fs, uint16_t cc = 0); - void movf_d(FPURegister fd, FPURegister fs, uint16_t cc = 0); - void movn_s(FPURegister fd, FPURegister fs, Register rt); - void movn_d(FPURegister fd, FPURegister fs, Register rt); - // Bit twiddling. - void clz(Register rd, Register rs); - void ins_(Register rt, Register rs, uint16_t pos, uint16_t size); - void ext_(Register rt, Register rs, uint16_t pos, uint16_t size); - void bitswap(Register rd, Register rt); - void align(Register rd, Register rs, Register rt, uint8_t bp); - - void wsbh(Register rd, Register rt); - void seh(Register rd, Register rt); - void seb(Register rd, Register rt); - - // --------Coprocessor-instructions---------------- - - // Load, store, and move. - void lwc1(FPURegister fd, const MemOperand& src); - void swc1(FPURegister fs, const MemOperand& dst); - - void mtc1(Register rt, FPURegister fs); - void mthc1(Register rt, FPURegister fs); - - void mfc1(Register rt, FPURegister fs); - void mfhc1(Register rt, FPURegister fs); - - void ctc1(Register rt, FPUControlRegister fs); - void cfc1(Register rt, FPUControlRegister fs); - - // Arithmetic. 
-
-  // Arithmetic.
-  void add_s(FPURegister fd, FPURegister fs, FPURegister ft);
-  void add_d(FPURegister fd, FPURegister fs, FPURegister ft);
-  void sub_s(FPURegister fd, FPURegister fs, FPURegister ft);
-  void sub_d(FPURegister fd, FPURegister fs, FPURegister ft);
-  void mul_s(FPURegister fd, FPURegister fs, FPURegister ft);
-  void mul_d(FPURegister fd, FPURegister fs, FPURegister ft);
-  void madd_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
-  void madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
-  void msub_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
-  void msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
-  void maddf_s(FPURegister fd, FPURegister fs, FPURegister ft);
-  void maddf_d(FPURegister fd, FPURegister fs, FPURegister ft);
-  void msubf_s(FPURegister fd, FPURegister fs, FPURegister ft);
-  void msubf_d(FPURegister fd, FPURegister fs, FPURegister ft);
-  void div_s(FPURegister fd, FPURegister fs, FPURegister ft);
-  void div_d(FPURegister fd, FPURegister fs, FPURegister ft);
-  void abs_s(FPURegister fd, FPURegister fs);
-  void abs_d(FPURegister fd, FPURegister fs);
-  void mov_d(FPURegister fd, FPURegister fs);
-  void mov_s(FPURegister fd, FPURegister fs);
-  void neg_s(FPURegister fd, FPURegister fs);
-  void neg_d(FPURegister fd, FPURegister fs);
-  void sqrt_s(FPURegister fd, FPURegister fs);
-  void sqrt_d(FPURegister fd, FPURegister fs);
-  void rsqrt_s(FPURegister fd, FPURegister fs);
-  void rsqrt_d(FPURegister fd, FPURegister fs);
-  void recip_d(FPURegister fd, FPURegister fs);
-  void recip_s(FPURegister fd, FPURegister fs);
-
-  // Conversion.
-  void cvt_w_s(FPURegister fd, FPURegister fs);
-  void cvt_w_d(FPURegister fd, FPURegister fs);
-  void trunc_w_s(FPURegister fd, FPURegister fs);
-  void trunc_w_d(FPURegister fd, FPURegister fs);
-  void round_w_s(FPURegister fd, FPURegister fs);
-  void round_w_d(FPURegister fd, FPURegister fs);
-  void floor_w_s(FPURegister fd, FPURegister fs);
-  void floor_w_d(FPURegister fd, FPURegister fs);
-  void ceil_w_s(FPURegister fd, FPURegister fs);
-  void ceil_w_d(FPURegister fd, FPURegister fs);
-  void rint_s(FPURegister fd, FPURegister fs);
-  void rint_d(FPURegister fd, FPURegister fs);
-  void rint(SecondaryField fmt, FPURegister fd, FPURegister fs);
-
-  void cvt_l_s(FPURegister fd, FPURegister fs);
-  void cvt_l_d(FPURegister fd, FPURegister fs);
-  void trunc_l_s(FPURegister fd, FPURegister fs);
-  void trunc_l_d(FPURegister fd, FPURegister fs);
-  void round_l_s(FPURegister fd, FPURegister fs);
-  void round_l_d(FPURegister fd, FPURegister fs);
-  void floor_l_s(FPURegister fd, FPURegister fs);
-  void floor_l_d(FPURegister fd, FPURegister fs);
-  void ceil_l_s(FPURegister fd, FPURegister fs);
-  void ceil_l_d(FPURegister fd, FPURegister fs);
-
-  void class_s(FPURegister fd, FPURegister fs);
-  void class_d(FPURegister fd, FPURegister fs);
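The madd_s/msub_s family above emits multiply-add: fd = fs * ft + fr (the r6-only maddf_s/msubf_s forms accumulate into fd instead). A rough C++ approximation via std::fma; whether rounding happens once or twice depends on the architecture revision, so treat this as a model only, not this patch's code:

  #include <cmath>
  float MaddSModel(float fr, float fs, float ft) { return std::fmaf(fs, ft, fr); }
  float MsubSModel(float fr, float fs, float ft) { return std::fmaf(fs, ft, -fr); }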
-
-  void min(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
-  void mina(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
-  void max(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
-  void maxa(SecondaryField fmt, FPURegister fd, FPURegister fs, FPURegister ft);
-  void min_s(FPURegister fd, FPURegister fs, FPURegister ft);
-  void min_d(FPURegister fd, FPURegister fs, FPURegister ft);
-  void max_s(FPURegister fd, FPURegister fs, FPURegister ft);
-  void max_d(FPURegister fd, FPURegister fs, FPURegister ft);
-  void mina_s(FPURegister fd, FPURegister fs, FPURegister ft);
-  void mina_d(FPURegister fd, FPURegister fs, FPURegister ft);
-  void maxa_s(FPURegister fd, FPURegister fs, FPURegister ft);
-  void maxa_d(FPURegister fd, FPURegister fs, FPURegister ft);
-
-  void cvt_s_w(FPURegister fd, FPURegister fs);
-  void cvt_s_l(FPURegister fd, FPURegister fs);
-  void cvt_s_d(FPURegister fd, FPURegister fs);
-
-  void cvt_d_w(FPURegister fd, FPURegister fs);
-  void cvt_d_l(FPURegister fd, FPURegister fs);
-  void cvt_d_s(FPURegister fd, FPURegister fs);
-
-  // Conditions and branches for MIPSr6.
-  void cmp(FPUCondition cond, SecondaryField fmt, FPURegister fd,
-           FPURegister ft, FPURegister fs);
-  void cmp_s(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);
-  void cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);
-
-  void bc1eqz(int16_t offset, FPURegister ft);
-  inline void bc1eqz(Label* L, FPURegister ft) {
-    bc1eqz(shifted_branch_offset(L), ft);
-  }
-  void bc1nez(int16_t offset, FPURegister ft);
-  inline void bc1nez(Label* L, FPURegister ft) {
-    bc1nez(shifted_branch_offset(L), ft);
-  }
-
-  // Conditions and branches for non MIPSr6.
-  void c(FPUCondition cond, SecondaryField fmt, FPURegister ft, FPURegister fs,
-         uint16_t cc = 0);
-  void c_s(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);
-  void c_d(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);
-
-  void bc1f(int16_t offset, uint16_t cc = 0);
-  inline void bc1f(Label* L, uint16_t cc = 0) {
-    bc1f(shifted_branch_offset(L), cc);
-  }
-  void bc1t(int16_t offset, uint16_t cc = 0);
-  inline void bc1t(Label* L, uint16_t cc = 0) {
-    bc1t(shifted_branch_offset(L), cc);
-  }
-  void fcmp(FPURegister src1, const double src2, FPUCondition cond);
-
-  // MSA instructions
-  void bz_v(MSARegister wt, int16_t offset);
-  inline void bz_v(MSARegister wt, Label* L) {
-    bz_v(wt, shifted_branch_offset(L));
-  }
-  void bz_b(MSARegister wt, int16_t offset);
-  inline void bz_b(MSARegister wt, Label* L) {
-    bz_b(wt, shifted_branch_offset(L));
-  }
-  void bz_h(MSARegister wt, int16_t offset);
-  inline void bz_h(MSARegister wt, Label* L) {
-    bz_h(wt, shifted_branch_offset(L));
-  }
-  void bz_w(MSARegister wt, int16_t offset);
-  inline void bz_w(MSARegister wt, Label* L) {
-    bz_w(wt, shifted_branch_offset(L));
-  }
-  void bz_d(MSARegister wt, int16_t offset);
-  inline void bz_d(MSARegister wt, Label* L) {
-    bz_d(wt, shifted_branch_offset(L));
-  }
-  void bnz_v(MSARegister wt, int16_t offset);
-  inline void bnz_v(MSARegister wt, Label* L) {
-    bnz_v(wt, shifted_branch_offset(L));
-  }
-  void bnz_b(MSARegister wt, int16_t offset);
-  inline void bnz_b(MSARegister wt, Label* L) {
-    bnz_b(wt, shifted_branch_offset(L));
-  }
-  void bnz_h(MSARegister wt, int16_t offset);
-  inline void bnz_h(MSARegister wt, Label* L) {
-    bnz_h(wt, shifted_branch_offset(L));
-  }
-  void bnz_w(MSARegister wt, int16_t offset);
-  inline void bnz_w(MSARegister wt, Label* L) {
-    bnz_w(wt, shifted_branch_offset(L));
-  }
-  void bnz_d(MSARegister wt, int16_t offset);
-  inline void bnz_d(MSARegister wt, Label* L) {
-    bnz_d(wt, shifted_branch_offset(L));
-  }
-
-  void ld_b(MSARegister wd, const MemOperand& rs);
-  void ld_h(MSARegister wd, const MemOperand& rs);
-  void ld_w(MSARegister wd, const MemOperand& rs);
-  void ld_d(MSARegister wd, const MemOperand& rs);
-  void st_b(MSARegister wd, const MemOperand& rs);
-  void st_h(MSARegister wd, const MemOperand& rs);
-  void st_w(MSARegister wd, const MemOperand& rs);
-  void st_d(MSARegister wd, const MemOperand& rs);
-
-  void ldi_b(MSARegister wd, int32_t imm10);
-  void ldi_h(MSARegister wd, int32_t imm10);
-  void ldi_w(MSARegister wd, int32_t imm10);
-  void ldi_d(MSARegister wd, int32_t imm10);
-
-  void addvi_b(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void addvi_h(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void addvi_w(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void addvi_d(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void subvi_b(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void subvi_h(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void subvi_w(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void subvi_d(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void maxi_s_b(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void maxi_s_h(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void maxi_s_w(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void maxi_s_d(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void maxi_u_b(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void maxi_u_h(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void maxi_u_w(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void maxi_u_d(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void mini_s_b(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void mini_s_h(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void mini_s_w(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void mini_s_d(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void mini_u_b(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void mini_u_h(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void mini_u_w(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void mini_u_d(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void ceqi_b(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void ceqi_h(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void ceqi_w(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void ceqi_d(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void clti_s_b(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void clti_s_h(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void clti_s_w(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void clti_s_d(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void clti_u_b(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void clti_u_h(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void clti_u_w(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void clti_u_d(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void clei_s_b(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void clei_s_h(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void clei_s_w(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void clei_s_d(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void clei_u_b(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void clei_u_h(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void clei_u_w(MSARegister wd, MSARegister ws, uint32_t imm5);
-  void clei_u_d(MSARegister wd, MSARegister ws, uint32_t imm5);
-
-  void andi_b(MSARegister wd, MSARegister ws, uint32_t imm8);
-  void ori_b(MSARegister wd, MSARegister ws, uint32_t imm8);
-  void nori_b(MSARegister wd, MSARegister ws, uint32_t imm8);
-  void xori_b(MSARegister wd, MSARegister ws, uint32_t imm8);
-  void bmnzi_b(MSARegister wd, MSARegister ws, uint32_t imm8);
-  void bmzi_b(MSARegister wd, MSARegister ws, uint32_t imm8);
-  void bseli_b(MSARegister wd, MSARegister ws, uint32_t imm8);
-  void shf_b(MSARegister wd, MSARegister ws, uint32_t imm8);
-  void shf_h(MSARegister wd, MSARegister ws, uint32_t imm8);
-  void shf_w(MSARegister wd, MSARegister ws, uint32_t imm8);
-
-  void and_v(MSARegister wd, MSARegister ws, MSARegister wt);
-  void or_v(MSARegister wd, MSARegister ws, MSARegister wt);
-  void nor_v(MSARegister wd, MSARegister ws, MSARegister wt);
-  void xor_v(MSARegister wd, MSARegister ws, MSARegister wt);
-  void bmnz_v(MSARegister wd, MSARegister ws, MSARegister wt);
-  void bmz_v(MSARegister wd, MSARegister ws, MSARegister wt);
-  void bsel_v(MSARegister wd, MSARegister ws, MSARegister wt);
-
-  void fill_b(MSARegister wd, Register rs);
-  void fill_h(MSARegister wd, Register rs);
-  void fill_w(MSARegister wd, Register rs);
-  void pcnt_b(MSARegister wd, MSARegister ws);
-  void pcnt_h(MSARegister wd, MSARegister ws);
-  void pcnt_w(MSARegister wd, MSARegister ws);
-  void pcnt_d(MSARegister wd, MSARegister ws);
-  void nloc_b(MSARegister wd, MSARegister ws);
-  void nloc_h(MSARegister wd, MSARegister ws);
-  void nloc_w(MSARegister wd, MSARegister ws);
-  void nloc_d(MSARegister wd, MSARegister ws);
-  void nlzc_b(MSARegister wd, MSARegister ws);
-  void nlzc_h(MSARegister wd, MSARegister ws);
-  void nlzc_w(MSARegister wd, MSARegister ws);
-  void nlzc_d(MSARegister wd, MSARegister ws);
-
-  void fclass_w(MSARegister wd, MSARegister ws);
-  void fclass_d(MSARegister wd, MSARegister ws);
-  void ftrunc_s_w(MSARegister wd, MSARegister ws);
-  void ftrunc_s_d(MSARegister wd, MSARegister ws);
-  void ftrunc_u_w(MSARegister wd, MSARegister ws);
-  void ftrunc_u_d(MSARegister wd, MSARegister ws);
-  void fsqrt_w(MSARegister wd, MSARegister ws);
-  void fsqrt_d(MSARegister wd, MSARegister ws);
-  void frsqrt_w(MSARegister wd, MSARegister ws);
-  void frsqrt_d(MSARegister wd, MSARegister ws);
-  void frcp_w(MSARegister wd, MSARegister ws);
-  void frcp_d(MSARegister wd, MSARegister ws);
-  void frint_w(MSARegister wd, MSARegister ws);
-  void frint_d(MSARegister wd, MSARegister ws);
-  void flog2_w(MSARegister wd, MSARegister ws);
-  void flog2_d(MSARegister wd, MSARegister ws);
-  void fexupl_w(MSARegister wd, MSARegister ws);
-  void fexupl_d(MSARegister wd, MSARegister ws);
-  void fexupr_w(MSARegister wd, MSARegister ws);
-  void fexupr_d(MSARegister wd, MSARegister ws);
-  void ffql_w(MSARegister wd, MSARegister ws);
-  void ffql_d(MSARegister wd, MSARegister ws);
-  void ffqr_w(MSARegister wd, MSARegister ws);
-  void ffqr_d(MSARegister wd, MSARegister ws);
-  void ftint_s_w(MSARegister wd, MSARegister ws);
-  void ftint_s_d(MSARegister wd, MSARegister ws);
-  void ftint_u_w(MSARegister wd, MSARegister ws);
-  void ftint_u_d(MSARegister wd, MSARegister ws);
-  void ffint_s_w(MSARegister wd, MSARegister ws);
-  void ffint_s_d(MSARegister wd, MSARegister ws);
-  void ffint_u_w(MSARegister wd, MSARegister ws);
-  void ffint_u_d(MSARegister wd, MSARegister ws);
-
-  void sll_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void sll_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void sll_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void sll_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void sra_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void sra_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void sra_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void sra_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void srl_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void srl_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void srl_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void srl_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void bclr_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void bclr_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void bclr_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void bclr_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void bset_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void bset_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void bset_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void bset_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void bneg_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void bneg_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void bneg_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void bneg_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void binsl_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void binsl_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void binsl_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void binsl_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void binsr_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void binsr_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void binsr_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void binsr_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void addv_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void addv_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void addv_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void addv_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void subv_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void subv_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void subv_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void subv_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void max_s_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void max_s_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void max_s_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void max_s_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void max_u_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void max_u_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void max_u_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void max_u_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void min_s_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void min_s_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void min_s_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void min_s_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void min_u_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void min_u_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void min_u_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void min_u_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void max_a_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void max_a_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void max_a_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void max_a_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void min_a_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void min_a_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void min_a_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void min_a_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void ceq_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void ceq_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void ceq_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void ceq_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void clt_s_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void clt_s_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void clt_s_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void clt_s_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void clt_u_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void clt_u_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void clt_u_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void clt_u_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void cle_s_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void cle_s_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void cle_s_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void cle_s_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void cle_u_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void cle_u_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void cle_u_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void cle_u_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void add_a_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void add_a_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void add_a_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void add_a_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void adds_a_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void adds_a_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void adds_a_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void adds_a_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void adds_s_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void adds_s_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void adds_s_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void adds_s_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void adds_u_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void adds_u_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void adds_u_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void adds_u_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void ave_s_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void ave_s_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void ave_s_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void ave_s_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void ave_u_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void ave_u_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void ave_u_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void ave_u_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void aver_s_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void aver_s_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void aver_s_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void aver_s_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void aver_u_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void aver_u_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void aver_u_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void aver_u_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void subs_s_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void subs_s_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void subs_s_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void subs_s_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void subs_u_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void subs_u_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void subs_u_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void subs_u_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void subsus_u_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void subsus_u_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void subsus_u_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void subsus_u_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void subsus_s_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void subsus_s_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void subsus_s_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void subsus_s_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void subsuu_u_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void subsuu_u_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void subsuu_u_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void subsuu_u_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void subsuu_s_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void subsuu_s_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void subsuu_s_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void subsuu_s_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void asub_s_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void asub_s_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void asub_s_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void asub_s_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void asub_u_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void asub_u_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void asub_u_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void asub_u_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void mulv_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void mulv_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void mulv_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void mulv_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void maddv_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void maddv_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void maddv_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void maddv_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void msubv_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void msubv_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void msubv_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void msubv_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void div_s_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void div_s_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void div_s_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void div_s_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void div_u_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void div_u_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void div_u_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void div_u_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void mod_s_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void mod_s_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void mod_s_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void mod_s_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void mod_u_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void mod_u_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void mod_u_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void mod_u_d(MSARegister wd, MSARegister ws, MSARegister wt);
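The dotp_s_*/dotp_u_* group that follows computes dot products: adjacent element pairs are multiplied and summed into a double-width lane. A plain C++ model of one 32-bit output lane of dotp_s_h (a sketch, not code from this patch):

  int32_t DotpSHModel(int16_t a0, int16_t a1, int16_t b0, int16_t b1) {
    return int32_t{a0} * b0 + int32_t{a1} * b1;  // widen, multiply, accumulate
  }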
-  void dotp_s_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void dotp_s_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void dotp_s_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void dotp_s_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void dotp_u_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void dotp_u_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void dotp_u_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void dotp_u_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void dpadd_s_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void dpadd_s_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void dpadd_s_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void dpadd_s_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void dpadd_u_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void dpadd_u_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void dpadd_u_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void dpadd_u_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void dpsub_s_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void dpsub_s_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void dpsub_s_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void dpsub_s_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void dpsub_u_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void dpsub_u_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void dpsub_u_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void dpsub_u_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void sld_b(MSARegister wd, MSARegister ws, Register rt);
-  void sld_h(MSARegister wd, MSARegister ws, Register rt);
-  void sld_w(MSARegister wd, MSARegister ws, Register rt);
-  void sld_d(MSARegister wd, MSARegister ws, Register rt);
-  void splat_b(MSARegister wd, MSARegister ws, Register rt);
-  void splat_h(MSARegister wd, MSARegister ws, Register rt);
-  void splat_w(MSARegister wd, MSARegister ws, Register rt);
-  void splat_d(MSARegister wd, MSARegister ws, Register rt);
-  void pckev_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void pckev_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void pckev_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void pckev_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void pckod_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void pckod_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void pckod_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void pckod_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void ilvl_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void ilvl_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void ilvl_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void ilvl_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void ilvr_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void ilvr_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void ilvr_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void ilvr_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void ilvev_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void ilvev_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void ilvev_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void ilvev_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void ilvod_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void ilvod_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void ilvod_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void ilvod_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void vshf_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void vshf_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void vshf_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void vshf_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void srar_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void srar_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void srar_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void srar_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void srlr_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void srlr_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void srlr_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void srlr_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void hadd_s_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void hadd_s_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void hadd_s_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void hadd_s_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void hadd_u_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void hadd_u_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void hadd_u_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void hadd_u_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void hsub_s_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void hsub_s_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void hsub_s_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void hsub_s_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void hsub_u_b(MSARegister wd, MSARegister ws, MSARegister wt);
-  void hsub_u_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void hsub_u_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void hsub_u_d(MSARegister wd, MSARegister ws, MSARegister wt);
-
-  void fcaf_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fcaf_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fcun_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fcun_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fceq_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fceq_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fcueq_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fcueq_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fclt_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fclt_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fcult_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fcult_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fcle_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fcle_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fcule_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fcule_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fsaf_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fsaf_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fsun_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fsun_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fseq_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fseq_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fsueq_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fsueq_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fslt_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fslt_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fsult_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fsult_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fsle_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fsle_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fsule_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fsule_d(MSARegister wd, MSARegister ws, MSARegister wt);
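The fc* compare predicates above come in ordered/unordered pairs (fclt vs fcult, fcle vs fcule, ...): the "u" variants are additionally true when either operand is NaN. A plain C++ model of one pair (a sketch, not this patch's code):

  bool FcltModel(double a, double b) { return a < b; }  // NaN input -> false
  bool FcultModel(double a, double b) {
    return (a != a) || (b != b) || (a < b);  // unordered (NaN) -> true
  }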
-  void fadd_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fadd_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fsub_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fsub_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fmul_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fmul_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fdiv_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fdiv_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fmadd_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fmadd_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fmsub_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fmsub_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fexp2_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fexp2_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fexdo_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fexdo_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void ftq_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void ftq_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fmin_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fmin_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fmin_a_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fmin_a_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fmax_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fmax_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fmax_a_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fmax_a_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fcor_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fcor_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fcune_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fcune_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fcne_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fcne_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void mul_q_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void mul_q_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void madd_q_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void madd_q_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void msub_q_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void msub_q_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fsor_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fsor_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fsune_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fsune_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fsne_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void fsne_d(MSARegister wd, MSARegister ws, MSARegister wt);
-  void mulr_q_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void mulr_q_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void maddr_q_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void maddr_q_w(MSARegister wd, MSARegister ws, MSARegister wt);
-  void msubr_q_h(MSARegister wd, MSARegister ws, MSARegister wt);
-  void msubr_q_w(MSARegister wd, MSARegister ws, MSARegister wt);
-
-  void sldi_b(MSARegister wd, MSARegister ws, uint32_t n);
-  void sldi_h(MSARegister wd, MSARegister ws, uint32_t n);
-  void sldi_w(MSARegister wd, MSARegister ws, uint32_t n);
-  void sldi_d(MSARegister wd, MSARegister ws, uint32_t n);
-  void splati_b(MSARegister wd, MSARegister ws, uint32_t n);
-  void splati_h(MSARegister wd, MSARegister ws, uint32_t n);
-  void splati_w(MSARegister wd, MSARegister ws, uint32_t n);
-  void splati_d(MSARegister wd, MSARegister ws, uint32_t n);
-  void copy_s_b(Register rd, MSARegister ws, uint32_t n);
-  void copy_s_h(Register rd, MSARegister ws, uint32_t n);
-  void copy_s_w(Register rd, MSARegister ws, uint32_t n);
-  void copy_u_b(Register rd, MSARegister ws, uint32_t n);
-  void copy_u_h(Register rd, MSARegister ws, uint32_t n);
-  void copy_u_w(Register rd, MSARegister ws, uint32_t n);
-  void insert_b(MSARegister wd, uint32_t n, Register rs);
-  void insert_h(MSARegister wd, uint32_t n, Register rs);
-  void insert_w(MSARegister wd, uint32_t n, Register rs);
-  void insve_b(MSARegister wd, uint32_t n, MSARegister ws);
-  void insve_h(MSARegister wd, uint32_t n, MSARegister ws);
-  void insve_w(MSARegister wd, uint32_t n, MSARegister ws);
-  void insve_d(MSARegister wd, uint32_t n, MSARegister ws);
-  void move_v(MSARegister wd, MSARegister ws);
-  void ctcmsa(MSAControlRegister cd, Register rs);
-  void cfcmsa(Register rd, MSAControlRegister cs);
-
-  void slli_b(MSARegister wd, MSARegister ws, uint32_t m);
-  void slli_h(MSARegister wd, MSARegister ws, uint32_t m);
-  void slli_w(MSARegister wd, MSARegister ws, uint32_t m);
-  void slli_d(MSARegister wd, MSARegister ws, uint32_t m);
-  void srai_b(MSARegister wd, MSARegister ws, uint32_t m);
-  void srai_h(MSARegister wd, MSARegister ws, uint32_t m);
-  void srai_w(MSARegister wd, MSARegister ws, uint32_t m);
-  void srai_d(MSARegister wd, MSARegister ws, uint32_t m);
-  void srli_b(MSARegister wd, MSARegister ws, uint32_t m);
-  void srli_h(MSARegister wd, MSARegister ws, uint32_t m);
-  void srli_w(MSARegister wd, MSARegister ws, uint32_t m);
-  void srli_d(MSARegister wd, MSARegister ws, uint32_t m);
-  void bclri_b(MSARegister wd, MSARegister ws, uint32_t m);
-  void bclri_h(MSARegister wd, MSARegister ws, uint32_t m);
-  void bclri_w(MSARegister wd, MSARegister ws, uint32_t m);
-  void bclri_d(MSARegister wd, MSARegister ws, uint32_t m);
-  void bseti_b(MSARegister wd, MSARegister ws, uint32_t m);
-  void bseti_h(MSARegister wd, MSARegister ws, uint32_t m);
-  void bseti_w(MSARegister wd, MSARegister ws, uint32_t m);
-  void bseti_d(MSARegister wd, MSARegister ws, uint32_t m);
-  void bnegi_b(MSARegister wd, MSARegister ws, uint32_t m);
-  void bnegi_h(MSARegister wd, MSARegister ws, uint32_t m);
-  void bnegi_w(MSARegister wd, MSARegister ws, uint32_t m);
-  void bnegi_d(MSARegister wd, MSARegister ws, uint32_t m);
-  void binsli_b(MSARegister wd, MSARegister ws, uint32_t m);
-  void binsli_h(MSARegister wd, MSARegister ws, uint32_t m);
-  void binsli_w(MSARegister wd, MSARegister ws, uint32_t m);
-  void binsli_d(MSARegister wd, MSARegister ws, uint32_t m);
-  void binsri_b(MSARegister wd, MSARegister ws, uint32_t m);
-  void binsri_h(MSARegister wd, MSARegister ws, uint32_t m);
-  void binsri_w(MSARegister wd, MSARegister ws, uint32_t m);
-  void binsri_d(MSARegister wd, MSARegister ws, uint32_t m);
-  void sat_s_b(MSARegister wd, MSARegister ws, uint32_t m);
-  void sat_s_h(MSARegister wd, MSARegister ws, uint32_t m);
-  void sat_s_w(MSARegister wd, MSARegister ws, uint32_t m);
-  void sat_s_d(MSARegister wd, MSARegister ws, uint32_t m);
-  void sat_u_b(MSARegister wd, MSARegister ws, uint32_t m);
-  void sat_u_h(MSARegister wd, MSARegister ws, uint32_t m);
-  void sat_u_w(MSARegister wd, MSARegister ws, uint32_t m);
-  void sat_u_d(MSARegister wd, MSARegister ws, uint32_t m);
-  void srari_b(MSARegister wd, MSARegister ws, uint32_t m);
-  void srari_h(MSARegister wd, MSARegister ws, uint32_t m);
-  void srari_w(MSARegister wd, MSARegister ws, uint32_t m);
-  void srari_d(MSARegister wd, MSARegister ws, uint32_t m);
-  void srlri_b(MSARegister wd, MSARegister ws, uint32_t m);
-  void srlri_h(MSARegister wd, MSARegister ws, uint32_t m);
-  void srlri_w(MSARegister wd, MSARegister ws, uint32_t m);
-  void srlri_d(MSARegister wd, MSARegister ws, uint32_t m);
-
-  // Check the code size generated from label to here.
-  int SizeOfCodeGeneratedSince(Label* label) {
-    return pc_offset() - label->pos();
-  }
-
-  // Check the number of instructions generated from label to here.
-  int InstructionsGeneratedSince(Label* label) {
-    return SizeOfCodeGeneratedSince(label) / kInstrSize;
-  }
-
-  // Class for scoping postponing the trampoline pool generation.
-  class V8_NODISCARD BlockTrampolinePoolScope {
-   public:
-    explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
-      assem_->StartBlockTrampolinePool();
-    }
-    ~BlockTrampolinePoolScope() { assem_->EndBlockTrampolinePool(); }
-
-   private:
-    Assembler* assem_;
-
-    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
-  };
-
-  // Class for postponing the assembly buffer growth. Typically used for
-  // sequences of instructions that must be emitted as a unit, before
-  // buffer growth (and relocation) can occur.
-  // This blocking scope is not nestable.
-  class V8_NODISCARD BlockGrowBufferScope {
-   public:
-    explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) {
-      assem_->StartBlockGrowBuffer();
-    }
-    ~BlockGrowBufferScope() { assem_->EndBlockGrowBuffer(); }
-
-   private:
-    Assembler* assem_;
-
-    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope);
-  };
-
-  // Record a deoptimization reason that can be used by a log or cpu profiler.
-  // Use --trace-deopt to enable.
-  void RecordDeoptReason(DeoptimizeReason reason, uint32_t node_id,
-                         SourcePosition position, int id);
-
-  static int RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
-                                       intptr_t pc_delta);
-
-  static void RelocateRelativeReference(RelocInfo::Mode rmode, Address pc,
-                                        intptr_t pc_delta);
-
-  // Writes a single byte or word of data in the code stream. Used for
-  // inline tables, e.g., jump-tables.
-  void db(uint8_t data);
-  void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
-  void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO);
-  void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO) {
-    dd(data, rmode);
-  }
-  void dd(Label* label);
-
-  // Postpone the generation of the trampoline pool for the specified number of
-  // instructions.
-  void BlockTrampolinePoolFor(int instructions);
-
-  // Check if there is less than kGap bytes available in the buffer.
-  // If this is the case, we need to grow the buffer before emitting
-  // an instruction or relocation information.
-  inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
-
-  // Get the number of bytes available in the buffer.
-  inline int available_space() const { return reloc_info_writer.pos() - pc_; }
-
-  // Read/patch instructions.
-  static Instr instr_at(Address pc) { return *reinterpret_cast<Instr*>(pc); }
-  static void instr_at_put(Address pc, Instr instr) {
-    *reinterpret_cast<Instr*>(pc) = instr;
-  }
-  Instr instr_at(int pos) {
-    return *reinterpret_cast<Instr*>(buffer_start_ + pos);
-  }
-  void instr_at_put(int pos, Instr instr) {
-    *reinterpret_cast<Instr*>(buffer_start_ + pos) = instr;
-  }
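A sketch of how the scopes above are used: a fixed-shape sequence, here a two-instruction lui/ori immediate load using emitters declared earlier in this header, must not be split by trampoline-pool emission. Hypothetical helper, illustrative only:

  void EmitPatchableLoad(Assembler* masm, Register dst, int32_t imm) {
    Assembler::BlockTrampolinePoolScope block_pool(masm);  // no pool in here
    Label start;
    masm->bind(&start);
    masm->lui(dst, (imm >> 16) & 0xFFFF);  // upper half of the constant
    masm->ori(dst, dst, imm & 0xFFFF);     // lower half of the constant
    DCHECK_EQ(2, masm->InstructionsGeneratedSince(&start));  // fixed shape
  }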
-
-  // Check if an instruction is a branch of some kind.
-  static bool IsBranch(Instr instr);
-  static bool IsMsaBranch(Instr instr);
-  static bool IsBc(Instr instr);
-  static bool IsNal(Instr instr);
-  static bool IsBzc(Instr instr);
-  static bool IsBeq(Instr instr);
-  static bool IsBne(Instr instr);
-  static bool IsBeqzc(Instr instr);
-  static bool IsBnezc(Instr instr);
-  static bool IsBeqc(Instr instr);
-  static bool IsBnec(Instr instr);
-  static bool IsJicOrJialc(Instr instr);
-  static bool IsMov(Instr instr, Register rd, Register rs);
-
-  static bool IsJump(Instr instr);
-  static bool IsJ(Instr instr);
-  static bool IsLui(Instr instr);
-  static bool IsOri(Instr instr);
-  static bool IsAddu(Instr instr, Register rd, Register rs, Register rt);
-
-  static bool IsJal(Instr instr);
-  static bool IsJr(Instr instr);
-  static bool IsJalr(Instr instr);
-
-  static bool IsNop(Instr instr, unsigned int type);
-  static bool IsPop(Instr instr);
-  static bool IsPush(Instr instr);
-  static bool IsLwRegFpOffset(Instr instr);
-  static bool IsSwRegFpOffset(Instr instr);
-  static bool IsLwRegFpNegOffset(Instr instr);
-  static bool IsSwRegFpNegOffset(Instr instr);
-
-  static Register GetRtReg(Instr instr);
-  static Register GetRsReg(Instr instr);
-  static Register GetRdReg(Instr instr);
-
-  static uint32_t GetRt(Instr instr);
-  static uint32_t GetRtField(Instr instr);
-  static uint32_t GetRs(Instr instr);
-  static uint32_t GetRsField(Instr instr);
-  static uint32_t GetRd(Instr instr);
-  static uint32_t GetRdField(Instr instr);
-  static uint32_t GetSa(Instr instr);
-  static uint32_t GetSaField(Instr instr);
-  static uint32_t GetOpcodeField(Instr instr);
-  static uint32_t GetFunction(Instr instr);
-  static uint32_t GetFunctionField(Instr instr);
-  static uint32_t GetImmediate16(Instr instr);
-  static uint32_t GetLabelConst(Instr instr);
-
-  static int32_t GetBranchOffset(Instr instr);
-  static bool IsLw(Instr instr);
-  static int16_t GetLwOffset(Instr instr);
-  static int16_t GetJicOrJialcOffset(Instr instr);
-  static int16_t GetLuiOffset(Instr instr);
-  static Instr SetLwOffset(Instr instr, int16_t offset);
-
-  static bool IsSw(Instr instr);
-  static Instr SetSwOffset(Instr instr, int16_t offset);
-  static bool IsAddImmediate(Instr instr);
-  static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
-  static uint32_t CreateTargetAddress(Instr instr_lui, Instr instr_jic);
-  static void UnpackTargetAddress(uint32_t address, int16_t* lui_offset,
-                                  int16_t* jic_offset);
-  static void UnpackTargetAddressUnsigned(uint32_t address,
-                                          uint32_t* lui_offset,
-                                          uint32_t* jic_offset);
-
-  static bool IsAndImmediate(Instr instr);
-  static bool IsEmittedConstant(Instr instr);
-
-  void CheckTrampolinePool();
-
-  bool IsPrevInstrCompactBranch() { return prev_instr_compact_branch_; }
-  static bool IsCompactBranchSupported() {
-    return IsMipsArchVariant(kMips32r6);
-  }
-
-  // Get the code target object for a pc-relative call or jump.
-  V8_INLINE Handle<Code> relative_code_target_object_handle_at(
-      Address pc_) const;
-
-  inline int UnboundLabelsCount() { return unbound_labels_count_; }
-
-  bool is_trampoline_emitted() const { return trampoline_emitted_; }
-
- protected:
-  // Load Scaled Address instruction.
-  void lsa(Register rd, Register rt, Register rs, uint8_t sa);
-
-  // Readable constants for base and offset adjustment helper, these indicate
-  // if, aside from offset, another value like offset + 4 should fit into
-  // int16.
-  enum class OffsetAccessType : bool {
-    SINGLE_ACCESS = false,
-    TWO_ACCESSES = true
-  };
-
-  // Helper function for memory load/store using base register and offset.
-  void AdjustBaseAndOffset(
-      MemOperand* src,
-      OffsetAccessType access_type = OffsetAccessType::SINGLE_ACCESS,
-      int second_access_add_to_offset = 4);
-
-  int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
-
-  // Decode branch instruction at pos and return branch target pos.
-  int target_at(int pos, bool is_internal);
-
-  // Patch branch instruction at pos to branch to given branch target pos.
-  void target_at_put(int pos, int target_pos, bool is_internal);
-
-  // Say if we need to relocate with this mode.
-  bool MustUseReg(RelocInfo::Mode rmode);
-
-  // Record reloc info for current pc_.
-  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
-
-  // Read 32-bit immediate from lui, ori pair that is used to load immediate.
-  static int32_t GetLuiOriImmediate(Instr instr1, Instr instr2);
-
-  // Block the emission of the trampoline pool before pc_offset.
-  void BlockTrampolinePoolBefore(int pc_offset) {
-    if (no_trampoline_pool_before_ < pc_offset)
-      no_trampoline_pool_before_ = pc_offset;
-  }
-
-  void StartBlockTrampolinePool() { trampoline_pool_blocked_nesting_++; }
-
-  void EndBlockTrampolinePool() {
-    trampoline_pool_blocked_nesting_--;
-    if (trampoline_pool_blocked_nesting_ == 0) {
-      CheckTrampolinePoolQuick(1);
-    }
-  }
-
-  bool is_trampoline_pool_blocked() const {
-    return trampoline_pool_blocked_nesting_ > 0;
-  }
-
-  bool has_exception() const { return internal_trampoline_exception_; }
-
-  // Temporarily block automatic assembly buffer growth.
-  void StartBlockGrowBuffer() {
-    DCHECK(!block_buffer_growth_);
-    block_buffer_growth_ = true;
-  }
-
-  void EndBlockGrowBuffer() {
-    DCHECK(block_buffer_growth_);
-    block_buffer_growth_ = false;
-  }
-
-  bool is_buffer_growth_blocked() const { return block_buffer_growth_; }
-
-  void EmitForbiddenSlotInstruction() {
-    if (IsPrevInstrCompactBranch()) {
-      nop();
-    }
-  }
-
-  inline void CheckTrampolinePoolQuick(int extra_instructions = 0) {
-    if (pc_offset() >= next_buffer_check_ - extra_instructions * kInstrSize) {
-      CheckTrampolinePool();
-    }
-  }
-
-  inline void CheckBuffer();
-
-  RegList scratch_register_list_;
-
-  // Generate common instruction sequence.
-  void GenPCRelativeJump(Register tf, Register ts, int32_t imm32,
-                         RelocInfo::Mode rmode, BranchDelaySlot bdslot);
-  void GenPCRelativeJumpAndLink(Register t, int32_t imm32,
-                                RelocInfo::Mode rmode, BranchDelaySlot bdslot);
-
-  void set_pc_for_safepoint() { pc_for_safepoint_ = pc_; }
-
- private:
-  // Avoid overflows for displacements etc.
-  static const int kMaximalBufferSize = 512 * MB;
-
-  inline static void set_target_internal_reference_encoded_at(Address pc,
-                                                              Address target);
-
-  // Buffer size and constant pool distance are checked together at regular
-  // intervals of kBufferCheckInterval emitted bytes.
-  static constexpr int kBufferCheckInterval = 1 * KB / 2;
-
-  // Code generation.
-  // The relocation writer's position is at least kGap bytes below the end of
-  // the generated instructions. This is so that multi-instruction sequences do
-  // not have to check for overflow. The same is true for writes of large
-  // relocation info entries.
-  static constexpr int kGap = 32;
-  static_assert(AssemblerBase::kMinimalBufferSize >= 2 * kGap);
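AdjustBaseAndOffset above exists because MIPS loads and stores encode only a signed 16-bit offset; anything larger must first be folded into the base register. A hypothetical sketch of that idea (helper name and shape assumed, not this patch's code):

  void LoadWordLargeOffset(Assembler* masm, Register dst, Register base,
                           int32_t offset, Register scratch) {
    if (is_int16(offset)) {
      masm->lw(dst, MemOperand(base, offset));  // fits the 16-bit field
    } else {
      masm->lui(scratch, (offset >> 16) & 0xFFFF);   // materialize the offset
      masm->ori(scratch, scratch, offset & 0xFFFF);  // ...in a scratch register
      masm->addu(scratch, scratch, base);            // fold it into the base
      masm->lw(dst, MemOperand(scratch, 0));
    }
  }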
-
-  // Repeated checking whether the trampoline pool should be emitted is rather
-  // expensive. By default we only check again once a number of instructions
-  // has been generated.
-  static constexpr int kCheckConstIntervalInst = 32;
-  static constexpr int kCheckConstInterval =
-      kCheckConstIntervalInst * kInstrSize;
-
-  int next_buffer_check_;  // pc offset of next buffer check.
-
-  // Emission of the trampoline pool may be blocked in some code sequences.
-  int trampoline_pool_blocked_nesting_;  // Block emission if this is not zero.
-  int no_trampoline_pool_before_;  // Block emission before this pc offset.
-
-  // Keep track of the last emitted pool to guarantee a maximal distance.
-  int last_trampoline_pool_end_;  // pc offset of the end of the last pool.
-
-  // Automatic growth of the assembly buffer may be blocked for some sequences.
-  bool block_buffer_growth_;  // Block growth when true.
-
-  // Relocation information generation.
-  // Each relocation is encoded as a variable size value.
-  static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize;
-  RelocInfoWriter reloc_info_writer;
-
-  // The bound position, before this we cannot do instruction elimination.
-  int last_bound_pos_;
-
-  // Readable constants for compact branch handling in emit().
-  enum class CompactBranchType : bool { NO = false, COMPACT_BRANCH = true };
-
-  // Code emission.
-  void GrowBuffer();
-  inline void emit(Instr x,
-                   CompactBranchType is_compact_branch = CompactBranchType::NO);
-  inline void emit(uint64_t x);
-  inline void CheckForEmitInForbiddenSlot();
-  template <typename T>
-  inline void EmitHelper(T x);
-  inline void EmitHelper(Instr x, CompactBranchType is_compact_branch);
-
-  // Instruction generation.
-  // We have three different kinds of encoding layout on MIPS.
-  // However due to many different types of objects encoded in the same fields
-  // we have quite a few aliases for each mode.
-  // Using the same structure to refer to Register and FPURegister would spare a
-  // few aliases, but mixing both does not look clean to me.
-  // Anyway we could surely implement this differently.
-
-  void GenInstrRegister(Opcode opcode, Register rs, Register rt, Register rd,
-                        uint16_t sa = 0, SecondaryField func = nullptrSF);
-
-  void GenInstrRegister(Opcode opcode, Register rs, Register rt, uint16_t msb,
-                        uint16_t lsb, SecondaryField func);
-
-  void GenInstrRegister(Opcode opcode, SecondaryField fmt, FPURegister ft,
-                        FPURegister fs, FPURegister fd,
-                        SecondaryField func = nullptrSF);
-
-  void GenInstrRegister(Opcode opcode, FPURegister fr, FPURegister ft,
-                        FPURegister fs, FPURegister fd,
-                        SecondaryField func = nullptrSF);
-
-  void GenInstrRegister(Opcode opcode, SecondaryField fmt, Register rt,
-                        FPURegister fs, FPURegister fd,
-                        SecondaryField func = nullptrSF);
-
-  void GenInstrRegister(Opcode opcode, SecondaryField fmt, Register rt,
-                        FPUControlRegister fs, SecondaryField func = nullptrSF);
-
-  void GenInstrImmediate(
-      Opcode opcode, Register rs, Register rt, int32_t j,
-      CompactBranchType is_compact_branch = CompactBranchType::NO);
-  void GenInstrImmediate(
-      Opcode opcode, Register rs, SecondaryField SF, int32_t j,
-      CompactBranchType is_compact_branch = CompactBranchType::NO);
-  void GenInstrImmediate(
-      Opcode opcode, Register r1, FPURegister r2, int32_t j,
-      CompactBranchType is_compact_branch = CompactBranchType::NO);
-  void GenInstrImmediate(Opcode opcode, Register base, Register rt,
-                         int32_t offset9, int bit6, SecondaryField func);
-  void GenInstrImmediate(
-      Opcode opcode, Register rs, int32_t offset21,
-      CompactBranchType is_compact_branch = CompactBranchType::NO);
-  void GenInstrImmediate(Opcode opcode, Register rs, uint32_t offset21);
-  void GenInstrImmediate(
-      Opcode opcode, int32_t offset26,
-      CompactBranchType is_compact_branch = CompactBranchType::NO);
-
-  void GenInstrJump(Opcode opcode, uint32_t address);
-
-  // MSA
-  void GenInstrMsaI8(SecondaryField operation, uint32_t imm8, MSARegister ws,
-                     MSARegister wd);
-
-  void GenInstrMsaI5(SecondaryField operation, SecondaryField df, int32_t imm5,
-                     MSARegister ws, MSARegister wd);
-
-  void GenInstrMsaBit(SecondaryField operation, SecondaryField df, uint32_t m,
-                      MSARegister ws, MSARegister wd);
-
-  void GenInstrMsaI10(SecondaryField operation, SecondaryField df,
-                      int32_t imm10, MSARegister wd);
-
-  template <typename RegType>
-  void GenInstrMsa3R(SecondaryField operation, SecondaryField df, RegType t,
-                     MSARegister ws, MSARegister wd);
-
-  template <typename SrcType, typename DstType>
-  void GenInstrMsaElm(SecondaryField operation, SecondaryField df, uint32_t n,
-                      SrcType src, DstType dst);
-
-  void GenInstrMsa3RF(SecondaryField operation, uint32_t df, MSARegister wt,
-                      MSARegister ws, MSARegister wd);
-
-  void GenInstrMsaVec(SecondaryField operation, MSARegister wt, MSARegister ws,
-                      MSARegister wd);
-
-  void GenInstrMsaMI10(SecondaryField operation, int32_t s10, Register rs,
-                       MSARegister wd);
-
-  void GenInstrMsa2R(SecondaryField operation, SecondaryField df,
-                     MSARegister ws, MSARegister wd);
-
-  void GenInstrMsa2RF(SecondaryField operation, SecondaryField df,
-                      MSARegister ws, MSARegister wd);
-
-  void GenInstrMsaBranch(SecondaryField operation, MSARegister wt,
-                         int32_t offset16);
-
-  inline bool is_valid_msa_df_m(SecondaryField bit_df, uint32_t m) {
-    switch (bit_df) {
-      case BIT_DF_b:
-        return is_uint3(m);
-      case BIT_DF_h:
-        return is_uint4(m);
-      case BIT_DF_w:
-        return is_uint5(m);
-      case BIT_DF_d:
-        return is_uint6(m);
-      default:
-        return false;
-    }
-  }
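The is_uint3/is_uint5/... checks above validate that a bit-instruction immediate fits the MSA element width (32-bit elements admit shift amounts 0..31, hence is_uint5). A plain C++ model of the underlying helper, for illustration only (v8's own versions live elsewhere in the tree):

  // True when `value` fits in n bits, for 0 < n < 32.
  bool IsUintN(uint32_t value, int n) { return value < (1u << n); }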
-
-  inline bool is_valid_msa_df_n(SecondaryField elm_df, uint32_t n) {
-    switch (elm_df) {
-      case ELM_DF_B:
-        return is_uint4(n);
-      case ELM_DF_H:
-        return is_uint3(n);
-      case ELM_DF_W:
-        return is_uint2(n);
-      case ELM_DF_D:
-        return is_uint1(n);
-      default:
-        return false;
-    }
-  }
-
-  // Labels.
-  void print(const Label* L);
-  void bind_to(Label* L, int pos);
-  void next(Label* L, bool is_internal);
-
-  // Patching lui/ori pair which is commonly used for loading constants.
-  static void PatchLuiOriImmediate(Address pc, int32_t imm, Instr instr1,
-                                   Address offset_lui, Instr instr2,
-                                   Address offset_ori);
-  void PatchLuiOriImmediate(int pc, int32_t imm, Instr instr1,
-                            Address offset_lui, Instr instr2,
-                            Address offset_ori);
-
-  // One trampoline consists of:
-  // - space for trampoline slots,
-  // - space for labels.
-  //
-  // Space for trampoline slots is equal to slot_count * 2 * kInstrSize.
-  // Space for trampoline slots precedes space for labels. Each label is of one
-  // instruction size, so total amount for labels is equal to
-  // label_count * kInstrSize.
-  class Trampoline {
-   public:
-    Trampoline() {
-      start_ = 0;
-      next_slot_ = 0;
-      free_slot_count_ = 0;
-      end_ = 0;
-    }
-    Trampoline(int start, int slot_count) {
-      start_ = start;
-      next_slot_ = start;
-      free_slot_count_ = slot_count;
-      end_ = start + slot_count * kTrampolineSlotsSize;
-    }
-    int start() { return start_; }
-    int end() { return end_; }
-    int take_slot() {
-      int trampoline_slot = kInvalidSlotPos;
-      if (free_slot_count_ <= 0) {
-        // We have run out of space on trampolines.
-        // Make sure we fail in debug mode, so we become aware of each case
-        // when this happens.
-        DCHECK(0);
-        // Internal exception will be caught.
-      } else {
-        trampoline_slot = next_slot_;
-        free_slot_count_--;
-        next_slot_ += kTrampolineSlotsSize;
-      }
-      return trampoline_slot;
-    }
-
-   private:
-    int start_;
-    int end_;
-    int next_slot_;
-    int free_slot_count_;
-  };
-
-  int32_t get_trampoline_entry(int32_t pos);
-  int unbound_labels_count_;
-  // If trampoline is emitted, generated code is becoming large. As this is
-  // already a slow case which can possibly break our code generation for the
-  // extreme case, we use this information to trigger different mode of
-  // branch instruction generation, where we use jump instructions rather
-  // than regular branch instructions.
-  bool trampoline_emitted_;
-  static constexpr int kInvalidSlotPos = -1;
-
-  // Internal reference positions, required for unbounded internal reference
-  // labels.
-  std::set<int> internal_reference_positions_;
-  bool is_internal_reference(Label* L) {
-    return internal_reference_positions_.find(L->pos()) !=
-           internal_reference_positions_.end();
-  }
-
-  void EmittedCompactBranchInstruction() { prev_instr_compact_branch_ = true; }
-  void ClearCompactBranchState() { prev_instr_compact_branch_ = false; }
-  bool prev_instr_compact_branch_ = false;
-
-  Trampoline trampoline_;
-  bool internal_trampoline_exception_;
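The slot accounting in the Trampoline class above, reduced to a standalone model; the slot size constant is assumed (one slot = two 4-byte instructions, matching the comment above), and this is not code from the patch:

  #include <cassert>
  struct TrampolineModel {
    static constexpr int kSlotSize = 2 * 4;  // assumed kTrampolineSlotsSize
    int next_slot = 0;
    int free_slots = 0;
    int TakeSlot() {             // mirrors take_slot() above
      assert(free_slots > 0);    // pool exhaustion is a bug, as in DCHECK(0)
      int pos = next_slot;
      next_slot += kSlotSize;
      --free_slots;
      return pos;
    }
  };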
- byte* pc_for_safepoint_; - - private: - void AllocateAndInstallRequestedHeapObjects(Isolate* isolate); - - int WriteCodeComments(); - - friend class RegExpMacroAssemblerMIPS; - friend class RelocInfo; - friend class BlockTrampolinePoolScope; - friend class EnsureSpace; -}; - -class EnsureSpace { - public: - explicit V8_INLINE EnsureSpace(Assembler* assembler); -}; - -class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope { - public: - explicit UseScratchRegisterScope(Assembler* assembler); - ~UseScratchRegisterScope(); - - Register Acquire(); - bool hasAvailable() const; - - void Include(const RegList& list) { *available_ |= list; } - void Exclude(const RegList& list) { available_->clear(list); } - void Include(const Register& reg1, const Register& reg2 = no_reg) { - RegList list({reg1, reg2}); - Include(list); - } - void Exclude(const Register& reg1, const Register& reg2 = no_reg) { - RegList list({reg1, reg2}); - Exclude(list); - } - - private: - RegList* available_; - RegList old_available_; -}; - -} // namespace internal -} // namespace v8 - -#endif // V8_CODEGEN_MIPS_ASSEMBLER_MIPS_H_ diff --git a/src/codegen/mips/constants-mips.cc b/src/codegen/mips/constants-mips.cc deleted file mode 100644 index 4411387060..0000000000 --- a/src/codegen/mips/constants-mips.cc +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2011 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#if V8_TARGET_ARCH_MIPS - -#include "src/codegen/mips/constants-mips.h" - -namespace v8 { -namespace internal { - -// ----------------------------------------------------------------------------- -// Registers. - -// These register names are defined in a way to match the native disassembler -// formatting. See for example the command "objdump -d ". -const char* Registers::names_[kNumSimuRegisters] = { - "zero_reg", "at", "v0", "v1", "a0", "a1", "a2", "a3", "t0", - "t1", "t2", "t3", "t4", "t5", "t6", "t7", "s0", "s1", - "s2", "s3", "s4", "s5", "s6", "s7", "t8", "t9", "k0", - "k1", "gp", "sp", "fp", "ra", "LO", "HI", "pc"}; - -// List of alias names which can be used when referring to MIPS registers. -const Registers::RegisterAlias Registers::aliases_[] = { - {0, "zero"}, - {23, "cp"}, - {30, "s8"}, - {30, "s8_fp"}, - {kInvalidRegister, nullptr}}; - -const char* Registers::Name(int reg) { - const char* result; - if ((0 <= reg) && (reg < kNumSimuRegisters)) { - result = names_[reg]; - } else { - result = "noreg"; - } - return result; -} - -int Registers::Number(const char* name) { - // Look through the canonical names. - for (int i = 0; i < kNumSimuRegisters; i++) { - if (strcmp(names_[i], name) == 0) { - return i; - } - } - - // Look through the alias names. - int i = 0; - while (aliases_[i].reg != kInvalidRegister) { - if (strcmp(aliases_[i].name, name) == 0) { - return aliases_[i].reg; - } - i++; - } - - // No register with the reguested name found. - return kInvalidRegister; -} - -const char* FPURegisters::names_[kNumFPURegisters] = { - "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", - "f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21", - "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"}; - -// List of alias names which can be used when referring to MIPS registers. 
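
// A self-contained sketch of the two-phase lookup used by
// Registers::Number() above: canonical names first, then the alias table.
// The tables here are abbreviated copies, for illustration only.
#include <cstring>

namespace demo {
constexpr int kInvalid = -1;  // kInvalidRegister
const char* kNames[] = {"zero_reg", "at", "v0", "v1"};
struct Alias { int reg; const char* name; };
const Alias kAliases[] = {{0, "zero"}, {30, "s8"}, {kInvalid, nullptr}};

int Number(const char* name) {
  for (int i = 0; i < 4; i++)
    if (std::strcmp(kNames[i], name) == 0) return i;
  for (int i = 0; kAliases[i].name != nullptr; i++)
    if (std::strcmp(kAliases[i].name, name) == 0) return kAliases[i].reg;
  return kInvalid;
}
}  // namespace demo
// demo::Number("zero") resolves to 0 through the alias table, while
// demo::Number("v1") hits the canonical table directly.
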
-const FPURegisters::RegisterAlias FPURegisters::aliases_[] = { - {kInvalidRegister, nullptr}}; - -const char* FPURegisters::Name(int creg) { - const char* result; - if ((0 <= creg) && (creg < kNumFPURegisters)) { - result = names_[creg]; - } else { - result = "nocreg"; - } - return result; -} - -int FPURegisters::Number(const char* name) { - // Look through the canonical names. - for (int i = 0; i < kNumFPURegisters; i++) { - if (strcmp(names_[i], name) == 0) { - return i; - } - } - - // Look through the alias names. - int i = 0; - while (aliases_[i].creg != kInvalidRegister) { - if (strcmp(aliases_[i].name, name) == 0) { - return aliases_[i].creg; - } - i++; - } - - // No Cregister with the reguested name found. - return kInvalidFPURegister; -} - -const char* MSARegisters::names_[kNumMSARegisters] = { - "w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7", "w8", "w9", "w10", - "w11", "w12", "w13", "w14", "w15", "w16", "w17", "w18", "w19", "w20", "w21", - "w22", "w23", "w24", "w25", "w26", "w27", "w28", "w29", "w30", "w31"}; - -const MSARegisters::RegisterAlias MSARegisters::aliases_[] = { - {kInvalidRegister, nullptr}}; - -const char* MSARegisters::Name(int creg) { - const char* result; - if ((0 <= creg) && (creg < kNumMSARegisters)) { - result = names_[creg]; - } else { - result = "nocreg"; - } - return result; -} - -int MSARegisters::Number(const char* name) { - // Look through the canonical names. - for (int i = 0; i < kNumMSARegisters; i++) { - if (strcmp(names_[i], name) == 0) { - return i; - } - } - - // Look through the alias names. - int i = 0; - while (aliases_[i].creg != kInvalidRegister) { - if (strcmp(aliases_[i].name, name) == 0) { - return aliases_[i].creg; - } - i++; - } - - // No Cregister with the reguested name found. - return kInvalidMSARegister; -} - -} // namespace internal -} // namespace v8 - -#endif // V8_TARGET_ARCH_MIPS diff --git a/src/codegen/mips/constants-mips.h b/src/codegen/mips/constants-mips.h deleted file mode 100644 index 4cd67bb176..0000000000 --- a/src/codegen/mips/constants-mips.h +++ /dev/null @@ -1,1918 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_CODEGEN_MIPS_CONSTANTS_MIPS_H_ -#define V8_CODEGEN_MIPS_CONSTANTS_MIPS_H_ -#include "src/codegen/cpu-features.h" -// UNIMPLEMENTED_ macro for MIPS. -#ifdef DEBUG -#define UNIMPLEMENTED_MIPS() \ - v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \ - __FILE__, __LINE__, __func__) -#else -#define UNIMPLEMENTED_MIPS() -#endif - -#define UNSUPPORTED_MIPS() v8::internal::PrintF("Unsupported instruction.\n") - -enum ArchVariants { - kMips32r1 = v8::internal::MIPSr1, - kMips32r2 = v8::internal::MIPSr2, - kMips32r6 = v8::internal::MIPSr6, - kLoongson -}; - -#ifdef _MIPS_ARCH_MIPS32R2 -static const ArchVariants kArchVariant = kMips32r2; -#elif _MIPS_ARCH_MIPS32R6 -static const ArchVariants kArchVariant = kMips32r6; -#elif _MIPS_ARCH_LOONGSON -// The loongson flag refers to the LOONGSON architectures based on MIPS-III, -// which predates (and is a subset of) the mips32r2 and r1 architectures. -static const ArchVariants kArchVariant = kLoongson; -#elif _MIPS_ARCH_MIPS32RX -// This flags referred to compatibility mode that creates universal code that -// can run on any MIPS32 architecture revision. The dynamically generated code -// by v8 is specialized for the MIPS host detected in runtime probing. 
-static const ArchVariants kArchVariant = kMips32r1; -#else -static const ArchVariants kArchVariant = kMips32r1; -#endif - -enum Endianness { kLittle, kBig }; - -#if defined(V8_TARGET_LITTLE_ENDIAN) -static const Endianness kArchEndian = kLittle; -#elif defined(V8_TARGET_BIG_ENDIAN) -static const Endianness kArchEndian = kBig; -#else -#error Unknown endianness -#endif - -enum FpuMode { kFP32, kFP64, kFPXX }; - -#if defined(FPU_MODE_FP32) -static const FpuMode kFpuMode = kFP32; -#elif defined(FPU_MODE_FP64) -static const FpuMode kFpuMode = kFP64; -#elif defined(FPU_MODE_FPXX) -#if defined(_MIPS_ARCH_MIPS32R2) || defined(_MIPS_ARCH_MIPS32R6) -static const FpuMode kFpuMode = kFPXX; -#else -#error "FPXX is supported only on Mips32R2 and Mips32R6" -#endif -#else -static const FpuMode kFpuMode = kFP32; -#endif - -#if defined(__mips_hard_float) && __mips_hard_float != 0 -// Use floating-point coprocessor instructions. This flag is raised when -// -mhard-float is passed to the compiler. -const bool IsMipsSoftFloatABI = false; -#elif defined(__mips_soft_float) && __mips_soft_float != 0 -// This flag is raised when -msoft-float is passed to the compiler. -// Although FPU is a base requirement for v8, soft-float ABI is used -// on soft-float systems with FPU kernel emulation. -const bool IsMipsSoftFloatABI = true; -#else -const bool IsMipsSoftFloatABI = true; -#endif - -#if defined(V8_TARGET_LITTLE_ENDIAN) -const uint32_t kHoleNanUpper32Offset = 4; -const uint32_t kHoleNanLower32Offset = 0; -#elif defined(V8_TARGET_BIG_ENDIAN) -const uint32_t kHoleNanUpper32Offset = 0; -const uint32_t kHoleNanLower32Offset = 4; -#else -#error Unknown endianness -#endif - -constexpr bool IsFp64Mode() { return kFpuMode == kFP64; } -constexpr bool IsFp32Mode() { return kFpuMode == kFP32; } -constexpr bool IsFpxxMode() { return kFpuMode == kFPXX; } - -#ifndef _MIPS_ARCH_MIPS32RX -constexpr bool IsMipsArchVariant(const ArchVariants check) { - return kArchVariant == check; -} -#else -bool IsMipsArchVariant(const ArchVariants check) { - return CpuFeatures::IsSupported(static_cast(check)); -} -#endif - -#if defined(V8_TARGET_LITTLE_ENDIAN) -const uint32_t kMipsLwrOffset = 0; -const uint32_t kMipsLwlOffset = 3; -const uint32_t kMipsSwrOffset = 0; -const uint32_t kMipsSwlOffset = 3; -#elif defined(V8_TARGET_BIG_ENDIAN) -const uint32_t kMipsLwrOffset = 3; -const uint32_t kMipsLwlOffset = 0; -const uint32_t kMipsSwrOffset = 3; -const uint32_t kMipsSwlOffset = 0; -#else -#error Unknown endianness -#endif - -#if defined(V8_TARGET_LITTLE_ENDIAN) -const uint32_t kLeastSignificantByteInInt32Offset = 0; -#elif defined(V8_TARGET_BIG_ENDIAN) -const uint32_t kLeastSignificantByteInInt32Offset = 3; -#else -#error Unknown endianness -#endif - -#ifndef __STDC_FORMAT_MACROS -#define __STDC_FORMAT_MACROS -#endif -#include - -// Defines constants and accessor classes to assemble, disassemble and -// simulate MIPS32 instructions. -// -// See: MIPS32 Architecture For Programmers -// Volume II: The MIPS32 Instruction Set -// Try www.cs.cornell.edu/courses/cs3410/2008fa/MIPS_Vol2.pdf. - -namespace v8 { -namespace internal { - -constexpr size_t kMaxPCRelativeCodeRangeInMB = 4096; - -// ----------------------------------------------------------------------------- -// Registers and FPURegisters. - -// Number of general purpose registers. -const int kNumRegisters = 32; -const int kInvalidRegister = -1; - -// Number of registers with HI, LO, and pc. 
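
// A standalone sketch of the endian-mirrored offsets declared above.
// LWR fills the low bytes of a register and LWL the high bytes, so an
// unaligned 32-bit load touches base+0/base+3 on little-endian targets
// and the mirror image on big-endian ones. Constants are recomputed
// locally so the snippet stands alone.
#include <cstdint>

constexpr bool kLittle = true;  // pick the target endianness for the demo
constexpr uint32_t kLwrOff = kLittle ? 0 : 3;  // kMipsLwrOffset
constexpr uint32_t kLwlOff = kLittle ? 3 : 0;  // kMipsLwlOffset
static_assert(kLwrOff != kLwlOff, "the two halves address opposite ends");
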
-const int kNumSimuRegisters = 35; - -// In the simulator, the PC register is simulated as the 34th register. -const int kPCRegister = 34; - -// Number coprocessor registers. -const int kNumFPURegisters = 32; -const int kInvalidFPURegister = -1; - -// Number of MSA registers -const int kNumMSARegisters = 32; -const int kInvalidMSARegister = -1; - -const int kInvalidMSAControlRegister = -1; -const int kMSAIRRegister = 0; -const int kMSACSRRegister = 1; -const int kMSARegSize = 128; -const int kMSALanesByte = kMSARegSize / 8; -const int kMSALanesHalf = kMSARegSize / 16; -const int kMSALanesWord = kMSARegSize / 32; -const int kMSALanesDword = kMSARegSize / 64; - -// FPU (coprocessor 1) control registers. Currently only FCSR is implemented. -const int kFCSRRegister = 31; -const int kInvalidFPUControlRegister = -1; -const uint32_t kFPUInvalidResult = static_cast(1u << 31) - 1; -const int32_t kFPUInvalidResultNegative = static_cast(1u << 31); -const uint64_t kFPU64InvalidResult = - static_cast(static_cast(1) << 63) - 1; -const int64_t kFPU64InvalidResultNegative = - static_cast(static_cast(1) << 63); - -// FCSR constants. -const uint32_t kFCSRInexactFlagBit = 2; -const uint32_t kFCSRUnderflowFlagBit = 3; -const uint32_t kFCSROverflowFlagBit = 4; -const uint32_t kFCSRDivideByZeroFlagBit = 5; -const uint32_t kFCSRInvalidOpFlagBit = 6; -const uint32_t kFCSRNaN2008FlagBit = 18; - -const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit; -const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit; -const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit; -const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit; -const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit; -const uint32_t kFCSRNaN2008FlagMask = 1 << kFCSRNaN2008FlagBit; - -const uint32_t kFCSRFlagMask = - kFCSRInexactFlagMask | kFCSRUnderflowFlagMask | kFCSROverflowFlagMask | - kFCSRDivideByZeroFlagMask | kFCSRInvalidOpFlagMask; - -const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask; - -const uint32_t kFCSRInexactCauseBit = 12; -const uint32_t kFCSRUnderflowCauseBit = 13; -const uint32_t kFCSROverflowCauseBit = 14; -const uint32_t kFCSRDivideByZeroCauseBit = 15; -const uint32_t kFCSRInvalidOpCauseBit = 16; -const uint32_t kFCSRUnimplementedOpCauseBit = 17; - -const uint32_t kFCSRInexactCauseMask = 1 << kFCSRInexactCauseBit; -const uint32_t kFCSRUnderflowCauseMask = 1 << kFCSRUnderflowCauseBit; -const uint32_t kFCSROverflowCauseMask = 1 << kFCSROverflowCauseBit; -const uint32_t kFCSRDivideByZeroCauseMask = 1 << kFCSRDivideByZeroCauseBit; -const uint32_t kFCSRInvalidOpCauseMask = 1 << kFCSRInvalidOpCauseBit; -const uint32_t kFCSRUnimplementedOpCauseMask = 1 - << kFCSRUnimplementedOpCauseBit; - -const uint32_t kFCSRCauseMask = - kFCSRInexactCauseMask | kFCSRUnderflowCauseMask | kFCSROverflowCauseMask | - kFCSRDivideByZeroCauseMask | kFCSRInvalidOpCauseMask | - kFCSRUnimplementedOpCauseBit; - -// 'pref' instruction hints -const int32_t kPrefHintLoad = 0; -const int32_t kPrefHintStore = 1; -const int32_t kPrefHintLoadStreamed = 4; -const int32_t kPrefHintStoreStreamed = 5; -const int32_t kPrefHintLoadRetained = 6; -const int32_t kPrefHintStoreRetained = 7; -const int32_t kPrefHintWritebackInvalidate = 25; -const int32_t kPrefHintPrepareForStore = 30; - -// Actual value of root register is offset from the root array's start -// to take advantage of negative displacement values. -// TODO(sigurds): Choose best value. 
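
// A standalone sketch of testing FCSR exception flags with the bit
// layout above; the FCSR value is a plain integer here, as it would be
// after reading the control register in the simulator.
#include <cstdint>

constexpr uint32_t kDivByZero = 1u << 5;  // kFCSRDivideByZeroFlagBit
constexpr uint32_t kInvalidOp = 1u << 6;  // kFCSRInvalidOpFlagBit

inline bool HasFpException(uint32_t fcsr) {
  return (fcsr & (kDivByZero | kInvalidOp)) != 0;
}
// One reading note: kFCSRCauseMask above ORs in
// kFCSRUnimplementedOpCauseBit (the bit index, 17) where the pattern of
// the other terms suggests kFCSRUnimplementedOpCauseMask (1 << 17).
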
-constexpr int kRootRegisterBias = 256; - -// Helper functions for converting between register numbers and names. -class Registers { - public: - // Return the name of the register. - static const char* Name(int reg); - - // Lookup the register number for the name provided. - static int Number(const char* name); - - struct RegisterAlias { - int reg; - const char* name; - }; - - static const int32_t kMaxValue = 0x7fffffff; - static const int32_t kMinValue = 0x80000000; - - private: - static const char* names_[kNumSimuRegisters]; - static const RegisterAlias aliases_[]; -}; - -// Helper functions for converting between register numbers and names. -class FPURegisters { - public: - // Return the name of the register. - static const char* Name(int reg); - - // Lookup the register number for the name provided. - static int Number(const char* name); - - struct RegisterAlias { - int creg; - const char* name; - }; - - private: - static const char* names_[kNumFPURegisters]; - static const RegisterAlias aliases_[]; -}; - -// Helper functions for converting between register numbers and names. -class MSARegisters { - public: - // Return the name of the register. - static const char* Name(int reg); - - // Lookup the register number for the name provided. - static int Number(const char* name); - - struct RegisterAlias { - int creg; - const char* name; - }; - - private: - static const char* names_[kNumMSARegisters]; - static const RegisterAlias aliases_[]; -}; - -// ----------------------------------------------------------------------------- -// Instructions encoding constants. - -// On MIPS all instructions are 32 bits. -using Instr = int32_t; - -// Special Software Interrupt codes when used in the presence of the MIPS -// simulator. -enum SoftwareInterruptCodes { - // Transition to C code. - call_rt_redirected = 0xfffff -}; - -// On MIPS Simulator breakpoints can have different codes: -// - Breaks between 0 and kMaxWatchpointCode are treated as simple watchpoints, -// the simulator will run through them and print the registers. -// - Breaks between kMaxWatchpointCode and kMaxStopCode are treated as stop() -// instructions (see Assembler::stop()). -// - Breaks larger than kMaxStopCode are simple breaks, dropping you into the -// debugger. -const uint32_t kMaxWatchpointCode = 31; -const uint32_t kMaxStopCode = 127; -static_assert(kMaxWatchpointCode < kMaxStopCode); - -// ----- Fields offset and length. 
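
// A sketch of the simulator break-code ranges documented above; the
// comment's "between" leaves the exact edges ambiguous, so the boundary
// handling here is an assumption.
#include <cstdint>

enum class BreakKind { kWatchpoint, kStop, kDebuggerBreak };

inline BreakKind Classify(uint32_t code) {
  constexpr uint32_t kMaxWatchpoint = 31;  // kMaxWatchpointCode
  constexpr uint32_t kMaxStop = 127;       // kMaxStopCode
  if (code <= kMaxWatchpoint) return BreakKind::kWatchpoint;
  if (code <= kMaxStop) return BreakKind::kStop;
  return BreakKind::kDebuggerBreak;
}
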
-const int kOpcodeShift = 26; -const int kOpcodeBits = 6; -const int kRsShift = 21; -const int kRsBits = 5; -const int kRtShift = 16; -const int kRtBits = 5; -const int kRdShift = 11; -const int kRdBits = 5; -const int kSaShift = 6; -const int kSaBits = 5; -const int kLsaSaBits = 2; -const int kFunctionShift = 0; -const int kFunctionBits = 6; -const int kLuiShift = 16; -const int kBp2Shift = 6; -const int kBp2Bits = 2; -const int kBaseShift = 21; -const int kBaseBits = 5; -const int kBit6Shift = 6; -const int kBit6Bits = 1; - -const int kImm9Shift = 7; -const int kImm9Bits = 9; -const int kImm16Shift = 0; -const int kImm16Bits = 16; -const int kImm18Shift = 0; -const int kImm18Bits = 18; -const int kImm19Shift = 0; -const int kImm19Bits = 19; -const int kImm21Shift = 0; -const int kImm21Bits = 21; -const int kImm26Shift = 0; -const int kImm26Bits = 26; -const int kImm28Shift = 0; -const int kImm28Bits = 28; -const int kImm32Shift = 0; -const int kImm32Bits = 32; -const int kMsaImm8Shift = 16; -const int kMsaImm8Bits = 8; -const int kMsaImm5Shift = 16; -const int kMsaImm5Bits = 5; -const int kMsaImm10Shift = 11; -const int kMsaImm10Bits = 10; -const int kMsaImmMI10Shift = 16; -const int kMsaImmMI10Bits = 10; - -// In branches and jumps immediate fields point to words, not bytes, -// and are therefore shifted by 2. -const int kImmFieldShift = 2; - -const int kFrBits = 5; -const int kFrShift = 21; -const int kFsShift = 11; -const int kFsBits = 5; -const int kFtShift = 16; -const int kFtBits = 5; -const int kFdShift = 6; -const int kFdBits = 5; -const int kFCccShift = 8; -const int kFCccBits = 3; -const int kFBccShift = 18; -const int kFBccBits = 3; -const int kFBtrueShift = 16; -const int kFBtrueBits = 1; -const int kWtBits = 5; -const int kWtShift = 16; -const int kWsBits = 5; -const int kWsShift = 11; -const int kWdBits = 5; -const int kWdShift = 6; - -// ----- Miscellaneous useful masks. -// Instruction bit masks. -const int kOpcodeMask = ((1 << kOpcodeBits) - 1) << kOpcodeShift; -const int kImm9Mask = ((1 << kImm9Bits) - 1) << kImm9Shift; -const int kImm16Mask = ((1 << kImm16Bits) - 1) << kImm16Shift; -const int kImm18Mask = ((1 << kImm18Bits) - 1) << kImm18Shift; -const int kImm19Mask = ((1 << kImm19Bits) - 1) << kImm19Shift; -const int kImm21Mask = ((1 << kImm21Bits) - 1) << kImm21Shift; -const int kImm26Mask = ((1 << kImm26Bits) - 1) << kImm26Shift; -const int kImm28Mask = ((1 << kImm28Bits) - 1) << kImm28Shift; -const int kImm5Mask = ((1 << 5) - 1); -const int kImm8Mask = ((1 << 8) - 1); -const int kImm10Mask = ((1 << 10) - 1); -const int kMsaI5I10Mask = ((7U << 23) | ((1 << 6) - 1)); -const int kMsaI8Mask = ((3U << 24) | ((1 << 6) - 1)); -const int kMsaI5Mask = ((7U << 23) | ((1 << 6) - 1)); -const int kMsaMI10Mask = (15U << 2); -const int kMsaBITMask = ((7U << 23) | ((1 << 6) - 1)); -const int kMsaELMMask = (15U << 22); -const int kMsaLongerELMMask = kMsaELMMask | (63U << 16); -const int kMsa3RMask = ((7U << 23) | ((1 << 6) - 1)); -const int kMsa3RFMask = ((15U << 22) | ((1 << 6) - 1)); -const int kMsaVECMask = (23U << 21); -const int kMsa2RMask = (7U << 18); -const int kMsa2RFMask = (15U << 17); -const int kRsFieldMask = ((1 << kRsBits) - 1) << kRsShift; -const int kRtFieldMask = ((1 << kRtBits) - 1) << kRtShift; -const int kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift; -const int kSaFieldMask = ((1 << kSaBits) - 1) << kSaShift; -const int kFunctionFieldMask = ((1 << kFunctionBits) - 1) << kFunctionShift; -// Misc masks. 
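
// A standalone sketch of field extraction with the shift/width constants
// above (recomputed locally so the snippet compiles on its own).
#include <cstdint>

constexpr int kRs = 21, kRt = 16, kRd = 11, kBits = 5;

inline uint32_t Field(uint32_t instr, int shift, int bits) {
  return (instr >> shift) & ((1u << bits) - 1);
}
// For an R-type word: rs = Field(instr, kRs, kBits),
// rt = Field(instr, kRt, kBits), rd = Field(instr, kRd, kBits).
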
-const int kHiMask = 0xffff << 16; -const int kLoMask = 0xffff; -const int kSignMask = 0x80000000; -const int kJumpAddrMask = (1 << (kImm26Bits + kImmFieldShift)) - 1; - -// ----- MIPS Opcodes and Function Fields. -// We use this presentation to stay close to the table representation in -// MIPS32 Architecture For Programmers, Volume II: The MIPS32 Instruction Set. -enum Opcode : uint32_t { - SPECIAL = 0U << kOpcodeShift, - REGIMM = 1U << kOpcodeShift, - - J = ((0U << 3) + 2) << kOpcodeShift, - JAL = ((0U << 3) + 3) << kOpcodeShift, - BEQ = ((0U << 3) + 4) << kOpcodeShift, - BNE = ((0U << 3) + 5) << kOpcodeShift, - BLEZ = ((0U << 3) + 6) << kOpcodeShift, - BGTZ = ((0U << 3) + 7) << kOpcodeShift, - - ADDI = ((1U << 3) + 0) << kOpcodeShift, - ADDIU = ((1U << 3) + 1) << kOpcodeShift, - SLTI = ((1U << 3) + 2) << kOpcodeShift, - SLTIU = ((1U << 3) + 3) << kOpcodeShift, - ANDI = ((1U << 3) + 4) << kOpcodeShift, - ORI = ((1U << 3) + 5) << kOpcodeShift, - XORI = ((1U << 3) + 6) << kOpcodeShift, - LUI = ((1U << 3) + 7) << kOpcodeShift, // LUI/AUI family. - - BEQC = ((2U << 3) + 0) << kOpcodeShift, - COP1 = ((2U << 3) + 1) << kOpcodeShift, // Coprocessor 1 class. - BEQL = ((2U << 3) + 4) << kOpcodeShift, - BNEL = ((2U << 3) + 5) << kOpcodeShift, - BLEZL = ((2U << 3) + 6) << kOpcodeShift, - BGTZL = ((2U << 3) + 7) << kOpcodeShift, - - DADDI = ((3U << 3) + 0) << kOpcodeShift, // This is also BNEC. - SPECIAL2 = ((3U << 3) + 4) << kOpcodeShift, - MSA = ((3U << 3) + 6) << kOpcodeShift, - SPECIAL3 = ((3U << 3) + 7) << kOpcodeShift, - - LB = ((4U << 3) + 0) << kOpcodeShift, - LH = ((4U << 3) + 1) << kOpcodeShift, - LWL = ((4U << 3) + 2) << kOpcodeShift, - LW = ((4U << 3) + 3) << kOpcodeShift, - LBU = ((4U << 3) + 4) << kOpcodeShift, - LHU = ((4U << 3) + 5) << kOpcodeShift, - LWR = ((4U << 3) + 6) << kOpcodeShift, - SB = ((5U << 3) + 0) << kOpcodeShift, - SH = ((5U << 3) + 1) << kOpcodeShift, - SWL = ((5U << 3) + 2) << kOpcodeShift, - SW = ((5U << 3) + 3) << kOpcodeShift, - SWR = ((5U << 3) + 6) << kOpcodeShift, - - LL = ((6U << 3) + 0) << kOpcodeShift, - LWC1 = ((6U << 3) + 1) << kOpcodeShift, - BC = ((6U << 3) + 2) << kOpcodeShift, - LDC1 = ((6U << 3) + 5) << kOpcodeShift, - POP66 = ((6U << 3) + 6) << kOpcodeShift, // beqzc, jic - - PREF = ((6U << 3) + 3) << kOpcodeShift, - - SC = ((7U << 3) + 0) << kOpcodeShift, - SWC1 = ((7U << 3) + 1) << kOpcodeShift, - BALC = ((7U << 3) + 2) << kOpcodeShift, - PCREL = ((7U << 3) + 3) << kOpcodeShift, - SDC1 = ((7U << 3) + 5) << kOpcodeShift, - POP76 = ((7U << 3) + 6) << kOpcodeShift, // bnezc, jialc - - COP1X = ((1U << 4) + 3) << kOpcodeShift, - - // New r6 instruction. - POP06 = BLEZ, // bgeuc/bleuc, blezalc, bgezalc - POP07 = BGTZ, // bltuc/bgtuc, bgtzalc, bltzalc - POP10 = ADDI, // beqzalc, bovc, beqc - POP26 = BLEZL, // bgezc, blezc, bgec/blec - POP27 = BGTZL, // bgtzc, bltzc, bltc/bgtc - POP30 = DADDI, // bnezalc, bnvc, bnec -}; - -enum SecondaryField : uint32_t { - // SPECIAL Encoding of Function Field. 
- SLL = ((0U << 3) + 0), - MOVCI = ((0U << 3) + 1), - SRL = ((0U << 3) + 2), - SRA = ((0U << 3) + 3), - SLLV = ((0U << 3) + 4), - LSA = ((0U << 3) + 5), - SRLV = ((0U << 3) + 6), - SRAV = ((0U << 3) + 7), - - JR = ((1U << 3) + 0), - JALR = ((1U << 3) + 1), - MOVZ = ((1U << 3) + 2), - MOVN = ((1U << 3) + 3), - BREAK = ((1U << 3) + 5), - SYNC = ((1U << 3) + 7), - - MFHI = ((2U << 3) + 0), - CLZ_R6 = ((2U << 3) + 0), - CLO_R6 = ((2U << 3) + 1), - MFLO = ((2U << 3) + 2), - - MULT = ((3U << 3) + 0), - MULTU = ((3U << 3) + 1), - DIV = ((3U << 3) + 2), - DIVU = ((3U << 3) + 3), - - ADD = ((4U << 3) + 0), - ADDU = ((4U << 3) + 1), - SUB = ((4U << 3) + 2), - SUBU = ((4U << 3) + 3), - AND = ((4U << 3) + 4), - OR = ((4U << 3) + 5), - XOR = ((4U << 3) + 6), - NOR = ((4U << 3) + 7), - - SLT = ((5U << 3) + 2), - SLTU = ((5U << 3) + 3), - - TGE = ((6U << 3) + 0), - TGEU = ((6U << 3) + 1), - TLT = ((6U << 3) + 2), - TLTU = ((6U << 3) + 3), - TEQ = ((6U << 3) + 4), - SELEQZ_S = ((6U << 3) + 5), - TNE = ((6U << 3) + 6), - SELNEZ_S = ((6U << 3) + 7), - - // Multiply integers in r6. - MUL_MUH = ((3U << 3) + 0), // MUL, MUH. - MUL_MUH_U = ((3U << 3) + 1), // MUL_U, MUH_U. - RINT = ((3U << 3) + 2), - - MUL_OP = ((0U << 3) + 2), - MUH_OP = ((0U << 3) + 3), - DIV_OP = ((0U << 3) + 2), - MOD_OP = ((0U << 3) + 3), - - DIV_MOD = ((3U << 3) + 2), - DIV_MOD_U = ((3U << 3) + 3), - - // SPECIAL2 Encoding of Function Field. - MUL = ((0U << 3) + 2), - CLZ = ((4U << 3) + 0), - CLO = ((4U << 3) + 1), - - // SPECIAL3 Encoding of Function Field. - EXT = ((0U << 3) + 0), - INS = ((0U << 3) + 4), - BSHFL = ((4U << 3) + 0), - SC_R6 = ((4U << 3) + 6), - LL_R6 = ((6U << 3) + 6), - - // SPECIAL3 Encoding of sa Field. - BITSWAP = ((0U << 3) + 0), - ALIGN = ((0U << 3) + 2), - WSBH = ((0U << 3) + 2), - SEB = ((2U << 3) + 0), - SEH = ((3U << 3) + 0), - - // REGIMM encoding of rt Field. - BLTZ = ((0U << 3) + 0) << 16, - BGEZ = ((0U << 3) + 1) << 16, - BLTZAL = ((2U << 3) + 0) << 16, - BGEZAL = ((2U << 3) + 1) << 16, - BGEZALL = ((2U << 3) + 3) << 16, - - // COP1 Encoding of rs Field. - MFC1 = ((0U << 3) + 0) << 21, - CFC1 = ((0U << 3) + 2) << 21, - MFHC1 = ((0U << 3) + 3) << 21, - MTC1 = ((0U << 3) + 4) << 21, - CTC1 = ((0U << 3) + 6) << 21, - MTHC1 = ((0U << 3) + 7) << 21, - BC1 = ((1U << 3) + 0) << 21, - S = ((2U << 3) + 0) << 21, - D = ((2U << 3) + 1) << 21, - W = ((2U << 3) + 4) << 21, - L = ((2U << 3) + 5) << 21, - PS = ((2U << 3) + 6) << 21, - // COP1 Encoding of Function Field When rs=S. - - ADD_S = ((0U << 3) + 0), - SUB_S = ((0U << 3) + 1), - MUL_S = ((0U << 3) + 2), - DIV_S = ((0U << 3) + 3), - ABS_S = ((0U << 3) + 5), - SQRT_S = ((0U << 3) + 4), - MOV_S = ((0U << 3) + 6), - NEG_S = ((0U << 3) + 7), - ROUND_L_S = ((1U << 3) + 0), - TRUNC_L_S = ((1U << 3) + 1), - CEIL_L_S = ((1U << 3) + 2), - FLOOR_L_S = ((1U << 3) + 3), - ROUND_W_S = ((1U << 3) + 4), - TRUNC_W_S = ((1U << 3) + 5), - CEIL_W_S = ((1U << 3) + 6), - FLOOR_W_S = ((1U << 3) + 7), - RECIP_S = ((2U << 3) + 5), - RSQRT_S = ((2U << 3) + 6), - MADDF_S = ((3U << 3) + 0), - MSUBF_S = ((3U << 3) + 1), - CLASS_S = ((3U << 3) + 3), - CVT_D_S = ((4U << 3) + 1), - CVT_W_S = ((4U << 3) + 4), - CVT_L_S = ((4U << 3) + 5), - CVT_PS_S = ((4U << 3) + 6), - - // COP1 Encoding of Function Field When rs=D. 
- ADD_D = ((0U << 3) + 0), - SUB_D = ((0U << 3) + 1), - MUL_D = ((0U << 3) + 2), - DIV_D = ((0U << 3) + 3), - SQRT_D = ((0U << 3) + 4), - ABS_D = ((0U << 3) + 5), - MOV_D = ((0U << 3) + 6), - NEG_D = ((0U << 3) + 7), - ROUND_L_D = ((1U << 3) + 0), - TRUNC_L_D = ((1U << 3) + 1), - CEIL_L_D = ((1U << 3) + 2), - FLOOR_L_D = ((1U << 3) + 3), - ROUND_W_D = ((1U << 3) + 4), - TRUNC_W_D = ((1U << 3) + 5), - CEIL_W_D = ((1U << 3) + 6), - FLOOR_W_D = ((1U << 3) + 7), - RECIP_D = ((2U << 3) + 5), - RSQRT_D = ((2U << 3) + 6), - MADDF_D = ((3U << 3) + 0), - MSUBF_D = ((3U << 3) + 1), - CLASS_D = ((3U << 3) + 3), - MIN = ((3U << 3) + 4), - MINA = ((3U << 3) + 5), - MAX = ((3U << 3) + 6), - MAXA = ((3U << 3) + 7), - CVT_S_D = ((4U << 3) + 0), - CVT_W_D = ((4U << 3) + 4), - CVT_L_D = ((4U << 3) + 5), - C_F_D = ((6U << 3) + 0), - C_UN_D = ((6U << 3) + 1), - C_EQ_D = ((6U << 3) + 2), - C_UEQ_D = ((6U << 3) + 3), - C_OLT_D = ((6U << 3) + 4), - C_ULT_D = ((6U << 3) + 5), - C_OLE_D = ((6U << 3) + 6), - C_ULE_D = ((6U << 3) + 7), - - // COP1 Encoding of Function Field When rs=W or L. - CVT_S_W = ((4U << 3) + 0), - CVT_D_W = ((4U << 3) + 1), - CVT_S_L = ((4U << 3) + 0), - CVT_D_L = ((4U << 3) + 1), - BC1EQZ = ((2U << 2) + 1) << 21, - BC1NEZ = ((3U << 2) + 1) << 21, - // COP1 CMP positive predicates Bit 5..4 = 00. - CMP_AF = ((0U << 3) + 0), - CMP_UN = ((0U << 3) + 1), - CMP_EQ = ((0U << 3) + 2), - CMP_UEQ = ((0U << 3) + 3), - CMP_LT = ((0U << 3) + 4), - CMP_ULT = ((0U << 3) + 5), - CMP_LE = ((0U << 3) + 6), - CMP_ULE = ((0U << 3) + 7), - CMP_SAF = ((1U << 3) + 0), - CMP_SUN = ((1U << 3) + 1), - CMP_SEQ = ((1U << 3) + 2), - CMP_SUEQ = ((1U << 3) + 3), - CMP_SSLT = ((1U << 3) + 4), - CMP_SSULT = ((1U << 3) + 5), - CMP_SLE = ((1U << 3) + 6), - CMP_SULE = ((1U << 3) + 7), - // COP1 CMP negative predicates Bit 5..4 = 01. - CMP_AT = ((2U << 3) + 0), // Reserved, not implemented. - CMP_OR = ((2U << 3) + 1), - CMP_UNE = ((2U << 3) + 2), - CMP_NE = ((2U << 3) + 3), - CMP_UGE = ((2U << 3) + 4), // Reserved, not implemented. - CMP_OGE = ((2U << 3) + 5), // Reserved, not implemented. - CMP_UGT = ((2U << 3) + 6), // Reserved, not implemented. - CMP_OGT = ((2U << 3) + 7), // Reserved, not implemented. - CMP_SAT = ((3U << 3) + 0), // Reserved, not implemented. - CMP_SOR = ((3U << 3) + 1), - CMP_SUNE = ((3U << 3) + 2), - CMP_SNE = ((3U << 3) + 3), - CMP_SUGE = ((3U << 3) + 4), // Reserved, not implemented. - CMP_SOGE = ((3U << 3) + 5), // Reserved, not implemented. - CMP_SUGT = ((3U << 3) + 6), // Reserved, not implemented. - CMP_SOGT = ((3U << 3) + 7), // Reserved, not implemented. - - SEL = ((2U << 3) + 0), - MOVZ_C = ((2U << 3) + 2), - MOVN_C = ((2U << 3) + 3), - SELEQZ_C = ((2U << 3) + 4), // COP1 on FPR registers. - MOVF = ((2U << 3) + 1), // Function field for MOVT.fmt and MOVF.fmt - SELNEZ_C = ((2U << 3) + 7), // COP1 on FPR registers. - // COP1 Encoding of Function Field When rs=PS. - - // COP1X Encoding of Function Field. - MADD_S = ((4U << 3) + 0), - MADD_D = ((4U << 3) + 1), - MSUB_S = ((5U << 3) + 0), - MSUB_D = ((5U << 3) + 1), - - // PCREL Encoding of rt Field. - ADDIUPC = ((0U << 2) + 0), - LWPC = ((0U << 2) + 1), - AUIPC = ((3U << 3) + 6), - ALUIPC = ((3U << 3) + 7), - - // POP66 Encoding of rs Field. - JIC = ((0U << 5) + 0), - - // POP76 Encoding of rs Field. 
- JIALC = ((0U << 5) + 0), - - // COP1 Encoding of rs Field for MSA Branch Instructions - BZ_V = (((1U << 3) + 3) << kRsShift), - BNZ_V = (((1U << 3) + 7) << kRsShift), - BZ_B = (((3U << 3) + 0) << kRsShift), - BZ_H = (((3U << 3) + 1) << kRsShift), - BZ_W = (((3U << 3) + 2) << kRsShift), - BZ_D = (((3U << 3) + 3) << kRsShift), - BNZ_B = (((3U << 3) + 4) << kRsShift), - BNZ_H = (((3U << 3) + 5) << kRsShift), - BNZ_W = (((3U << 3) + 6) << kRsShift), - BNZ_D = (((3U << 3) + 7) << kRsShift), - - // MSA: Operation Field for MI10 Instruction Formats - MSA_LD = (8U << 2), - MSA_ST = (9U << 2), - LD_B = ((8U << 2) + 0), - LD_H = ((8U << 2) + 1), - LD_W = ((8U << 2) + 2), - LD_D = ((8U << 2) + 3), - ST_B = ((9U << 2) + 0), - ST_H = ((9U << 2) + 1), - ST_W = ((9U << 2) + 2), - ST_D = ((9U << 2) + 3), - - // MSA: Operation Field for I5 Instruction Format - ADDVI = ((0U << 23) + 6), - SUBVI = ((1U << 23) + 6), - MAXI_S = ((2U << 23) + 6), - MAXI_U = ((3U << 23) + 6), - MINI_S = ((4U << 23) + 6), - MINI_U = ((5U << 23) + 6), - CEQI = ((0U << 23) + 7), - CLTI_S = ((2U << 23) + 7), - CLTI_U = ((3U << 23) + 7), - CLEI_S = ((4U << 23) + 7), - CLEI_U = ((5U << 23) + 7), - LDI = ((6U << 23) + 7), // I10 instruction format - I5_DF_b = (0U << 21), - I5_DF_h = (1U << 21), - I5_DF_w = (2U << 21), - I5_DF_d = (3U << 21), - - // MSA: Operation Field for I8 Instruction Format - ANDI_B = ((0U << 24) + 0), - ORI_B = ((1U << 24) + 0), - NORI_B = ((2U << 24) + 0), - XORI_B = ((3U << 24) + 0), - BMNZI_B = ((0U << 24) + 1), - BMZI_B = ((1U << 24) + 1), - BSELI_B = ((2U << 24) + 1), - SHF_B = ((0U << 24) + 2), - SHF_H = ((1U << 24) + 2), - SHF_W = ((2U << 24) + 2), - - MSA_VEC_2R_2RF_MINOR = ((3U << 3) + 6), - - // MSA: Operation Field for VEC Instruction Formats - AND_V = (((0U << 2) + 0) << 21), - OR_V = (((0U << 2) + 1) << 21), - NOR_V = (((0U << 2) + 2) << 21), - XOR_V = (((0U << 2) + 3) << 21), - BMNZ_V = (((1U << 2) + 0) << 21), - BMZ_V = (((1U << 2) + 1) << 21), - BSEL_V = (((1U << 2) + 2) << 21), - - // MSA: Operation Field for 2R Instruction Formats - MSA_2R_FORMAT = (((6U << 2) + 0) << 21), - FILL = (0U << 18), - PCNT = (1U << 18), - NLOC = (2U << 18), - NLZC = (3U << 18), - MSA_2R_DF_b = (0U << 16), - MSA_2R_DF_h = (1U << 16), - MSA_2R_DF_w = (2U << 16), - MSA_2R_DF_d = (3U << 16), - - // MSA: Operation Field for 2RF Instruction Formats - MSA_2RF_FORMAT = (((6U << 2) + 1) << 21), - FCLASS = (0U << 17), - FTRUNC_S = (1U << 17), - FTRUNC_U = (2U << 17), - FSQRT = (3U << 17), - FRSQRT = (4U << 17), - FRCP = (5U << 17), - FRINT = (6U << 17), - FLOG2 = (7U << 17), - FEXUPL = (8U << 17), - FEXUPR = (9U << 17), - FFQL = (10U << 17), - FFQR = (11U << 17), - FTINT_S = (12U << 17), - FTINT_U = (13U << 17), - FFINT_S = (14U << 17), - FFINT_U = (15U << 17), - MSA_2RF_DF_w = (0U << 16), - MSA_2RF_DF_d = (1U << 16), - - // MSA: Operation Field for 3R Instruction Format - SLL_MSA = ((0U << 23) + 13), - SRA_MSA = ((1U << 23) + 13), - SRL_MSA = ((2U << 23) + 13), - BCLR = ((3U << 23) + 13), - BSET = ((4U << 23) + 13), - BNEG = ((5U << 23) + 13), - BINSL = ((6U << 23) + 13), - BINSR = ((7U << 23) + 13), - ADDV = ((0U << 23) + 14), - SUBV = ((1U << 23) + 14), - MAX_S = ((2U << 23) + 14), - MAX_U = ((3U << 23) + 14), - MIN_S = ((4U << 23) + 14), - MIN_U = ((5U << 23) + 14), - MAX_A = ((6U << 23) + 14), - MIN_A = ((7U << 23) + 14), - CEQ = ((0U << 23) + 15), - CLT_S = ((2U << 23) + 15), - CLT_U = ((3U << 23) + 15), - CLE_S = ((4U << 23) + 15), - CLE_U = ((5U << 23) + 15), - ADD_A = ((0U << 23) + 16), - ADDS_A = ((1U << 23) + 16), - 
ADDS_S = ((2U << 23) + 16), - ADDS_U = ((3U << 23) + 16), - AVE_S = ((4U << 23) + 16), - AVE_U = ((5U << 23) + 16), - AVER_S = ((6U << 23) + 16), - AVER_U = ((7U << 23) + 16), - SUBS_S = ((0U << 23) + 17), - SUBS_U = ((1U << 23) + 17), - SUBSUS_U = ((2U << 23) + 17), - SUBSUU_S = ((3U << 23) + 17), - ASUB_S = ((4U << 23) + 17), - ASUB_U = ((5U << 23) + 17), - MULV = ((0U << 23) + 18), - MADDV = ((1U << 23) + 18), - MSUBV = ((2U << 23) + 18), - DIV_S_MSA = ((4U << 23) + 18), - DIV_U = ((5U << 23) + 18), - MOD_S = ((6U << 23) + 18), - MOD_U = ((7U << 23) + 18), - DOTP_S = ((0U << 23) + 19), - DOTP_U = ((1U << 23) + 19), - DPADD_S = ((2U << 23) + 19), - DPADD_U = ((3U << 23) + 19), - DPSUB_S = ((4U << 23) + 19), - DPSUB_U = ((5U << 23) + 19), - SLD = ((0U << 23) + 20), - SPLAT = ((1U << 23) + 20), - PCKEV = ((2U << 23) + 20), - PCKOD = ((3U << 23) + 20), - ILVL = ((4U << 23) + 20), - ILVR = ((5U << 23) + 20), - ILVEV = ((6U << 23) + 20), - ILVOD = ((7U << 23) + 20), - VSHF = ((0U << 23) + 21), - SRAR = ((1U << 23) + 21), - SRLR = ((2U << 23) + 21), - HADD_S = ((4U << 23) + 21), - HADD_U = ((5U << 23) + 21), - HSUB_S = ((6U << 23) + 21), - HSUB_U = ((7U << 23) + 21), - MSA_3R_DF_b = (0U << 21), - MSA_3R_DF_h = (1U << 21), - MSA_3R_DF_w = (2U << 21), - MSA_3R_DF_d = (3U << 21), - - // MSA: Operation Field for 3RF Instruction Format - FCAF = ((0U << 22) + 26), - FCUN = ((1U << 22) + 26), - FCEQ = ((2U << 22) + 26), - FCUEQ = ((3U << 22) + 26), - FCLT = ((4U << 22) + 26), - FCULT = ((5U << 22) + 26), - FCLE = ((6U << 22) + 26), - FCULE = ((7U << 22) + 26), - FSAF = ((8U << 22) + 26), - FSUN = ((9U << 22) + 26), - FSEQ = ((10U << 22) + 26), - FSUEQ = ((11U << 22) + 26), - FSLT = ((12U << 22) + 26), - FSULT = ((13U << 22) + 26), - FSLE = ((14U << 22) + 26), - FSULE = ((15U << 22) + 26), - FADD = ((0U << 22) + 27), - FSUB = ((1U << 22) + 27), - FMUL = ((2U << 22) + 27), - FDIV = ((3U << 22) + 27), - FMADD = ((4U << 22) + 27), - FMSUB = ((5U << 22) + 27), - FEXP2 = ((7U << 22) + 27), - FEXDO = ((8U << 22) + 27), - FTQ = ((10U << 22) + 27), - FMIN = ((12U << 22) + 27), - FMIN_A = ((13U << 22) + 27), - FMAX = ((14U << 22) + 27), - FMAX_A = ((15U << 22) + 27), - FCOR = ((1U << 22) + 28), - FCUNE = ((2U << 22) + 28), - FCNE = ((3U << 22) + 28), - MUL_Q = ((4U << 22) + 28), - MADD_Q = ((5U << 22) + 28), - MSUB_Q = ((6U << 22) + 28), - FSOR = ((9U << 22) + 28), - FSUNE = ((10U << 22) + 28), - FSNE = ((11U << 22) + 28), - MULR_Q = ((12U << 22) + 28), - MADDR_Q = ((13U << 22) + 28), - MSUBR_Q = ((14U << 22) + 28), - - // MSA: Operation Field for ELM Instruction Format - MSA_ELM_MINOR = ((3U << 3) + 1), - SLDI = (0U << 22), - CTCMSA = ((0U << 22) | (62U << 16)), - SPLATI = (1U << 22), - CFCMSA = ((1U << 22) | (62U << 16)), - COPY_S = (2U << 22), - MOVE_V = ((2U << 22) | (62U << 16)), - COPY_U = (3U << 22), - INSERT = (4U << 22), - INSVE = (5U << 22), - ELM_DF_B = ((0U << 4) << 16), - ELM_DF_H = ((4U << 3) << 16), - ELM_DF_W = ((12U << 2) << 16), - ELM_DF_D = ((28U << 1) << 16), - - // MSA: Operation Field for BIT Instruction Format - SLLI = ((0U << 23) + 9), - SRAI = ((1U << 23) + 9), - SRLI = ((2U << 23) + 9), - BCLRI = ((3U << 23) + 9), - BSETI = ((4U << 23) + 9), - BNEGI = ((5U << 23) + 9), - BINSLI = ((6U << 23) + 9), - BINSRI = ((7U << 23) + 9), - SAT_S = ((0U << 23) + 10), - SAT_U = ((1U << 23) + 10), - SRARI = ((2U << 23) + 10), - SRLRI = ((3U << 23) + 10), - BIT_DF_b = ((14U << 3) << 16), - BIT_DF_h = ((6U << 4) << 16), - BIT_DF_w = ((2U << 5) << 16), - BIT_DF_d = ((0U << 6) << 16), - - nullptrSF = 0U 
-}; - -enum MSAMinorOpcode : uint32_t { - kMsaMinorUndefined = 0, - kMsaMinorI8, - kMsaMinorI5, - kMsaMinorI10, - kMsaMinorBIT, - kMsaMinor3R, - kMsaMinor3RF, - kMsaMinorELM, - kMsaMinorVEC, - kMsaMinor2R, - kMsaMinor2RF, - kMsaMinorMI10 -}; - -// ----- Emulated conditions. -// On MIPS we use this enum to abstract from conditional branch instructions. -// The 'U' prefix is used to specify unsigned comparisons. -// Opposite conditions must be paired as odd/even numbers -// because 'NegateCondition' function flips LSB to negate condition. -enum Condition { - overflow = 0, - no_overflow = 1, - Uless = 2, - Ugreater_equal = 3, - Uless_equal = 4, - Ugreater = 5, - equal = 6, - not_equal = 7, // Unordered or Not Equal. - negative = 8, - positive = 9, - parity_even = 10, - parity_odd = 11, - less = 12, - greater_equal = 13, - less_equal = 14, - greater = 15, - ueq = 16, // Unordered or Equal. - ogl = 17, // Ordered and Not Equal. - cc_always = 18, - - // Aliases. - carry = Uless, - not_carry = Ugreater_equal, - zero = equal, - eq = equal, - not_zero = not_equal, - ne = not_equal, - nz = not_equal, - sign = negative, - not_sign = positive, - mi = negative, - pl = positive, - hi = Ugreater, - ls = Uless_equal, - ge = greater_equal, - lt = less, - gt = greater, - le = less_equal, - hs = Ugreater_equal, - lo = Uless, - al = cc_always, - ult = Uless, - uge = Ugreater_equal, - ule = Uless_equal, - ugt = Ugreater, -}; - -// Returns the equivalent of !cc. -inline Condition NegateCondition(Condition cc) { - DCHECK(cc != cc_always); - return static_cast(cc ^ 1); -} - -inline Condition NegateFpuCondition(Condition cc) { - DCHECK(cc != cc_always); - switch (cc) { - case ult: - return ge; - case ugt: - return le; - case uge: - return lt; - case ule: - return gt; - case lt: - return uge; - case gt: - return ule; - case ge: - return ult; - case le: - return ugt; - case eq: - return ne; - case ne: - return eq; - case ueq: - return ogl; - case ogl: - return ueq; - default: - return cc; - } -} - -enum MSABranchCondition { - all_not_zero = 0, // Branch If All Elements Are Not Zero - one_elem_not_zero, // Branch If At Least One Element of Any Format Is Not - // Zero - one_elem_zero, // Branch If At Least One Element Is Zero - all_zero // Branch If All Elements of Any Format Are Zero -}; - -inline MSABranchCondition NegateMSABranchCondition(MSABranchCondition cond) { - switch (cond) { - case all_not_zero: - return one_elem_zero; - case one_elem_not_zero: - return all_zero; - case one_elem_zero: - return all_not_zero; - case all_zero: - return one_elem_not_zero; - default: - return cond; - } -} - -enum MSABranchDF { - MSA_BRANCH_B = 0, - MSA_BRANCH_H, - MSA_BRANCH_W, - MSA_BRANCH_D, - MSA_BRANCH_V -}; - -// ----- Coprocessor conditions. -enum FPUCondition { - kNoFPUCondition = -1, - - F = 0x00, // False. - UN = 0x01, // Unordered. - EQ = 0x02, // Equal. - UEQ = 0x03, // Unordered or Equal. - OLT = 0x04, // Ordered or Less Than, on Mips release < 6. - LT = 0x04, // Ordered or Less Than, on Mips release >= 6. - ULT = 0x05, // Unordered or Less Than. - OLE = 0x06, // Ordered or Less Than or Equal, on Mips release < 6. - LE = 0x06, // Ordered or Less Than or Equal, on Mips release >= 6. - ULE = 0x07, // Unordered or Less Than or Equal. - - // Following constants are available on Mips release >= 6 only. - ORD = 0x11, // Ordered, on Mips release >= 6. - UNE = 0x12, // Not equal, on Mips release >= 6. - NE = 0x13, // Ordered Greater Than or Less Than. on Mips >= 6 only. -}; - -// FPU rounding modes. 
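
// Spot checks for the odd/even pairing contract stated above:
// NegateCondition() flips only the least-significant bit, so each
// condition and its opposite must be numbered as an adjacent pair.
#include <cassert>

int main() {
  auto negate = [](int cc) { return cc ^ 1; };
  assert(negate(6) == 7);    // equal <-> not_equal
  assert(negate(2) == 3);    // Uless <-> Ugreater_equal
  assert(negate(12) == 13);  // less <-> greater_equal
  return 0;
}
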
-enum FPURoundingMode { - RN = 0 << 0, // Round to Nearest. - RZ = 1 << 0, // Round towards zero. - RP = 2 << 0, // Round towards Plus Infinity. - RM = 3 << 0, // Round towards Minus Infinity. - - // Aliases. - kRoundToNearest = RN, - kRoundToZero = RZ, - kRoundToPlusInf = RP, - kRoundToMinusInf = RM, - - mode_round = RN, - mode_ceil = RP, - mode_floor = RM, - mode_trunc = RZ -}; - -const uint32_t kFPURoundingModeMask = 3 << 0; - -enum CheckForInexactConversion { - kCheckForInexactConversion, - kDontCheckForInexactConversion -}; - -enum class MaxMinKind : int { kMin = 0, kMax = 1 }; - -// ----------------------------------------------------------------------------- -// Hints. - -// Branch hints are not used on the MIPS. They are defined so that they can -// appear in shared function signatures, but will be ignored in MIPS -// implementations. -enum Hint { no_hint = 0 }; - -inline Hint NegateHint(Hint hint) { return no_hint; } - -// ----------------------------------------------------------------------------- -// Specific instructions, constants, and masks. -// These constants are declared in assembler-mips.cc, as they use named -// registers and other constants. - -// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r) -// operations as post-increment of sp. -extern const Instr kPopInstruction; -// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp. -extern const Instr kPushInstruction; -// sw(r, MemOperand(sp, 0)) -extern const Instr kPushRegPattern; -// lw(r, MemOperand(sp, 0)) -extern const Instr kPopRegPattern; -extern const Instr kLwRegFpOffsetPattern; -extern const Instr kSwRegFpOffsetPattern; -extern const Instr kLwRegFpNegOffsetPattern; -extern const Instr kSwRegFpNegOffsetPattern; -// A mask for the Rt register for push, pop, lw, sw instructions. -extern const Instr kRtMask; -extern const Instr kLwSwInstrTypeMask; -extern const Instr kLwSwInstrArgumentMask; -extern const Instr kLwSwOffsetMask; - -// Break 0xfffff, reserved for redirected real time call. -const Instr rtCallRedirInstr = SPECIAL | BREAK | call_rt_redirected << 6; -// A nop instruction. (Encoding of sll 0 0 0). -const Instr nopInstr = 0; - -static constexpr uint64_t OpcodeToBitNumber(Opcode opcode) { - return 1ULL << (static_cast(opcode) >> kOpcodeShift); -} - -constexpr uint8_t kInstrSize = 4; -constexpr uint8_t kInstrSizeLog2 = 2; - -class InstructionBase { - public: - enum { - // On MIPS PC cannot actually be directly accessed. We behave as if PC was - // always the value of the current instruction being executed. - kPCReadOffset = 0 - }; - - // Instruction type. - enum Type { kRegisterType, kImmediateType, kJumpType, kUnsupported = -1 }; - - // Get the raw instruction bits. - inline Instr InstructionBits() const { - return *reinterpret_cast(this); - } - - // Set the raw instruction bits to value. - inline void SetInstructionBits(Instr value) { - *reinterpret_cast(this) = value; - } - - // Read one particular bit out of the instruction bits. - inline int Bit(int nr) const { return (InstructionBits() >> nr) & 1; } - - // Read a bit field out of the instruction bits. 
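
// A sketch of the bitset trick behind OpcodeToBitNumber() above: the
// primary opcode occupies the top six bits, so opcode >> kOpcodeShift is
// 0..63 and one uint64_t bit per opcode suffices for set membership.
// Values are recomputed locally for the demo.
#include <cstdint>

constexpr int kShift = 26;  // kOpcodeShift
constexpr uint64_t Bit(uint32_t op) { return 1ULL << (op >> kShift); }

constexpr uint32_t BEQ = 4u << kShift, BNE = 5u << kShift;
constexpr uint64_t kBranchSet = Bit(BEQ) | Bit(BNE);
static_assert((kBranchSet & Bit(BEQ)) != 0, "BEQ is in the set");
static_assert((kBranchSet & Bit(0)) == 0, "SPECIAL (0) is not");
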
- inline int Bits(int hi, int lo) const { - return (InstructionBits() >> lo) & ((2U << (hi - lo)) - 1); - } - - static constexpr uint64_t kOpcodeImmediateTypeMask = - OpcodeToBitNumber(REGIMM) | OpcodeToBitNumber(BEQ) | - OpcodeToBitNumber(BNE) | OpcodeToBitNumber(BLEZ) | - OpcodeToBitNumber(BGTZ) | OpcodeToBitNumber(ADDI) | - OpcodeToBitNumber(DADDI) | OpcodeToBitNumber(ADDIU) | - OpcodeToBitNumber(SLTI) | OpcodeToBitNumber(SLTIU) | - OpcodeToBitNumber(ANDI) | OpcodeToBitNumber(ORI) | - OpcodeToBitNumber(XORI) | OpcodeToBitNumber(LUI) | - OpcodeToBitNumber(BEQL) | OpcodeToBitNumber(BNEL) | - OpcodeToBitNumber(BLEZL) | OpcodeToBitNumber(BGTZL) | - OpcodeToBitNumber(POP66) | OpcodeToBitNumber(POP76) | - OpcodeToBitNumber(LB) | OpcodeToBitNumber(LH) | OpcodeToBitNumber(LWL) | - OpcodeToBitNumber(LW) | OpcodeToBitNumber(LBU) | OpcodeToBitNumber(LHU) | - OpcodeToBitNumber(LWR) | OpcodeToBitNumber(SB) | OpcodeToBitNumber(SH) | - OpcodeToBitNumber(SWL) | OpcodeToBitNumber(SW) | OpcodeToBitNumber(SWR) | - OpcodeToBitNumber(LWC1) | OpcodeToBitNumber(LDC1) | - OpcodeToBitNumber(SWC1) | OpcodeToBitNumber(SDC1) | - OpcodeToBitNumber(PCREL) | OpcodeToBitNumber(BC) | - OpcodeToBitNumber(BALC); - -#define FunctionFieldToBitNumber(function) (1ULL << function) - - static const uint64_t kFunctionFieldRegisterTypeMask = - FunctionFieldToBitNumber(JR) | FunctionFieldToBitNumber(JALR) | - FunctionFieldToBitNumber(BREAK) | FunctionFieldToBitNumber(SLL) | - FunctionFieldToBitNumber(SRL) | FunctionFieldToBitNumber(SRA) | - FunctionFieldToBitNumber(SLLV) | FunctionFieldToBitNumber(SRLV) | - FunctionFieldToBitNumber(SRAV) | FunctionFieldToBitNumber(LSA) | - FunctionFieldToBitNumber(MFHI) | FunctionFieldToBitNumber(MFLO) | - FunctionFieldToBitNumber(MULT) | FunctionFieldToBitNumber(MULTU) | - FunctionFieldToBitNumber(DIV) | FunctionFieldToBitNumber(DIVU) | - FunctionFieldToBitNumber(ADD) | FunctionFieldToBitNumber(ADDU) | - FunctionFieldToBitNumber(SUB) | FunctionFieldToBitNumber(SUBU) | - FunctionFieldToBitNumber(AND) | FunctionFieldToBitNumber(OR) | - FunctionFieldToBitNumber(XOR) | FunctionFieldToBitNumber(NOR) | - FunctionFieldToBitNumber(SLT) | FunctionFieldToBitNumber(SLTU) | - FunctionFieldToBitNumber(TGE) | FunctionFieldToBitNumber(TGEU) | - FunctionFieldToBitNumber(TLT) | FunctionFieldToBitNumber(TLTU) | - FunctionFieldToBitNumber(TEQ) | FunctionFieldToBitNumber(TNE) | - FunctionFieldToBitNumber(MOVZ) | FunctionFieldToBitNumber(MOVN) | - FunctionFieldToBitNumber(MOVCI) | FunctionFieldToBitNumber(SELEQZ_S) | - FunctionFieldToBitNumber(SELNEZ_S) | FunctionFieldToBitNumber(SYNC); - - // Accessors for the different named fields used in the MIPS encoding. - inline Opcode OpcodeValue() const { - return static_cast( - Bits(kOpcodeShift + kOpcodeBits - 1, kOpcodeShift)); - } - - inline int FunctionFieldRaw() const { - return InstructionBits() & kFunctionFieldMask; - } - - // Return the fields at their original place in the instruction encoding. - inline Opcode OpcodeFieldRaw() const { - return static_cast(InstructionBits() & kOpcodeMask); - } - - // Safe to call within InstructionType(). - inline int RsFieldRawNoAssert() const { - return InstructionBits() & kRsFieldMask; - } - - inline int SaFieldRaw() const { return InstructionBits() & kSaFieldMask; } - - // Get the encoding type of the instruction. 
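
// The inclusive-bounds semantics of Bits(hi, lo) above, in isolation:
// the field width is hi - lo + 1, which is where the
// ((2U << (hi - lo)) - 1) mask comes from.
#include <cstdint>

inline uint32_t Bits(uint32_t word, int hi, int lo) {
  return (word >> lo) & ((2u << (hi - lo)) - 1);
}
// Bits(instr, 31, 26) is the 6-bit primary opcode, matching OpcodeValue().
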
- inline Type InstructionType() const; - - inline MSAMinorOpcode MSAMinorOpcodeField() const { - int op = this->FunctionFieldRaw(); - switch (op) { - case 0: - case 1: - case 2: - return kMsaMinorI8; - case 6: - return kMsaMinorI5; - case 7: - return (((this->InstructionBits() & kMsaI5I10Mask) == LDI) - ? kMsaMinorI10 - : kMsaMinorI5); - case 9: - case 10: - return kMsaMinorBIT; - case 13: - case 14: - case 15: - case 16: - case 17: - case 18: - case 19: - case 20: - case 21: - return kMsaMinor3R; - case 25: - return kMsaMinorELM; - case 26: - case 27: - case 28: - return kMsaMinor3RF; - case 30: - switch (this->RsFieldRawNoAssert()) { - case MSA_2R_FORMAT: - return kMsaMinor2R; - case MSA_2RF_FORMAT: - return kMsaMinor2RF; - default: - return kMsaMinorVEC; - } - break; - case 32: - case 33: - case 34: - case 35: - case 36: - case 37: - case 38: - case 39: - return kMsaMinorMI10; - default: - return kMsaMinorUndefined; - } - } - - protected: - InstructionBase() {} -}; - -template -class InstructionGetters : public T { - public: - inline int RsValue() const { - DCHECK(this->InstructionType() == InstructionBase::kRegisterType || - this->InstructionType() == InstructionBase::kImmediateType); - return InstructionBase::Bits(kRsShift + kRsBits - 1, kRsShift); - } - - inline int RtValue() const { - DCHECK(this->InstructionType() == InstructionBase::kRegisterType || - this->InstructionType() == InstructionBase::kImmediateType); - return this->Bits(kRtShift + kRtBits - 1, kRtShift); - } - - inline int RdValue() const { - DCHECK_EQ(this->InstructionType(), InstructionBase::kRegisterType); - return this->Bits(kRdShift + kRdBits - 1, kRdShift); - } - - inline int BaseValue() const { - DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); - return this->Bits(kBaseShift + kBaseBits - 1, kBaseShift); - } - - inline int SaValue() const { - DCHECK_EQ(this->InstructionType(), InstructionBase::kRegisterType); - return this->Bits(kSaShift + kSaBits - 1, kSaShift); - } - - inline int LsaSaValue() const { - DCHECK_EQ(this->InstructionType(), InstructionBase::kRegisterType); - return this->Bits(kSaShift + kLsaSaBits - 1, kSaShift); - } - - inline int FunctionValue() const { - DCHECK(this->InstructionType() == InstructionBase::kRegisterType || - this->InstructionType() == InstructionBase::kImmediateType); - return this->Bits(kFunctionShift + kFunctionBits - 1, kFunctionShift); - } - - inline int FdValue() const { - return this->Bits(kFdShift + kFdBits - 1, kFdShift); - } - - inline int FsValue() const { - return this->Bits(kFsShift + kFsBits - 1, kFsShift); - } - - inline int FtValue() const { - return this->Bits(kFtShift + kFtBits - 1, kFtShift); - } - - inline int FrValue() const { - return this->Bits(kFrShift + kFrBits - 1, kFrShift); - } - - inline int WdValue() const { - return this->Bits(kWdShift + kWdBits - 1, kWdShift); - } - - inline int WsValue() const { - return this->Bits(kWsShift + kWsBits - 1, kWsShift); - } - - inline int WtValue() const { - return this->Bits(kWtShift + kWtBits - 1, kWtShift); - } - - inline int Bp2Value() const { - DCHECK_EQ(this->InstructionType(), InstructionBase::kRegisterType); - return this->Bits(kBp2Shift + kBp2Bits - 1, kBp2Shift); - } - - // Float Compare condition code instruction bits. - inline int FCccValue() const { - return this->Bits(kFCccShift + kFCccBits - 1, kFCccShift); - } - - // Float Branch condition code instruction bits. 
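
// A worked example for the register getters above: hand-encode
// ADDU a0, a1, a2 (SPECIAL, function 0x21) and read the fields back the
// way RsValue()/RtValue()/RdValue() do. O32 register numbers are taken
// from the names_ table earlier in this file.
#include <cassert>
#include <cstdint>

int main() {
  constexpr uint32_t ADDU = (4u << 3) + 1;  // SPECIAL function field
  constexpr uint32_t a0 = 4, a1 = 5, a2 = 6;
  uint32_t instr = (a1 << 21) | (a2 << 16) | (a0 << 11) | ADDU;
  assert(((instr >> 21) & 0x1f) == a1);  // RsValue()
  assert(((instr >> 16) & 0x1f) == a2);  // RtValue()
  assert(((instr >> 11) & 0x1f) == a0);  // RdValue()
  return 0;
}
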
- inline int FBccValue() const { - return this->Bits(kFBccShift + kFBccBits - 1, kFBccShift); - } - - // Float Branch true/false instruction bit. - inline int FBtrueValue() const { - return this->Bits(kFBtrueShift + kFBtrueBits - 1, kFBtrueShift); - } - - // Return the fields at their original place in the instruction encoding. - inline Opcode OpcodeFieldRaw() const { - return static_cast(this->InstructionBits() & kOpcodeMask); - } - - inline int RsFieldRaw() const { - DCHECK(this->InstructionType() == InstructionBase::kRegisterType || - this->InstructionType() == InstructionBase::kImmediateType); - return this->InstructionBits() & kRsFieldMask; - } - - inline int RtFieldRaw() const { - DCHECK(this->InstructionType() == InstructionBase::kRegisterType || - this->InstructionType() == InstructionBase::kImmediateType); - return this->InstructionBits() & kRtFieldMask; - } - - inline int RdFieldRaw() const { - DCHECK_EQ(this->InstructionType(), InstructionBase::kRegisterType); - return this->InstructionBits() & kRdFieldMask; - } - - inline int SaFieldRaw() const { - return this->InstructionBits() & kSaFieldMask; - } - - inline int FunctionFieldRaw() const { - return this->InstructionBits() & kFunctionFieldMask; - } - - // Get the secondary field according to the opcode. - inline int SecondaryValue() const { - Opcode op = this->OpcodeFieldRaw(); - switch (op) { - case SPECIAL: - case SPECIAL2: - return FunctionValue(); - case COP1: - return RsValue(); - case REGIMM: - return RtValue(); - default: - return nullptrSF; - } - } - - inline int32_t ImmValue(int bits) const { - DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); - return this->Bits(bits - 1, 0); - } - - inline int32_t Imm9Value() const { - DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); - return this->Bits(kImm9Shift + kImm9Bits - 1, kImm9Shift); - } - - inline int32_t Imm16Value() const { - DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); - return this->Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift); - } - - inline int32_t Imm18Value() const { - DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); - return this->Bits(kImm18Shift + kImm18Bits - 1, kImm18Shift); - } - - inline int32_t Imm19Value() const { - DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); - return this->Bits(kImm19Shift + kImm19Bits - 1, kImm19Shift); - } - - inline int32_t Imm21Value() const { - DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); - return this->Bits(kImm21Shift + kImm21Bits - 1, kImm21Shift); - } - - inline int32_t Imm26Value() const { - DCHECK((this->InstructionType() == InstructionBase::kJumpType) || - (this->InstructionType() == InstructionBase::kImmediateType)); - return this->Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift); - } - - inline int32_t MsaImm8Value() const { - DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); - return this->Bits(kMsaImm8Shift + kMsaImm8Bits - 1, kMsaImm8Shift); - } - - inline int32_t MsaImm5Value() const { - DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); - return this->Bits(kMsaImm5Shift + kMsaImm5Bits - 1, kMsaImm5Shift); - } - - inline int32_t MsaImm10Value() const { - DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); - return this->Bits(kMsaImm10Shift + kMsaImm10Bits - 1, kMsaImm10Shift); - } - - inline int32_t MsaImmMI10Value() const { - DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); - return this->Bits(kMsaImmMI10Shift + 
kMsaImmMI10Bits - 1, kMsaImmMI10Shift); - } - - inline int32_t MsaBitDf() const { - DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); - int32_t df_m = this->Bits(22, 16); - if (((df_m >> 6) & 1U) == 0) { - return 3; - } else if (((df_m >> 5) & 3U) == 2) { - return 2; - } else if (((df_m >> 4) & 7U) == 6) { - return 1; - } else if (((df_m >> 3) & 15U) == 14) { - return 0; - } else { - return -1; - } - } - - inline int32_t MsaBitMValue() const { - DCHECK_EQ(this->InstructionType(), InstructionBase::kImmediateType); - return this->Bits(16 + this->MsaBitDf() + 3, 16); - } - - inline int32_t MsaElmDf() const { - DCHECK(this->InstructionType() == InstructionBase::kRegisterType || - this->InstructionType() == InstructionBase::kImmediateType); - int32_t df_n = this->Bits(21, 16); - if (((df_n >> 4) & 3U) == 0) { - return 0; - } else if (((df_n >> 3) & 7U) == 4) { - return 1; - } else if (((df_n >> 2) & 15U) == 12) { - return 2; - } else if (((df_n >> 1) & 31U) == 28) { - return 3; - } else { - return -1; - } - } - - inline int32_t MsaElmNValue() const { - DCHECK(this->InstructionType() == InstructionBase::kRegisterType || - this->InstructionType() == InstructionBase::kImmediateType); - return this->Bits(16 + 4 - this->MsaElmDf(), 16); - } - - static bool IsForbiddenAfterBranchInstr(Instr instr); - - // Say if the instruction should not be used in a branch delay slot or - // immediately after a compact branch. - inline bool IsForbiddenAfterBranch() const { - return IsForbiddenAfterBranchInstr(this->InstructionBits()); - } - - inline bool IsForbiddenInBranchDelay() const { - return IsForbiddenAfterBranch(); - } - - // Say if the instruction 'links'. e.g. jal, bal. - bool IsLinkingInstruction() const; - // Say if the instruction is a break or a trap. - bool IsTrap() const; - - inline bool IsMSABranchInstr() const { - if (this->OpcodeFieldRaw() == COP1) { - switch (this->RsFieldRaw()) { - case BZ_V: - case BZ_B: - case BZ_H: - case BZ_W: - case BZ_D: - case BNZ_V: - case BNZ_B: - case BNZ_H: - case BNZ_W: - case BNZ_D: - return true; - default: - return false; - } - } - return false; - } - - inline bool IsMSAInstr() const { - if (this->IsMSABranchInstr() || (this->OpcodeFieldRaw() == MSA)) - return true; - return false; - } -}; - -class Instruction : public InstructionGetters { - public: - // Instructions are read of out a code stream. The only way to get a - // reference to an instruction is to convert a pointer. There is no way - // to allocate or create instances of class Instruction. - // Use the At(pc) function to create references to Instruction. - static Instruction* At(byte* pc) { - return reinterpret_cast(pc); - } - - private: - // We need to prevent the creation of instances of class Instruction. - DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction); -}; - -// ----------------------------------------------------------------------------- -// MIPS assembly various constants. - -// C/C++ argument slots size. -const int kCArgSlotCount = 4; -const int kCArgsSlotsSize = kCArgSlotCount * kInstrSize; - -// JS argument slots size. -const int kJSArgsSlotsSize = 0 * kInstrSize; - -// Assembly builtins argument slots size. 
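
// A sketch of the fixed-width walk that Instruction::At() above enables:
// every MIPS instruction is kInstrSize (4) bytes, so a code stream is
// scanned by stepping the pc in 4-byte increments. The visitor is a
// placeholder for whatever inspection the caller needs.
#include <cstdint>

template <typename Visitor>
void WalkCode(uint8_t* pc, uint8_t* end, Visitor&& visit) {
  constexpr int kInstrSize = 4;
  for (; pc + kInstrSize <= end; pc += kInstrSize) {
    visit(*reinterpret_cast<int32_t*>(pc));  // the raw Instr bits
  }
}
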
-// Assembly builtins argument slots size.
-const int kBArgsSlotsSize = 0 * kInstrSize;
-
-const int kBranchReturnOffset = 2 * kInstrSize;
-
-InstructionBase::Type InstructionBase::InstructionType() const {
-  switch (OpcodeFieldRaw()) {
-    case SPECIAL:
-      if (FunctionFieldToBitNumber(FunctionFieldRaw()) &
-          kFunctionFieldRegisterTypeMask) {
-        return kRegisterType;
-      }
-      return kUnsupported;
-    case SPECIAL2:
-      switch (FunctionFieldRaw()) {
-        case MUL:
-        case CLZ:
-          return kRegisterType;
-        default:
-          return kUnsupported;
-      }
-      break;
-    case SPECIAL3:
-      switch (FunctionFieldRaw()) {
-        case INS:
-        case EXT:
-          return kRegisterType;
-        case BSHFL: {
-          int sa = SaFieldRaw() >> kSaShift;
-          switch (sa) {
-            case BITSWAP:
-            case WSBH:
-            case SEB:
-            case SEH:
-              return kRegisterType;
-          }
-          sa >>= kBp2Bits;
-          switch (sa) {
-            case ALIGN:
-              return kRegisterType;
-            default:
-              return kUnsupported;
-          }
-        }
-        case LL_R6:
-        case SC_R6: {
-          DCHECK(IsMipsArchVariant(kMips32r6));
-          return kImmediateType;
-        }
-        default:
-          return kUnsupported;
-      }
-      break;
-    case COP1:  // Coprocessor instructions.
-      switch (RsFieldRawNoAssert()) {
-        case BC1:  // Branch on coprocessor condition.
-        case BC1EQZ:
-        case BC1NEZ:
-          return kImmediateType;
-        // MSA Branch instructions
-        case BZ_V:
-        case BNZ_V:
-        case BZ_B:
-        case BZ_H:
-        case BZ_W:
-        case BZ_D:
-        case BNZ_B:
-        case BNZ_H:
-        case BNZ_W:
-        case BNZ_D:
-          return kImmediateType;
-        default:
-          return kRegisterType;
-      }
-      break;
-    case COP1X:
-      return kRegisterType;
-
-    // 26 bits immediate type instructions. e.g.: j imm26.
-    case J:
-    case JAL:
-      return kJumpType;
-
-    case MSA:
-      switch (MSAMinorOpcodeField()) {
-        case kMsaMinor3R:
-        case kMsaMinor3RF:
-        case kMsaMinorVEC:
-        case kMsaMinor2R:
-        case kMsaMinor2RF:
-          return kRegisterType;
-        case kMsaMinorELM:
-          switch (InstructionBits() & kMsaLongerELMMask) {
-            case CFCMSA:
-            case CTCMSA:
-            case MOVE_V:
-              return kRegisterType;
-            default:
-              return kImmediateType;
-          }
-        default:
-          return kImmediateType;
-      }
-
-    default:
-      return kImmediateType;
-  }
-}
-
-#undef OpcodeToBitNumber
-#undef FunctionFieldToBitNumber
-
-// -----------------------------------------------------------------------------
-// Instructions.
-
-template <class T>
-bool InstructionGetters<T>::IsLinkingInstruction() const {
-  uint32_t op = this->OpcodeFieldRaw();
-  switch (op) {
-    case JAL:
-      return true;
-    case POP76:
-      if (this->RsFieldRawNoAssert() == JIALC)
-        return true;  // JIALC
-      else
-        return false;  // BNEZC
-    case REGIMM:
-      switch (this->RtFieldRaw()) {
-        case BGEZAL:
-        case BLTZAL:
-          return true;
-        default:
-          return false;
-      }
-    case SPECIAL:
-      switch (this->FunctionFieldRaw()) {
-        case JALR:
-          return true;
-        default:
-          return false;
-      }
-    default:
-      return false;
-  }
-}
-
-template <class T>
-bool InstructionGetters<T>::IsTrap() const {
-  if (this->OpcodeFieldRaw() != SPECIAL) {
-    return false;
-  } else {
-    switch (this->FunctionFieldRaw()) {
-      case BREAK:
-      case TGE:
-      case TGEU:
-      case TLT:
-      case TLTU:
-      case TEQ:
-      case TNE:
-        return true;
-      default:
-        return false;
-    }
-  }
-}
-
-// static
-template <class T>
-bool InstructionGetters<T>::IsForbiddenAfterBranchInstr(Instr instr) {
-  Opcode opcode = static_cast<Opcode>(instr & kOpcodeMask);
-  switch (opcode) {
-    case J:
-    case JAL:
-    case BEQ:
-    case BNE:
-    case BLEZ:   // POP06 bgeuc/bleuc, blezalc, bgezalc
-    case BGTZ:   // POP07 bltuc/bgtuc, bgtzalc, bltzalc
-    case BEQL:
-    case BNEL:
-    case BLEZL:  // POP26 bgezc, blezc, bgec/blec
-    case BGTZL:  // POP27 bgtzc, bltzc, bltc/bgtc
-    case BC:
-    case BALC:
-    case POP10:  // beqzalc, bovc, beqc
-    case POP30:  // bnezalc, bnvc, bnec
-    case POP66:  // beqzc, jic
-    case POP76:  // bnezc, jialc
-      return true;
-    case REGIMM:
-      switch (instr & kRtFieldMask) {
-        case BLTZ:
-        case BGEZ:
-        case BLTZAL:
-        case BGEZAL:
-          return true;
-        default:
-          return false;
-      }
-      break;
-    case SPECIAL:
-      switch (instr & kFunctionFieldMask) {
-        case JR:
-        case JALR:
-          return true;
-        default:
-          return false;
-      }
-      break;
-    case COP1:
-      switch (instr & kRsFieldMask) {
-        case BC1:
-        case BC1EQZ:
-        case BC1NEZ:
-        case BZ_V:
-        case BZ_B:
-        case BZ_H:
-        case BZ_W:
-        case BZ_D:
-        case BNZ_V:
-        case BNZ_B:
-        case BNZ_H:
-        case BNZ_W:
-        case BNZ_D:
-          return true;
-          break;
-        default:
-          return false;
-      }
-      break;
-    default:
-      return false;
-  }
-}
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_CODEGEN_MIPS_CONSTANTS_MIPS_H_
diff --git a/src/codegen/mips/cpu-mips.cc b/src/codegen/mips/cpu-mips.cc
deleted file mode 100644
index a7120d1c7a..0000000000
--- a/src/codegen/mips/cpu-mips.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// CPU specific code for mips independent of OS goes here.
-
-#include <sys/syscall.h>
-#include <unistd.h>
-
-#ifdef __mips
-#include <asm/cachectl.h>
-#endif  // #ifdef __mips
-
-#if V8_TARGET_ARCH_MIPS
-
-#include "src/codegen/cpu-features.h"
-
-namespace v8 {
-namespace internal {
-
-void CpuFeatures::FlushICache(void* start, size_t size) {
-#if !defined(USE_SIMULATOR)
-  // Nothing to do, flushing no instructions.
-  if (size == 0) {
-    return;
-  }
-
-#if defined(ANDROID)
-  // Bionic cacheflush can typically run in userland, avoiding kernel call.
-  char* end = reinterpret_cast<char*>(start) + size;
-  cacheflush(reinterpret_cast<intptr_t>(start), reinterpret_cast<intptr_t>(end),
-             0);
-#else   // ANDROID
-  int res;
-  // See http://www.linux-mips.org/wiki/Cacheflush_Syscall.
-  res = syscall(__NR_cacheflush, start, size, ICACHE);
-  if (res) FATAL("Failed to flush the instruction cache");
-#endif  // ANDROID
-#endif  // !USE_SIMULATOR.
-}
-
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_MIPS
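The cpu-mips.cc deletion above removes the MIPS instruction-cache flush. The
pattern it implemented is the standard JIT requirement: instructions are
written through the data cache, and the (non-coherent) instruction cache must
be invalidated before the new code runs. A minimal sketch of a call site,
with invented helper names (AllocWritableCode/EmitStub are illustrative, not
V8 APIs):

    void* buf = AllocWritableCode(4096);   // hypothetical allocator
    size_t n = EmitStub(buf);              // hypothetical code emitter
    CpuFeatures::FlushICache(buf, n);      // invalidate stale icache lines
    // Only after the flush is it safe to execute from buf.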
diff --git a/src/codegen/mips/interface-descriptors-mips-inl.h b/src/codegen/mips/interface-descriptors-mips-inl.h
deleted file mode 100644
index c091859955..0000000000
--- a/src/codegen/mips/interface-descriptors-mips-inl.h
+++ /dev/null
@@ -1,314 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CODEGEN_MIPS_INTERFACE_DESCRIPTORS_MIPS_INL_H_
-#define V8_CODEGEN_MIPS_INTERFACE_DESCRIPTORS_MIPS_INL_H_
-
-#if V8_TARGET_ARCH_MIPS
-
-#include "src/codegen/interface-descriptors.h"
-#include "src/execution/frames.h"
-
-namespace v8 {
-namespace internal {
-
-constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
-  auto registers = RegisterArray(a0, a1, a2, a3, t0);
-  static_assert(registers.size() == kMaxBuiltinRegisterParams);
-  return registers;
-}
-
-#if DEBUG
-template <typename DerivedDescriptor>
-void StaticCallInterfaceDescriptor<DerivedDescriptor>::
-    VerifyArgumentRegisterCount(CallInterfaceDescriptorData* data, int argc) {
-  RegList allocatable_regs = data->allocatable_registers();
-  if (argc >= 1) DCHECK(allocatable_regs.has(a0));
-  if (argc >= 2) DCHECK(allocatable_regs.has(a1));
-  if (argc >= 3) DCHECK(allocatable_regs.has(a2));
-  if (argc >= 4) DCHECK(allocatable_regs.has(a3));
-  // Additional arguments are passed on the stack.
-}
-#endif  // DEBUG
-
-// static
-constexpr auto WriteBarrierDescriptor::registers() {
-  return RegisterArray(a1, t1, t0, a0, a2, v0, a3, kContextRegister);
-}
-
-// static
-constexpr Register LoadDescriptor::ReceiverRegister() { return a1; }
-// static
-constexpr Register LoadDescriptor::NameRegister() { return a2; }
-// static
-constexpr Register LoadDescriptor::SlotRegister() { return a0; }
-
-// static
-constexpr Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
-
-// static
-constexpr Register KeyedLoadBaselineDescriptor::ReceiverRegister() {
-  return a1;
-}
-// static
-constexpr Register KeyedLoadBaselineDescriptor::NameRegister() {
-  return kInterpreterAccumulatorRegister;
-}
-// static
-constexpr Register KeyedLoadBaselineDescriptor::SlotRegister() { return a2; }
-
-// static
-constexpr Register KeyedLoadWithVectorDescriptor::VectorRegister() {
-  return a3;
-}
-
-// static
-constexpr Register KeyedHasICBaselineDescriptor::ReceiverRegister() {
-  return kInterpreterAccumulatorRegister;
-}
-// static
-constexpr Register KeyedHasICBaselineDescriptor::NameRegister() { return a1; }
-// static
-constexpr Register KeyedHasICBaselineDescriptor::SlotRegister() { return a2; }
-
-// static
-constexpr Register KeyedHasICWithVectorDescriptor::VectorRegister() {
-  return a3;
-}
-
-// static
-constexpr Register
-LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
-  return t0;
-}
-
-// static
-constexpr Register StoreDescriptor::ReceiverRegister() { return a1; }
-// static
-constexpr Register StoreDescriptor::NameRegister() { return a2; }
-// static
-constexpr Register StoreDescriptor::ValueRegister() { return a0; }
-// static
-constexpr Register StoreDescriptor::SlotRegister() { return t0; }
-
-// static
-constexpr Register StoreWithVectorDescriptor::VectorRegister() { return a3; }
-
-// static
-constexpr Register StoreTransitionDescriptor::MapRegister() { return t1; }
-
-// static
-constexpr Register ApiGetterDescriptor::HolderRegister() { return a0; }
-// static
-constexpr Register ApiGetterDescriptor::CallbackRegister() { return a3; }
-
-// static
-constexpr Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
-// static
-constexpr Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
-
-// static
-constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
-  return a2;
-}
-
-// static
-constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() {
-  // TODO(v8:11421): Implement on this platform.
- return a3; -} - -// static -constexpr Register TypeConversionDescriptor::ArgumentRegister() { return a0; } - -// static -constexpr auto TypeofDescriptor::registers() { return RegisterArray(a0); } - -// static -constexpr auto CallTrampolineDescriptor::registers() { - // a1: target - // a0: number of arguments - return RegisterArray(a1, a0); -} - -// static -constexpr auto CopyDataPropertiesWithExcludedPropertiesDescriptor::registers() { - // a1 : the source - // a0 : the excluded property count - return RegisterArray(a1, a0); -} - -// static -constexpr auto -CopyDataPropertiesWithExcludedPropertiesOnStackDescriptor::registers() { - // a1 : the source - // a0 : the excluded property count - // a2 : the excluded property base - return RegisterArray(a1, a0, a2); -} - -// static -constexpr auto CallVarargsDescriptor::registers() { - // a0 : number of arguments (on the stack) - // a1 : the target to call - // t0 : arguments list length (untagged) - // a2 : arguments list (FixedArray) - return RegisterArray(a1, a0, t0, a2); -} - -// static -constexpr auto CallForwardVarargsDescriptor::registers() { - // a1: the target to call - // a0: number of arguments - // a2: start index (to support rest parameters) - return RegisterArray(a1, a0, a2); -} - -// static -constexpr auto CallFunctionTemplateDescriptor::registers() { - // a1 : function template info - // a0 : number of arguments (on the stack) - return RegisterArray(a1, a0); -} - -// static -constexpr auto CallWithSpreadDescriptor::registers() { - // a0 : number of arguments (on the stack) - // a1 : the target to call - // a2 : the object to spread - return RegisterArray(a1, a0, a2); -} - -// static -constexpr auto CallWithArrayLikeDescriptor::registers() { - // a1 : the target to call - // a2 : the arguments list - return RegisterArray(a1, a2); -} - -// static -constexpr auto ConstructVarargsDescriptor::registers() { - // a0 : number of arguments (on the stack) - // a1 : the target to call - // a3 : the new target - // t0 : arguments list length (untagged) - // a2 : arguments list (FixedArray) - return RegisterArray(a1, a3, a0, t0, a2); -} - -// static -constexpr auto ConstructForwardVarargsDescriptor::registers() { - // a1: the target to call - // a3: new target - // a0: number of arguments - // a2: start index (to support rest parameters) - return RegisterArray(a1, a3, a0, a2); -} - -// static -constexpr auto ConstructWithSpreadDescriptor::registers() { - // a0 : number of arguments (on the stack) - // a1 : the target to call - // a3 : the new target - // a2 : the object to spread - return RegisterArray(a1, a3, a0, a2); -} - -// static -constexpr auto ConstructWithArrayLikeDescriptor::registers() { - // a1 : the target to call - // a3 : the new target - // a2 : the arguments list - return RegisterArray(a1, a3, a2); -} - -// static -constexpr auto ConstructStubDescriptor::registers() { - // a1: target - // a3: new target - // a0: number of arguments - return RegisterArray(a1, a3, a0); -} - -// static -constexpr auto AbortDescriptor::registers() { return RegisterArray(a0); } - -// static -constexpr auto CompareDescriptor::registers() { return RegisterArray(a1, a0); } - -// static -constexpr auto Compare_BaselineDescriptor::registers() { - // a1: left operand - // a0: right operand - // a2: feedback slot - return RegisterArray(a1, a0, a2); -} - -// static -constexpr auto BinaryOpDescriptor::registers() { return RegisterArray(a1, a0); } - -// static -constexpr auto BinaryOp_BaselineDescriptor::registers() { - // TODO(v8:11421): Implement on this 
platform.
-  return RegisterArray(a1, a0, a2);
-}
-
-// static
-constexpr auto BinarySmiOp_BaselineDescriptor::registers() {
-  // TODO(v8:11421): Implement on this platform.
-  return RegisterArray(a0, a1, a2);
-}
-
-// static
-constexpr auto ApiCallbackDescriptor::registers() {
-  // a1 : kApiFunctionAddress
-  // a2 : kArgc
-  // a3 : kCallData
-  // a0 : kHolder
-  return RegisterArray(a1, a2, a3, a0);
-}
-
-// static
-constexpr auto InterpreterDispatchDescriptor::registers() {
-  return RegisterArray(
-      kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
-      kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister);
-}
-
-// static
-constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
-  // a0 : argument count
-  // a2 : address of first argument
-  // a1 : the target callable to call
-  return RegisterArray(a0, a2, a1);
-}
-
-// static
-constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() {
-  // a0 : argument count
-  // t4 : address of the first argument
-  // a1 : constructor to call
-  // a3 : new target
-  // a2 : allocation site feedback if available, undefined otherwise
-  return RegisterArray(a0, t4, a1, a3, a2);
-}
-
-// static
-constexpr auto ResumeGeneratorDescriptor::registers() {
-  // v0 : the value to pass to the generator
-  // a1 : the JSGeneratorObject to resume
-  return RegisterArray(v0, a1);
-}
-
-// static
-constexpr auto RunMicrotasksEntryDescriptor::registers() {
-  return RegisterArray(a0, a1);
-}
-
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_MIPS
-
-#endif  // V8_CODEGEN_MIPS_INTERFACE_DESCRIPTORS_MIPS_INL_H_
diff --git a/src/codegen/mips/macro-assembler-mips.cc b/src/codegen/mips/macro-assembler-mips.cc
deleted file mode 100644
index 6eba156d2d..0000000000
--- a/src/codegen/mips/macro-assembler-mips.cc
+++ /dev/null
@@ -1,5655 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <limits.h>  // For LONG_MIN, LONG_MAX.
-
-#if V8_TARGET_ARCH_MIPS
-
-#include "src/base/bits.h"
-#include "src/base/division-by-constant.h"
-#include "src/codegen/assembler-inl.h"
-#include "src/codegen/callable.h"
-#include "src/codegen/code-factory.h"
-#include "src/codegen/external-reference-table.h"
-#include "src/codegen/interface-descriptors-inl.h"
-#include "src/codegen/macro-assembler.h"
-#include "src/codegen/register-configuration.h"
-#include "src/debug/debug.h"
-#include "src/deoptimizer/deoptimizer.h"
-#include "src/execution/frames-inl.h"
-#include "src/heap/memory-chunk.h"
-#include "src/init/bootstrapper.h"
-#include "src/logging/counters.h"
-#include "src/objects/heap-number.h"
-#include "src/runtime/runtime.h"
-#include "src/snapshot/snapshot.h"
-
-#if V8_ENABLE_WEBASSEMBLY
-#include "src/wasm/wasm-code-manager.h"
-#endif  // V8_ENABLE_WEBASSEMBLY
-
-// Satisfy cpplint check, but don't include platform-specific header. It is
-// included recursively via macro-assembler.h.
-#if 0 -#include "src/codegen/mips/macro-assembler-mips.h" -#endif - -namespace v8 { -namespace internal { - -static inline bool IsZero(const Operand& rt) { - if (rt.is_reg()) { - return rt.rm() == zero_reg; - } else { - return rt.immediate() == 0; - } -} - -int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, - Register exclusion1, - Register exclusion2, - Register exclusion3) const { - int bytes = 0; - - RegList exclusions = {exclusion1, exclusion2, exclusion3}; - RegList list = kJSCallerSaved - exclusions; - bytes += list.Count() * kPointerSize; - - if (fp_mode == SaveFPRegsMode::kSave) { - bytes += kCallerSavedFPU.Count() * kDoubleSize; - } - - return bytes; -} - -int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, - Register exclusion2, Register exclusion3) { - ASM_CODE_COMMENT(this); - int bytes = 0; - - RegList exclusions = {exclusion1, exclusion2, exclusion3}; - RegList list = kJSCallerSaved - exclusions; - MultiPush(list); - bytes += list.Count() * kPointerSize; - - if (fp_mode == SaveFPRegsMode::kSave) { - MultiPushFPU(kCallerSavedFPU); - bytes += kCallerSavedFPU.Count() * kDoubleSize; - } - - return bytes; -} - -int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, - Register exclusion2, Register exclusion3) { - ASM_CODE_COMMENT(this); - int bytes = 0; - if (fp_mode == SaveFPRegsMode::kSave) { - MultiPopFPU(kCallerSavedFPU); - bytes += kCallerSavedFPU.Count() * kDoubleSize; - } - - RegList exclusions = {exclusion1, exclusion2, exclusion3}; - RegList list = kJSCallerSaved - exclusions; - MultiPop(list); - bytes += list.Count() * kPointerSize; - - return bytes; -} - -void TurboAssembler::LoadRoot(Register destination, RootIndex index) { - lw(destination, - MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index))); -} - -void TurboAssembler::LoadRoot(Register destination, RootIndex index, - Condition cond, Register src1, - const Operand& src2) { - Branch(2, NegateCondition(cond), src1, src2); - lw(destination, - MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index))); -} - -void TurboAssembler::PushCommonFrame(Register marker_reg) { - if (marker_reg.is_valid()) { - Push(ra, fp, marker_reg); - Addu(fp, sp, Operand(kPointerSize)); - } else { - Push(ra, fp); - mov(fp, sp); - } -} - -void TurboAssembler::PushStandardFrame(Register function_reg) { - int offset = -StandardFrameConstants::kContextOffset; - if (function_reg.is_valid()) { - Push(ra, fp, cp, function_reg, kJavaScriptCallArgCountRegister); - offset += 2 * kPointerSize; - } else { - Push(ra, fp, cp, kJavaScriptCallArgCountRegister); - offset += kPointerSize; - } - Addu(fp, sp, Operand(offset)); -} - -// Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved) -// The register 'object' contains a heap object pointer. The heap object -// tag is shifted away. -void MacroAssembler::RecordWriteField(Register object, int offset, - Register value, Register dst, - RAStatus ra_status, - SaveFPRegsMode save_fp, - RememberedSetAction remembered_set_action, - SmiCheck smi_check) { - ASM_CODE_COMMENT(this); - DCHECK(!AreAliased(value, dst, t8, object)); - // First, check if a write barrier is even needed. The tests below - // catch stores of Smis. - Label done; - - // Skip barrier if writing a smi. - if (smi_check == SmiCheck::kInline) { - JumpIfSmi(value, &done); - } - - // Although the object register is tagged, the offset is relative to the start - // of the object, so offset must be a multiple of kPointerSize. 
-  DCHECK(IsAligned(offset, kPointerSize));
-
-  Addu(dst, object, Operand(offset - kHeapObjectTag));
-  if (v8_flags.debug_code) {
-    BlockTrampolinePoolScope block_trampoline_pool(this);
-    Label ok;
-    And(t8, dst, Operand(kPointerSize - 1));
-    Branch(&ok, eq, t8, Operand(zero_reg));
-    stop();
-    bind(&ok);
-  }
-
-  RecordWrite(object, dst, value, ra_status, save_fp, remembered_set_action,
-              SmiCheck::kOmit);
-
-  bind(&done);
-
-  // Clobber clobbered input registers when running with the debug-code flag
-  // turned on to provoke errors.
-  if (v8_flags.debug_code) {
-    li(value, Operand(base::bit_cast<intptr_t>(kZapValue + 4)));
-    li(dst, Operand(base::bit_cast<intptr_t>(kZapValue + 8)));
-  }
-}
-
-void TurboAssembler::MaybeSaveRegisters(RegList registers) {
-  if (registers.is_empty()) return;
-  MultiPush(registers);
-}
-
-void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
-  if (registers.is_empty()) return;
-  MultiPop(registers);
-}
-
-void TurboAssembler::CallEphemeronKeyBarrier(Register object,
-                                             Register slot_address,
-                                             SaveFPRegsMode fp_mode) {
-  ASM_CODE_COMMENT(this);
-  DCHECK(!AreAliased(object, slot_address));
-  RegList registers =
-      WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address);
-  MaybeSaveRegisters(registers);
-
-  Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
-  Register slot_address_parameter =
-      WriteBarrierDescriptor::SlotAddressRegister();
-
-  Push(object);
-  Push(slot_address);
-  Pop(slot_address_parameter);
-  Pop(object_parameter);
-
-  Call(isolate()->builtins()->code_handle(
-           Builtins::GetEphemeronKeyBarrierStub(fp_mode)),
-       RelocInfo::CODE_TARGET);
-  MaybeRestoreRegisters(registers);
-}
-
-void TurboAssembler::CallRecordWriteStubSaveRegisters(
-    Register object, Register slot_address,
-    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
-    StubCallMode mode) {
-  DCHECK(!AreAliased(object, slot_address));
-  RegList registers =
-      WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address);
-  MaybeSaveRegisters(registers);
-
-  Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
-  Register slot_address_parameter =
-      WriteBarrierDescriptor::SlotAddressRegister();
-
-  Push(object);
-  Push(slot_address);
-  Pop(slot_address_parameter);
-  Pop(object_parameter);
-
-  CallRecordWriteStub(object_parameter, slot_address_parameter,
-                      remembered_set_action, fp_mode, mode);
-
-  MaybeRestoreRegisters(registers);
-}
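// [Editorial note, not part of the original patch] The Push/Push/Pop/Pop
// sequence above is a register shuffle: it lands `object` and `slot_address`
// in the fixed registers demanded by WriteBarrierDescriptor even when the
// source registers alias the destinations, because the stack slots act as
// temporaries. A minimal sketch of the same idea in plain C++ (all names
// illustrative, not V8 API):
//
//   template <typename T>
//   void MoveViaStack(T obj, T slot, T* obj_param, T* slot_param) {
//     std::stack<T> s;
//     s.push(obj);                     // Push(object)
//     s.push(slot);                    // Push(slot_address)
//     *slot_param = s.top(); s.pop();  // Pop(slot_address_parameter)
//     *obj_param = s.top(); s.pop();   // Pop(object_parameter)
//   }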
-
-void TurboAssembler::CallRecordWriteStub(
-    Register object, Register slot_address,
-    RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
-    StubCallMode mode) {
-  // Use CallRecordWriteStubSaveRegisters if the object and slot registers
-  // need to be caller saved.
-  DCHECK_EQ(WriteBarrierDescriptor::ObjectRegister(), object);
-  DCHECK_EQ(WriteBarrierDescriptor::SlotAddressRegister(), slot_address);
-#if V8_ENABLE_WEBASSEMBLY
-  if (mode == StubCallMode::kCallWasmRuntimeStub) {
-    auto wasm_target =
-        wasm::WasmCode::GetRecordWriteStub(remembered_set_action, fp_mode);
-    Call(wasm_target, RelocInfo::WASM_STUB_CALL);
-#else
-  if (false) {
-#endif
-  } else {
-    Builtin builtin =
-        Builtins::GetRecordWriteStub(remembered_set_action, fp_mode);
-    if (options().inline_offheap_trampolines) {
-      // Inline the trampoline.
-      RecordCommentForOffHeapTrampoline(builtin);
-      li(t9, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
-      Call(t9);
-      RecordComment("]");
-    } else {
-      Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
-      Call(code_target, RelocInfo::CODE_TARGET);
-    }
-  }
-}
-
-// Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved)
-// The register 'object' contains a heap object pointer. The heap object
-// tag is shifted away.
-void MacroAssembler::RecordWrite(Register object, Register address,
-                                 Register value, RAStatus ra_status,
-                                 SaveFPRegsMode fp_mode,
-                                 RememberedSetAction remembered_set_action,
-                                 SmiCheck smi_check) {
-  DCHECK(!AreAliased(object, address, value, t8));
-  DCHECK(!AreAliased(object, address, value, t9));
-
-  if (v8_flags.debug_code) {
-    UseScratchRegisterScope temps(this);
-    Register scratch = temps.Acquire();
-    DCHECK(!AreAliased(object, value, scratch));
-    lw(scratch, MemOperand(address));
-    Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite, scratch,
-           Operand(value));
-  }
-
-  if ((remembered_set_action == RememberedSetAction::kOmit &&
-       !v8_flags.incremental_marking) ||
-      v8_flags.disable_write_barriers) {
-    return;
-  }
-
-  // First, check if a write barrier is even needed. The tests below
-  // catch stores of smis and stores into the young generation.
-  Label done;
-
-  if (smi_check == SmiCheck::kInline) {
-    DCHECK_EQ(0, kSmiTag);
-    JumpIfSmi(value, &done);
-  }
-
-  CheckPageFlag(value,
-                value,  // Used as scratch.
-                MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
-  CheckPageFlag(object,
-                value,  // Used as scratch.
-                MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
-
-  // Record the actual write.
-  if (ra_status == kRAHasNotBeenSaved) {
-    push(ra);
-  }
-
-  Register slot_address = WriteBarrierDescriptor::SlotAddressRegister();
-  DCHECK(!AreAliased(object, slot_address, value));
-  mov(slot_address, address);
-  CallRecordWriteStub(object, slot_address, remembered_set_action, fp_mode);
-
-  if (ra_status == kRAHasNotBeenSaved) {
-    pop(ra);
-  }
-
-  bind(&done);
-
-  // Clobber clobbered registers when running with the debug-code flag
-  // turned on to provoke errors.
-  if (v8_flags.debug_code) {
-    li(address, Operand(base::bit_cast<intptr_t>(kZapValue + 12)));
-    li(value, Operand(base::bit_cast<intptr_t>(kZapValue + 16)));
-    li(slot_address, Operand(base::bit_cast<intptr_t>(kZapValue + 20)));
-  }
-}
-
-// ---------------------------------------------------------------------------
-// Instruction macros.
-
-void TurboAssembler::Addu(Register rd, Register rs, const Operand& rt) {
-  if (rt.is_reg()) {
-    addu(rd, rs, rt.rm());
-  } else {
-    if (is_int16(rt.immediate()) && !MustUseReg(rt.rmode())) {
-      addiu(rd, rs, rt.immediate());
-    } else {
-      // li handles the relocation.
-      UseScratchRegisterScope temps(this);
-      Register scratch = temps.Acquire();
-      DCHECK(rs != scratch);
-      li(scratch, rt);
-      addu(rd, rs, scratch);
-    }
-  }
-}
-
-void TurboAssembler::Subu(Register rd, Register rs, const Operand& rt) {
-  if (rt.is_reg()) {
-    subu(rd, rs, rt.rm());
-  } else {
-    if (is_int16(-rt.immediate()) && !MustUseReg(rt.rmode())) {
-      addiu(rd, rs, -rt.immediate());  // No subiu instr, use addiu(x, y, -imm).
-    } else if (!(-rt.immediate() & kHiMask) &&
-               !MustUseReg(rt.rmode())) {  // Use load
-      // -imm and addu for cases where loading -imm generates one instruction.
- UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - DCHECK(rs != scratch); - li(scratch, -rt.immediate()); - addu(rd, rs, scratch); - } else { - // li handles the relocation. - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - DCHECK(rs != scratch); - li(scratch, rt); - subu(rd, rs, scratch); - } - } -} - -void TurboAssembler::Mul(Register rd, Register rs, const Operand& rt) { - if (rt.is_reg()) { - if (IsMipsArchVariant(kLoongson)) { - mult(rs, rt.rm()); - mflo(rd); - } else { - mul(rd, rs, rt.rm()); - } - } else { - // li handles the relocation. - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - DCHECK(rs != scratch); - li(scratch, rt); - if (IsMipsArchVariant(kLoongson)) { - mult(rs, scratch); - mflo(rd); - } else { - mul(rd, rs, scratch); - } - } -} - -void TurboAssembler::Mul(Register rd_hi, Register rd_lo, Register rs, - const Operand& rt) { - if (rt.is_reg()) { - if (!IsMipsArchVariant(kMips32r6)) { - mult(rs, rt.rm()); - mflo(rd_lo); - mfhi(rd_hi); - } else { - if (rd_lo == rs) { - DCHECK(rd_hi != rs); - DCHECK(rd_hi != rt.rm() && rd_lo != rt.rm()); - muh(rd_hi, rs, rt.rm()); - mul(rd_lo, rs, rt.rm()); - } else { - DCHECK(rd_hi != rt.rm() && rd_lo != rt.rm()); - mul(rd_lo, rs, rt.rm()); - muh(rd_hi, rs, rt.rm()); - } - } - } else { - // li handles the relocation. - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - DCHECK(rs != scratch); - li(scratch, rt); - if (!IsMipsArchVariant(kMips32r6)) { - mult(rs, scratch); - mflo(rd_lo); - mfhi(rd_hi); - } else { - if (rd_lo == rs) { - DCHECK(rd_hi != rs); - DCHECK(rd_hi != scratch && rd_lo != scratch); - muh(rd_hi, rs, scratch); - mul(rd_lo, rs, scratch); - } else { - DCHECK(rd_hi != scratch && rd_lo != scratch); - mul(rd_lo, rs, scratch); - muh(rd_hi, rs, scratch); - } - } - } -} - -void TurboAssembler::Mulu(Register rd_hi, Register rd_lo, Register rs, - const Operand& rt) { - Register reg = no_reg; - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - if (rt.is_reg()) { - reg = rt.rm(); - } else { - DCHECK(rs != scratch); - reg = scratch; - li(reg, rt); - } - - if (!IsMipsArchVariant(kMips32r6)) { - multu(rs, reg); - mflo(rd_lo); - mfhi(rd_hi); - } else { - if (rd_lo == rs) { - DCHECK(rd_hi != rs); - DCHECK(rd_hi != reg && rd_lo != reg); - muhu(rd_hi, rs, reg); - mulu(rd_lo, rs, reg); - } else { - DCHECK(rd_hi != reg && rd_lo != reg); - mulu(rd_lo, rs, reg); - muhu(rd_hi, rs, reg); - } - } -} - -void TurboAssembler::Mulh(Register rd, Register rs, const Operand& rt) { - if (rt.is_reg()) { - if (!IsMipsArchVariant(kMips32r6)) { - mult(rs, rt.rm()); - mfhi(rd); - } else { - muh(rd, rs, rt.rm()); - } - } else { - // li handles the relocation. - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - DCHECK(rs != scratch); - li(scratch, rt); - if (!IsMipsArchVariant(kMips32r6)) { - mult(rs, scratch); - mfhi(rd); - } else { - muh(rd, rs, scratch); - } - } -} - -void TurboAssembler::Mult(Register rs, const Operand& rt) { - if (rt.is_reg()) { - mult(rs, rt.rm()); - } else { - // li handles the relocation. 
- UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - DCHECK(rs != scratch); - li(scratch, rt); - mult(rs, scratch); - } -} - -void TurboAssembler::Mulhu(Register rd, Register rs, const Operand& rt) { - if (rt.is_reg()) { - if (!IsMipsArchVariant(kMips32r6)) { - multu(rs, rt.rm()); - mfhi(rd); - } else { - muhu(rd, rs, rt.rm()); - } - } else { - // li handles the relocation. - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - DCHECK(rs != scratch); - li(scratch, rt); - if (!IsMipsArchVariant(kMips32r6)) { - multu(rs, scratch); - mfhi(rd); - } else { - muhu(rd, rs, scratch); - } - } -} - -void TurboAssembler::Multu(Register rs, const Operand& rt) { - if (rt.is_reg()) { - multu(rs, rt.rm()); - } else { - // li handles the relocation. - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - DCHECK(rs != scratch); - li(scratch, rt); - multu(rs, scratch); - } -} - -void TurboAssembler::Div(Register rs, const Operand& rt) { - if (rt.is_reg()) { - div(rs, rt.rm()); - } else { - // li handles the relocation. - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - DCHECK(rs != scratch); - li(scratch, rt); - div(rs, scratch); - } -} - -void TurboAssembler::Div(Register rem, Register res, Register rs, - const Operand& rt) { - if (rt.is_reg()) { - if (!IsMipsArchVariant(kMips32r6)) { - div(rs, rt.rm()); - mflo(res); - mfhi(rem); - } else { - div(res, rs, rt.rm()); - mod(rem, rs, rt.rm()); - } - } else { - // li handles the relocation. - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - DCHECK(rs != scratch); - li(scratch, rt); - if (!IsMipsArchVariant(kMips32r6)) { - div(rs, scratch); - mflo(res); - mfhi(rem); - } else { - div(res, rs, scratch); - mod(rem, rs, scratch); - } - } -} - -void TurboAssembler::Div(Register res, Register rs, const Operand& rt) { - if (rt.is_reg()) { - if (!IsMipsArchVariant(kMips32r6)) { - div(rs, rt.rm()); - mflo(res); - } else { - div(res, rs, rt.rm()); - } - } else { - // li handles the relocation. - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - DCHECK(rs != scratch); - li(scratch, rt); - if (!IsMipsArchVariant(kMips32r6)) { - div(rs, scratch); - mflo(res); - } else { - div(res, rs, scratch); - } - } -} - -void TurboAssembler::Mod(Register rd, Register rs, const Operand& rt) { - if (rt.is_reg()) { - if (!IsMipsArchVariant(kMips32r6)) { - div(rs, rt.rm()); - mfhi(rd); - } else { - mod(rd, rs, rt.rm()); - } - } else { - // li handles the relocation. - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - DCHECK(rs != scratch); - li(scratch, rt); - if (!IsMipsArchVariant(kMips32r6)) { - div(rs, scratch); - mfhi(rd); - } else { - mod(rd, rs, scratch); - } - } -} - -void TurboAssembler::Modu(Register rd, Register rs, const Operand& rt) { - if (rt.is_reg()) { - if (!IsMipsArchVariant(kMips32r6)) { - divu(rs, rt.rm()); - mfhi(rd); - } else { - modu(rd, rs, rt.rm()); - } - } else { - // li handles the relocation. - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - DCHECK(rs != scratch); - li(scratch, rt); - if (!IsMipsArchVariant(kMips32r6)) { - divu(rs, scratch); - mfhi(rd); - } else { - modu(rd, rs, scratch); - } - } -} - -void TurboAssembler::Divu(Register rs, const Operand& rt) { - if (rt.is_reg()) { - divu(rs, rt.rm()); - } else { - // li handles the relocation. 
-    UseScratchRegisterScope temps(this);
-    Register scratch = temps.Acquire();
-    DCHECK(rs != scratch);
-    li(scratch, rt);
-    divu(rs, scratch);
-  }
-}
-
-void TurboAssembler::Divu(Register res, Register rs, const Operand& rt) {
-  if (rt.is_reg()) {
-    if (!IsMipsArchVariant(kMips32r6)) {
-      divu(rs, rt.rm());
-      mflo(res);
-    } else {
-      divu(res, rs, rt.rm());
-    }
-  } else {
-    // li handles the relocation.
-    UseScratchRegisterScope temps(this);
-    Register scratch = temps.Acquire();
-    DCHECK(rs != scratch);
-    li(scratch, rt);
-    if (!IsMipsArchVariant(kMips32r6)) {
-      divu(rs, scratch);
-      mflo(res);
-    } else {
-      divu(res, rs, scratch);
-    }
-  }
-}
-
-void TurboAssembler::And(Register rd, Register rs, const Operand& rt) {
-  if (rt.is_reg()) {
-    and_(rd, rs, rt.rm());
-  } else {
-    if (is_uint16(rt.immediate()) && !MustUseReg(rt.rmode())) {
-      andi(rd, rs, rt.immediate());
-    } else {
-      // li handles the relocation.
-      UseScratchRegisterScope temps(this);
-      Register scratch = temps.Acquire();
-      DCHECK(rs != scratch);
-      li(scratch, rt);
-      and_(rd, rs, scratch);
-    }
-  }
-}
-
-void TurboAssembler::Or(Register rd, Register rs, const Operand& rt) {
-  if (rt.is_reg()) {
-    or_(rd, rs, rt.rm());
-  } else {
-    if (is_uint16(rt.immediate()) && !MustUseReg(rt.rmode())) {
-      ori(rd, rs, rt.immediate());
-    } else {
-      // li handles the relocation.
-      UseScratchRegisterScope temps(this);
-      Register scratch = temps.Acquire();
-      DCHECK(rs != scratch);
-      li(scratch, rt);
-      or_(rd, rs, scratch);
-    }
-  }
-}
-
-void TurboAssembler::Xor(Register rd, Register rs, const Operand& rt) {
-  if (rt.is_reg()) {
-    xor_(rd, rs, rt.rm());
-  } else {
-    if (is_uint16(rt.immediate()) && !MustUseReg(rt.rmode())) {
-      xori(rd, rs, rt.immediate());
-    } else {
-      // li handles the relocation.
-      UseScratchRegisterScope temps(this);
-      Register scratch = temps.Acquire();
-      DCHECK(rs != scratch);
-      li(scratch, rt);
-      xor_(rd, rs, scratch);
-    }
-  }
-}
-
-void TurboAssembler::Nor(Register rd, Register rs, const Operand& rt) {
-  if (rt.is_reg()) {
-    nor(rd, rs, rt.rm());
-  } else {
-    // li handles the relocation.
-    UseScratchRegisterScope temps(this);
-    Register scratch = temps.Acquire();
-    DCHECK(rs != scratch);
-    li(scratch, rt);
-    nor(rd, rs, scratch);
-  }
-}
-
-void TurboAssembler::Neg(Register rs, const Operand& rt) {
-  subu(rs, zero_reg, rt.rm());
-}
-
-void TurboAssembler::Slt(Register rd, Register rs, const Operand& rt) {
-  if (rt.is_reg()) {
-    slt(rd, rs, rt.rm());
-  } else {
-    if (is_int16(rt.immediate()) && !MustUseReg(rt.rmode())) {
-      slti(rd, rs, rt.immediate());
-    } else {
-      // li handles the relocation.
-      BlockTrampolinePoolScope block_trampoline_pool(this);
-      UseScratchRegisterScope temps(this);
-      Register scratch = rd == at ? t8 : temps.Acquire();
-      DCHECK(rs != scratch);
-      li(scratch, rt);
-      slt(rd, rs, scratch);
-    }
-  }
-}
-
-void TurboAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
-  if (rt.is_reg()) {
-    sltu(rd, rs, rt.rm());
-  } else {
-    const uint32_t int16_min = std::numeric_limits<int16_t>::min();
-    if (is_uint15(rt.immediate()) && !MustUseReg(rt.rmode())) {
-      // Imm range is: [0, 32767].
-      sltiu(rd, rs, rt.immediate());
-    } else if (is_uint15(rt.immediate() - int16_min) &&
-               !MustUseReg(rt.rmode())) {
-      // Imm range is: [max_unsigned-32767,max_unsigned].
-      sltiu(rd, rs, static_cast<uint16_t>(rt.immediate()));
-    } else {
-      // li handles the relocation.
-      BlockTrampolinePoolScope block_trampoline_pool(this);
-      UseScratchRegisterScope temps(this);
-      Register scratch = rd == at ?
t8 : temps.Acquire(); - DCHECK(rs != scratch); - li(scratch, rt); - sltu(rd, rs, scratch); - } - } -} - -void TurboAssembler::Sle(Register rd, Register rs, const Operand& rt) { - if (rt.is_reg()) { - slt(rd, rt.rm(), rs); - } else { - // li handles the relocation. - BlockTrampolinePoolScope block_trampoline_pool(this); - UseScratchRegisterScope temps(this); - Register scratch = temps.hasAvailable() ? temps.Acquire() : t8; - DCHECK(rs != scratch); - li(scratch, rt); - slt(rd, scratch, rs); - } - xori(rd, rd, 1); -} - -void TurboAssembler::Sleu(Register rd, Register rs, const Operand& rt) { - if (rt.is_reg()) { - sltu(rd, rt.rm(), rs); - } else { - // li handles the relocation. - BlockTrampolinePoolScope block_trampoline_pool(this); - UseScratchRegisterScope temps(this); - Register scratch = temps.hasAvailable() ? temps.Acquire() : t8; - DCHECK(rs != scratch); - li(scratch, rt); - sltu(rd, scratch, rs); - } - xori(rd, rd, 1); -} - -void TurboAssembler::Sge(Register rd, Register rs, const Operand& rt) { - Slt(rd, rs, rt); - xori(rd, rd, 1); -} - -void TurboAssembler::Sgeu(Register rd, Register rs, const Operand& rt) { - Sltu(rd, rs, rt); - xori(rd, rd, 1); -} - -void TurboAssembler::Sgt(Register rd, Register rs, const Operand& rt) { - if (rt.is_reg()) { - slt(rd, rt.rm(), rs); - } else { - // li handles the relocation. - BlockTrampolinePoolScope block_trampoline_pool(this); - UseScratchRegisterScope temps(this); - Register scratch = temps.hasAvailable() ? temps.Acquire() : t8; - DCHECK(rs != scratch); - li(scratch, rt); - slt(rd, scratch, rs); - } -} - -void TurboAssembler::Sgtu(Register rd, Register rs, const Operand& rt) { - if (rt.is_reg()) { - sltu(rd, rt.rm(), rs); - } else { - // li handles the relocation. - BlockTrampolinePoolScope block_trampoline_pool(this); - UseScratchRegisterScope temps(this); - Register scratch = temps.hasAvailable() ? temps.Acquire() : t8; - DCHECK(rs != scratch); - li(scratch, rt); - sltu(rd, scratch, rs); - } -} - -void TurboAssembler::Ror(Register rd, Register rs, const Operand& rt) { - if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { - if (rt.is_reg()) { - rotrv(rd, rs, rt.rm()); - } else { - rotr(rd, rs, rt.immediate() & 0x1F); - } - } else { - if (rt.is_reg()) { - BlockTrampolinePoolScope block_trampoline_pool(this); - UseScratchRegisterScope temps(this); - Register scratch = temps.hasAvailable() ? temps.Acquire() : t8; - subu(scratch, zero_reg, rt.rm()); - sllv(scratch, rs, scratch); - srlv(rd, rs, rt.rm()); - or_(rd, rd, scratch); - } else { - if (rt.immediate() == 0) { - srl(rd, rs, 0); - } else { - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - srl(scratch, rs, rt.immediate() & 0x1F); - sll(rd, rs, (0x20 - (rt.immediate() & 0x1F)) & 0x1F); - or_(rd, rd, scratch); - } - } - } -} - -void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) { - if (IsMipsArchVariant(kLoongson)) { - lw(zero_reg, rs); - } else { - pref(hint, rs); - } -} - -void TurboAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa, - Register scratch) { - DCHECK(sa >= 1 && sa <= 31); - if (IsMipsArchVariant(kMips32r6) && sa <= 4) { - lsa(rd, rt, rs, sa - 1); - } else { - Register tmp = rd == rt ? 
scratch : rd; - DCHECK(tmp != rt); - sll(tmp, rs, sa); - Addu(rd, rt, tmp); - } -} - -void TurboAssembler::Bovc(Register rs, Register rt, Label* L) { - if (is_trampoline_emitted()) { - Label skip; - bnvc(rs, rt, &skip); - BranchLong(L, PROTECT); - bind(&skip); - } else { - bovc(rs, rt, L); - } -} - -void TurboAssembler::Bnvc(Register rs, Register rt, Label* L) { - if (is_trampoline_emitted()) { - Label skip; - bovc(rs, rt, &skip); - BranchLong(L, PROTECT); - bind(&skip); - } else { - bnvc(rs, rt, L); - } -} - -// ------------Pseudo-instructions------------- - -// Word Swap Byte -void TurboAssembler::ByteSwapSigned(Register dest, Register src, - int operand_size) { - DCHECK(operand_size == 2 || operand_size == 4); - - if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { - if (operand_size == 2) { - wsbh(dest, src); - seh(dest, dest); - } else { - wsbh(dest, src); - rotr(dest, dest, 16); - } - } else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) { - if (operand_size == 2) { - DCHECK(src != at && dest != at); - srl(at, src, 8); - andi(at, at, 0xFF); - sll(dest, src, 8); - or_(dest, dest, at); - - // Sign-extension - sll(dest, dest, 16); - sra(dest, dest, 16); - } else { - BlockTrampolinePoolScope block_trampoline_pool(this); - Register tmp = at; - Register tmp2 = t8; - DCHECK(dest != tmp && dest != tmp2); - DCHECK(src != tmp && src != tmp2); - - andi(tmp2, src, 0xFF); - sll(tmp, tmp2, 24); - - andi(tmp2, src, 0xFF00); - sll(tmp2, tmp2, 8); - or_(tmp, tmp, tmp2); - - srl(tmp2, src, 8); - andi(tmp2, tmp2, 0xFF00); - or_(tmp, tmp, tmp2); - - srl(tmp2, src, 24); - or_(dest, tmp, tmp2); - } - } -} - -void TurboAssembler::ByteSwapUnsigned(Register dest, Register src, - int operand_size) { - DCHECK_EQ(operand_size, 2); - - if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { - wsbh(dest, src); - andi(dest, dest, 0xFFFF); - } else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) { - DCHECK(src != at && dest != at); - srl(at, src, 8); - andi(at, at, 0xFF); - sll(dest, src, 8); - or_(dest, dest, at); - - // Zero-extension - andi(dest, dest, 0xFFFF); - } -} - -void TurboAssembler::Ulw(Register rd, const MemOperand& rs) { - DCHECK(rd != at); - DCHECK(rs.rm() != at); - if (IsMipsArchVariant(kMips32r6)) { - lw(rd, rs); - } else { - DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || - IsMipsArchVariant(kLoongson)); - DCHECK(kMipsLwrOffset <= 3 && kMipsLwlOffset <= 3); - MemOperand source = rs; - // Adjust offset for two accesses and check if offset + 3 fits into int16_t. - AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 3); - if (rd != source.rm()) { - lwr(rd, MemOperand(source.rm(), source.offset() + kMipsLwrOffset)); - lwl(rd, MemOperand(source.rm(), source.offset() + kMipsLwlOffset)); - } else { - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - lwr(scratch, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset)); - lwl(scratch, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset)); - mov(rd, scratch); - } - } -} - -void TurboAssembler::Usw(Register rd, const MemOperand& rs) { - DCHECK(rd != at); - DCHECK(rs.rm() != at); - DCHECK(rd != rs.rm()); - if (IsMipsArchVariant(kMips32r6)) { - sw(rd, rs); - } else { - DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || - IsMipsArchVariant(kLoongson)); - DCHECK(kMipsSwrOffset <= 3 && kMipsSwlOffset <= 3); - MemOperand source = rs; - // Adjust offset for two accesses and check if offset + 3 fits into int16_t. 
- AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 3); - swr(rd, MemOperand(source.rm(), source.offset() + kMipsSwrOffset)); - swl(rd, MemOperand(source.rm(), source.offset() + kMipsSwlOffset)); - } -} - -void TurboAssembler::Ulh(Register rd, const MemOperand& rs) { - DCHECK(rd != at); - DCHECK(rs.rm() != at); - if (IsMipsArchVariant(kMips32r6)) { - lh(rd, rs); - } else { - DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || - IsMipsArchVariant(kLoongson)); - MemOperand source = rs; - // Adjust offset for two accesses and check if offset + 1 fits into int16_t. - AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1); - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - if (source.rm() == scratch) { -#if defined(V8_TARGET_LITTLE_ENDIAN) - lb(rd, MemOperand(source.rm(), source.offset() + 1)); - lbu(scratch, source); -#elif defined(V8_TARGET_BIG_ENDIAN) - lb(rd, source); - lbu(scratch, MemOperand(source.rm(), source.offset() + 1)); -#endif - } else { -#if defined(V8_TARGET_LITTLE_ENDIAN) - lbu(scratch, source); - lb(rd, MemOperand(source.rm(), source.offset() + 1)); -#elif defined(V8_TARGET_BIG_ENDIAN) - lbu(scratch, MemOperand(source.rm(), source.offset() + 1)); - lb(rd, source); -#endif - } - sll(rd, rd, 8); - or_(rd, rd, scratch); - } -} - -void TurboAssembler::Ulhu(Register rd, const MemOperand& rs) { - DCHECK(rd != at); - DCHECK(rs.rm() != at); - if (IsMipsArchVariant(kMips32r6)) { - lhu(rd, rs); - } else { - DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || - IsMipsArchVariant(kLoongson)); - MemOperand source = rs; - // Adjust offset for two accesses and check if offset + 1 fits into int16_t. - AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1); - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - if (source.rm() == scratch) { -#if defined(V8_TARGET_LITTLE_ENDIAN) - lbu(rd, MemOperand(source.rm(), source.offset() + 1)); - lbu(scratch, source); -#elif defined(V8_TARGET_BIG_ENDIAN) - lbu(rd, source); - lbu(scratch, MemOperand(source.rm(), source.offset() + 1)); -#endif - } else { -#if defined(V8_TARGET_LITTLE_ENDIAN) - lbu(scratch, source); - lbu(rd, MemOperand(source.rm(), source.offset() + 1)); -#elif defined(V8_TARGET_BIG_ENDIAN) - lbu(scratch, MemOperand(source.rm(), source.offset() + 1)); - lbu(rd, source); -#endif - } - sll(rd, rd, 8); - or_(rd, rd, scratch); - } -} - -void TurboAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) { - DCHECK(rd != at); - DCHECK(rs.rm() != at); - DCHECK(rs.rm() != scratch); - DCHECK(scratch != at); - if (IsMipsArchVariant(kMips32r6)) { - sh(rd, rs); - } else { - DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || - IsMipsArchVariant(kLoongson)); - MemOperand source = rs; - // Adjust offset for two accesses and check if offset + 1 fits into int16_t. 
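// [Editorial note, not part of the original patch] On pre-r6 cores Ush
// emulates an unaligned halfword store with two byte stores, which is why
// the offset is adjusted for two accesses here. For a little-endian target
// the sequence below amounts to:
//
//   sb(value & 0xFF,        addr);      // low byte first
//   sb((value >> 8) & 0xFF, addr + 1);  // then the high byte
//
// r6 lifted the alignment restriction, so the plain sh() path above
// suffices there.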
- AdjustBaseAndOffset(&source, OffsetAccessType::TWO_ACCESSES, 1); - - if (scratch != rd) { - mov(scratch, rd); - } - -#if defined(V8_TARGET_LITTLE_ENDIAN) - sb(scratch, source); - srl(scratch, scratch, 8); - sb(scratch, MemOperand(source.rm(), source.offset() + 1)); -#elif defined(V8_TARGET_BIG_ENDIAN) - sb(scratch, MemOperand(source.rm(), source.offset() + 1)); - srl(scratch, scratch, 8); - sb(scratch, source); -#endif - } -} - -void TurboAssembler::Ulwc1(FPURegister fd, const MemOperand& rs, - Register scratch) { - if (IsMipsArchVariant(kMips32r6)) { - lwc1(fd, rs); - } else { - DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || - IsMipsArchVariant(kLoongson)); - Ulw(scratch, rs); - mtc1(scratch, fd); - } -} - -void TurboAssembler::Uswc1(FPURegister fd, const MemOperand& rs, - Register scratch) { - if (IsMipsArchVariant(kMips32r6)) { - swc1(fd, rs); - } else { - DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || - IsMipsArchVariant(kLoongson)); - mfc1(scratch, fd); - Usw(scratch, rs); - } -} - -void TurboAssembler::Uldc1(FPURegister fd, const MemOperand& rs, - Register scratch) { - DCHECK(scratch != at); - if (IsMipsArchVariant(kMips32r6)) { - Ldc1(fd, rs); - } else { - DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || - IsMipsArchVariant(kLoongson)); - Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset)); - mtc1(scratch, fd); - Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset)); - Mthc1(scratch, fd); - } -} - -void TurboAssembler::Usdc1(FPURegister fd, const MemOperand& rs, - Register scratch) { - DCHECK(scratch != at); - if (IsMipsArchVariant(kMips32r6)) { - Sdc1(fd, rs); - } else { - DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || - IsMipsArchVariant(kLoongson)); - mfc1(scratch, fd); - Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset)); - Mfhc1(scratch, fd); - Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset)); - } -} - -void TurboAssembler::Ldc1(FPURegister fd, const MemOperand& src) { - // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit - // load to two 32-bit loads. - { - BlockTrampolinePoolScope block_trampoline_pool(this); - DCHECK(Register::kMantissaOffset <= 4 && Register::kExponentOffset <= 4); - MemOperand tmp = src; - AdjustBaseAndOffset(&tmp, OffsetAccessType::TWO_ACCESSES); - lwc1(fd, MemOperand(tmp.rm(), tmp.offset() + Register::kMantissaOffset)); - if (IsFp32Mode()) { // fp32 mode. - FPURegister nextfpreg = FPURegister::from_code(fd.code() + 1); - lwc1(nextfpreg, - MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset)); - } else { - DCHECK(IsFp64Mode() || IsFpxxMode()); - // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6 - DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - DCHECK(src.rm() != scratch); - lw(scratch, - MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset)); - Mthc1(scratch, fd); - } - } - CheckTrampolinePoolQuick(1); -} - -void TurboAssembler::Sdc1(FPURegister fd, const MemOperand& src) { - // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit - // store to two 32-bit stores. 
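// [Editorial note, not part of the original patch] The split below works on
// the IEEE-754 layout of the double: swc1 stores the low (mantissa) word
// straight from the FPU register, while Mfhc1 moves the high word (sign,
// exponent, top mantissa bits) to a GPR so it can be stored with a plain
// sw. Register::kMantissaOffset and Register::kExponentOffset hide the
// endian-dependent order of the two words.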
-  {
-    BlockTrampolinePoolScope block_trampoline_pool(this);
-    DCHECK(Register::kMantissaOffset <= 4 && Register::kExponentOffset <= 4);
-    MemOperand tmp = src;
-    AdjustBaseAndOffset(&tmp, OffsetAccessType::TWO_ACCESSES);
-    swc1(fd, MemOperand(tmp.rm(), tmp.offset() + Register::kMantissaOffset));
-    if (IsFp32Mode()) {  // fp32 mode.
-      FPURegister nextfpreg = FPURegister::from_code(fd.code() + 1);
-      swc1(nextfpreg,
-           MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset));
-    } else {
-      BlockTrampolinePoolScope block_trampoline_pool(this);
-      DCHECK(IsFp64Mode() || IsFpxxMode());
-      // Currently we support FPXX and FP64 on Mips32r2 and Mips32r6
-      DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
-      DCHECK(src.rm() != t8);
-      Mfhc1(t8, fd);
-      sw(t8, MemOperand(tmp.rm(), tmp.offset() + Register::kExponentOffset));
-    }
-  }
-  CheckTrampolinePoolQuick(1);
-}
-
-void TurboAssembler::Lw(Register rd, const MemOperand& rs) {
-  MemOperand source = rs;
-  AdjustBaseAndOffset(&source);
-  lw(rd, source);
-}
-
-void TurboAssembler::Sw(Register rd, const MemOperand& rs) {
-  MemOperand dest = rs;
-  AdjustBaseAndOffset(&dest);
-  sw(rd, dest);
-}
-
-void TurboAssembler::Ll(Register rd, const MemOperand& rs) {
-  bool is_one_instruction = IsMipsArchVariant(kMips32r6)
-                                ? is_int9(rs.offset())
-                                : is_int16(rs.offset());
-  if (is_one_instruction) {
-    ll(rd, rs);
-  } else {
-    UseScratchRegisterScope temps(this);
-    Register scratch = temps.Acquire();
-    li(scratch, rs.offset());
-    addu(scratch, scratch, rs.rm());
-    ll(rd, MemOperand(scratch, 0));
-  }
-}
-
-void TurboAssembler::Sc(Register rd, const MemOperand& rs) {
-  bool is_one_instruction = IsMipsArchVariant(kMips32r6)
-                                ? is_int9(rs.offset())
-                                : is_int16(rs.offset());
-  if (is_one_instruction) {
-    sc(rd, rs);
-  } else {
-    UseScratchRegisterScope temps(this);
-    Register scratch = temps.Acquire();
-    li(scratch, rs.offset());
-    addu(scratch, scratch, rs.rm());
-    sc(rd, MemOperand(scratch, 0));
-  }
-}
-
-void TurboAssembler::li(Register dst, Handle<HeapObject> value, LiFlags mode) {
-  // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
-  // non-isolate-independent code. In many cases it might be cheaper than
-  // embedding the relocatable value.
-  if (root_array_available_ && options().isolate_independent_code) {
-    IndirectLoadConstant(dst, value);
-    return;
-  }
-  li(dst, Operand(value), mode);
-}
-
-void TurboAssembler::li(Register dst, ExternalReference value, LiFlags mode) {
-  // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
-  // non-isolate-independent code. In many cases it might be cheaper than
-  // embedding the relocatable value.
-  if (root_array_available_ && options().isolate_independent_code) {
-    IndirectLoadExternalReference(dst, value);
-    return;
-  }
-  li(dst, Operand(value), mode);
-}
-
-void TurboAssembler::li(Register dst, const StringConstantBase* string,
-                        LiFlags mode) {
-  li(dst, Operand::EmbeddedStringConstant(string), mode);
-}
-
-void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
-  DCHECK(!j.is_reg());
-  BlockTrampolinePoolScope block_trampoline_pool(this);
-  if (!MustUseReg(j.rmode()) && mode == OPTIMIZE_SIZE) {
-    // Normal load of an immediate value which does not need Relocation Info.
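// [Editorial note, not part of the original patch] Worked examples of the
// size-optimized paths below (the register name is arbitrary):
//
//   li(t0, 42);          // fits int16          -> addiu t0, zero_reg, 42
//   li(t0, 0x0000BEEF);  // no bits in kHiMask  -> ori   t0, zero_reg, 0xBEEF
//   li(t0, 0x12340000);  // only high bits set  -> lui   t0, 0x1234
//   li(t0, 0x1234BEEF);  // general case        -> lui, then ori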
- if (is_int16(j.immediate())) { - addiu(rd, zero_reg, j.immediate()); - } else if (!(j.immediate() & kHiMask)) { - ori(rd, zero_reg, j.immediate()); - } else { - lui(rd, (j.immediate() >> kLuiShift) & kImm16Mask); - if (j.immediate() & kImm16Mask) { - ori(rd, rd, (j.immediate() & kImm16Mask)); - } - } - } else { - int32_t immediate; - if (j.IsHeapObjectRequest()) { - RequestHeapObject(j.heap_object_request()); - immediate = 0; - } else { - immediate = j.immediate(); - } - - if (MustUseReg(j.rmode())) { - RecordRelocInfo(j.rmode(), immediate); - } - // We always need the same number of instructions as we may need to patch - // this code to load another value which may need 2 instructions to load. - - lui(rd, (immediate >> kLuiShift) & kImm16Mask); - ori(rd, rd, (immediate & kImm16Mask)); - } -} - -void TurboAssembler::MultiPush(RegList regs) { - int16_t num_to_push = regs.Count(); - int16_t stack_offset = num_to_push * kPointerSize; - - Subu(sp, sp, Operand(stack_offset)); - for (int16_t i = kNumRegisters - 1; i >= 0; i--) { - if ((regs.bits() & (1 << i)) != 0) { - stack_offset -= kPointerSize; - sw(ToRegister(i), MemOperand(sp, stack_offset)); - } - } -} - -void TurboAssembler::MultiPop(RegList regs) { - int16_t stack_offset = 0; - - for (int16_t i = 0; i < kNumRegisters; i++) { - if ((regs.bits() & (1 << i)) != 0) { - lw(ToRegister(i), MemOperand(sp, stack_offset)); - stack_offset += kPointerSize; - } - } - addiu(sp, sp, stack_offset); -} - -void TurboAssembler::MultiPushFPU(DoubleRegList regs) { - int16_t num_to_push = regs.Count(); - int16_t stack_offset = num_to_push * kDoubleSize; - - Subu(sp, sp, Operand(stack_offset)); - for (int16_t i = kNumRegisters - 1; i >= 0; i--) { - if ((regs.bits() & (1 << i)) != 0) { - stack_offset -= kDoubleSize; - Sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); - } - } -} - -void TurboAssembler::MultiPopFPU(DoubleRegList regs) { - int16_t stack_offset = 0; - - for (int16_t i = 0; i < kNumRegisters; i++) { - if ((regs.bits() & (1 << i)) != 0) { - Ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); - stack_offset += kDoubleSize; - } - } - addiu(sp, sp, stack_offset); -} - -void TurboAssembler::AddPair(Register dst_low, Register dst_high, - Register left_low, Register left_high, - Register right_low, Register right_high, - Register scratch1, Register scratch2) { - BlockTrampolinePoolScope block_trampoline_pool(this); - Register scratch3 = t8; - Addu(scratch1, left_low, right_low); - Sltu(scratch3, scratch1, left_low); - Addu(scratch2, left_high, right_high); - Addu(dst_high, scratch2, scratch3); - Move(dst_low, scratch1); -} - -void TurboAssembler::AddPair(Register dst_low, Register dst_high, - Register left_low, Register left_high, int32_t imm, - Register scratch1, Register scratch2) { - BlockTrampolinePoolScope block_trampoline_pool(this); - Register scratch3 = t8; - li(dst_low, Operand(imm)); - sra(dst_high, dst_low, 31); - Addu(scratch1, left_low, dst_low); - Sltu(scratch3, scratch1, left_low); - Addu(scratch2, left_high, dst_high); - Addu(dst_high, scratch2, scratch3); - Move(dst_low, scratch1); -} - -void TurboAssembler::SubPair(Register dst_low, Register dst_high, - Register left_low, Register left_high, - Register right_low, Register right_high, - Register scratch1, Register scratch2) { - BlockTrampolinePoolScope block_trampoline_pool(this); - Register scratch3 = t8; - Sltu(scratch3, left_low, right_low); - Subu(scratch1, left_low, right_low); - Subu(scratch2, left_high, right_high); - Subu(dst_high, scratch2, scratch3); - 
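// [Editorial note, not part of the original patch] SubPair above mirrors
// AddPair: Sltu computes the borrow out of the low word first, so the
// destination registers may safely alias the inputs. The same idiom in
// plain C++:
//
//   uint32_t borrow = left_low < right_low;  // 1 iff the low word wraps
//   uint32_t low = left_low - right_low;
//   uint32_t high = left_high - right_high - borrow;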
Move(dst_low, scratch1); -} - -void TurboAssembler::AndPair(Register dst_low, Register dst_high, - Register left_low, Register left_high, - Register right_low, Register right_high) { - And(dst_low, left_low, right_low); - And(dst_high, left_high, right_high); -} - -void TurboAssembler::OrPair(Register dst_low, Register dst_high, - Register left_low, Register left_high, - Register right_low, Register right_high) { - Or(dst_low, left_low, right_low); - Or(dst_high, left_high, right_high); -} -void TurboAssembler::XorPair(Register dst_low, Register dst_high, - Register left_low, Register left_high, - Register right_low, Register right_high) { - Xor(dst_low, left_low, right_low); - Xor(dst_high, left_high, right_high); -} - -void TurboAssembler::MulPair(Register dst_low, Register dst_high, - Register left_low, Register left_high, - Register right_low, Register right_high, - Register scratch1, Register scratch2) { - BlockTrampolinePoolScope block_trampoline_pool(this); - Register scratch3 = t8; - Mulu(scratch2, scratch1, left_low, right_low); - Mul(scratch3, left_low, right_high); - Addu(scratch2, scratch2, scratch3); - Mul(scratch3, left_high, right_low); - Addu(dst_high, scratch2, scratch3); - Move(dst_low, scratch1); -} - -void TurboAssembler::ShlPair(Register dst_low, Register dst_high, - Register src_low, Register src_high, - Register shift, Register scratch1, - Register scratch2) { - BlockTrampolinePoolScope block_trampoline_pool(this); - Label done; - Register scratch3 = t8; - And(scratch3, shift, 0x3F); - sllv(dst_low, src_low, scratch3); - Nor(scratch2, zero_reg, scratch3); - srl(scratch1, src_low, 1); - srlv(scratch1, scratch1, scratch2); - sllv(dst_high, src_high, scratch3); - Or(dst_high, dst_high, scratch1); - And(scratch1, scratch3, 32); - if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) { - Branch(&done, eq, scratch1, Operand(zero_reg)); - mov(dst_high, dst_low); - mov(dst_low, zero_reg); - } else { - movn(dst_high, dst_low, scratch1); - movn(dst_low, zero_reg, scratch1); - } - bind(&done); -} - -void TurboAssembler::ShlPair(Register dst_low, Register dst_high, - Register src_low, Register src_high, - uint32_t shift, Register scratch) { - DCHECK_NE(dst_low, src_low); - DCHECK_NE(dst_high, src_low); - shift = shift & 0x3F; - if (shift == 0) { - mov(dst_high, src_high); - mov(dst_low, src_low); - } else if (shift < 32) { - if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { - DCHECK_NE(dst_high, src_high); - srl(dst_high, src_low, 32 - shift); - Ins(dst_high, src_high, shift, 32 - shift); - sll(dst_low, src_low, shift); - } else { - sll(dst_high, src_high, shift); - sll(dst_low, src_low, shift); - srl(scratch, src_low, 32 - shift); - Or(dst_high, dst_high, scratch); - } - } else if (shift == 32) { - mov(dst_low, zero_reg); - mov(dst_high, src_low); - } else { - shift = shift - 32; - mov(dst_low, zero_reg); - sll(dst_high, src_low, shift); - } -} - -void TurboAssembler::ShrPair(Register dst_low, Register dst_high, - Register src_low, Register src_high, - Register shift, Register scratch1, - Register scratch2) { - BlockTrampolinePoolScope block_trampoline_pool(this); - Label done; - Register scratch3 = t8; - And(scratch3, shift, 0x3F); - srlv(dst_high, src_high, scratch3); - Nor(scratch2, zero_reg, scratch3); - sll(scratch1, src_high, 1); - sllv(scratch1, scratch1, scratch2); - srlv(dst_low, src_low, scratch3); - Or(dst_low, dst_low, scratch1); - And(scratch1, scratch3, 32); - if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) { - 
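// [Editorial note, not part of the original patch] At this point dst_low and
// dst_high hold the result for shift amounts 0..31; scratch1 is nonzero when
// bit 5 of the shift amount is set, i.e. the amount is 32..63 and the words
// must swap (high word shifted into the low word, zero into the high word).
// r6 dropped the movn conditional move, and the Loongson variant avoids it
// as well, so those cores select the swapped result with an explicit branch;
// the else arm below uses movn instead.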
Branch(&done, eq, scratch1, Operand(zero_reg)); - mov(dst_low, dst_high); - mov(dst_high, zero_reg); - } else { - movn(dst_low, dst_high, scratch1); - movn(dst_high, zero_reg, scratch1); - } - bind(&done); -} - -void TurboAssembler::ShrPair(Register dst_low, Register dst_high, - Register src_low, Register src_high, - uint32_t shift, Register scratch) { - DCHECK_NE(dst_low, src_high); - DCHECK_NE(dst_high, src_high); - shift = shift & 0x3F; - if (shift == 0) { - mov(dst_low, src_low); - mov(dst_high, src_high); - } else if (shift < 32) { - if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { - srl(dst_low, src_low, shift); - Ins(dst_low, src_high, 32 - shift, shift); - srl(dst_high, src_high, shift); - } else { - srl(dst_low, src_low, shift); - srl(dst_high, src_high, shift); - shift = 32 - shift; - sll(scratch, src_high, shift); - Or(dst_low, dst_low, scratch); - } - } else if (shift == 32) { - mov(dst_high, zero_reg); - mov(dst_low, src_high); - } else { - shift = shift - 32; - mov(dst_high, zero_reg); - srl(dst_low, src_high, shift); - } -} - -void TurboAssembler::SarPair(Register dst_low, Register dst_high, - Register src_low, Register src_high, - Register shift, Register scratch1, - Register scratch2) { - BlockTrampolinePoolScope block_trampoline_pool(this); - Label done; - Register scratch3 = t8; - And(scratch3, shift, 0x3F); - srav(dst_high, src_high, scratch3); - Nor(scratch2, zero_reg, scratch3); - sll(scratch1, src_high, 1); - sllv(scratch1, scratch1, scratch2); - srlv(dst_low, src_low, scratch3); - Or(dst_low, dst_low, scratch1); - And(scratch1, scratch3, 32); - Branch(&done, eq, scratch1, Operand(zero_reg)); - mov(dst_low, dst_high); - sra(dst_high, dst_high, 31); - bind(&done); -} - -void TurboAssembler::SarPair(Register dst_low, Register dst_high, - Register src_low, Register src_high, - uint32_t shift, Register scratch) { - DCHECK_NE(dst_low, src_high); - DCHECK_NE(dst_high, src_high); - shift = shift & 0x3F; - if (shift == 0) { - mov(dst_low, src_low); - mov(dst_high, src_high); - } else if (shift < 32) { - if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { - srl(dst_low, src_low, shift); - Ins(dst_low, src_high, 32 - shift, shift); - sra(dst_high, src_high, shift); - } else { - srl(dst_low, src_low, shift); - sra(dst_high, src_high, shift); - shift = 32 - shift; - sll(scratch, src_high, shift); - Or(dst_low, dst_low, scratch); - } - } else if (shift == 32) { - sra(dst_high, src_high, 31); - mov(dst_low, src_high); - } else { - shift = shift - 32; - sra(dst_high, src_high, 31); - sra(dst_low, src_high, shift); - } -} - -void TurboAssembler::Ext(Register rt, Register rs, uint16_t pos, - uint16_t size) { - DCHECK_LT(pos, 32); - DCHECK_LT(pos + size, 33); - - if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { - ext_(rt, rs, pos, size); - } else { - // Move rs to rt and shift it left then right to get the - // desired bitfield on the right side and zeroes on the left. - int shift_left = 32 - (pos + size); - sll(rt, rs, shift_left); // Acts as a move if shift_left == 0. 
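The pair-shift helpers above build a 64-bit shift out of 32-bit instructions: shift both halves, carry the bits that cross the word boundary over with a complementary shift, then fix up amounts of 32 or more with a conditional move. Shifting by 1 first and then by the complemented amount keeps the cross-word shift well defined when the amount is zero. A C++ model of the same logic for the left shift (illustrative names, not V8 API):

#include <cassert>
#include <cstdint>

// 64-bit left shift of a {low, high} pair, mirroring ShlPair: the cross-word
// bits are obtained as (low >> 1) >> (~s & 31), which equals low >> (32 - s)
// but stays well defined when s == 0. Hardware sllv/srlv use the amount
// mod 32, hence the explicit (s & 31).
void ShlPair(uint32_t low, uint32_t high, uint32_t shift,
             uint32_t* out_low, uint32_t* out_high) {
  uint32_t s = shift & 0x3F;
  uint32_t lo = low << (s & 31);
  uint32_t cross = (low >> 1) >> (~s & 31);
  uint32_t hi = (high << (s & 31)) | cross;
  if (s & 32) {  // the movn pair in the deleted code
    hi = lo;
    lo = 0;
  }
  *out_low = lo;
  *out_high = hi;
}

int main() {
  uint32_t lo, hi;
  ShlPair(0x80000001u, 0u, 1, &lo, &hi);
  assert(lo == 0x00000002u && hi == 0x00000001u);
  ShlPair(0x00000001u, 0u, 33, &lo, &hi);
  assert(lo == 0u && hi == 0x00000002u);
}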
- - int shift_right = 32 - size; - if (shift_right > 0) { - srl(rt, rt, shift_right); - } - } -} - -void TurboAssembler::Ins(Register rt, Register rs, uint16_t pos, - uint16_t size) { - DCHECK_LT(pos, 32); - DCHECK_LE(pos + size, 32); - DCHECK_NE(size, 0); - - if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { - ins_(rt, rs, pos, size); - } else { - DCHECK(rt != t8 && rs != t8); - BlockTrampolinePoolScope block_trampoline_pool(this); - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - Subu(scratch, zero_reg, Operand(1)); - srl(scratch, scratch, 32 - size); - and_(t8, rs, scratch); - sll(t8, t8, pos); - sll(scratch, scratch, pos); - nor(scratch, scratch, zero_reg); - and_(scratch, rt, scratch); - or_(rt, t8, scratch); - } -} - -void TurboAssembler::ExtractBits(Register dest, Register source, Register pos, - int size, bool sign_extend) { - srav(dest, source, pos); - Ext(dest, dest, 0, size); - if (size == 8) { - if (sign_extend) { - Seb(dest, dest); - } - } else if (size == 16) { - if (sign_extend) { - Seh(dest, dest); - } - } else { - UNREACHABLE(); - } -} - -void TurboAssembler::InsertBits(Register dest, Register source, Register pos, - int size) { - Ror(dest, dest, pos); - Ins(dest, source, 0, size); - { - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - Subu(scratch, zero_reg, pos); - Ror(dest, dest, scratch); - } -} - -void TurboAssembler::Seb(Register rd, Register rt) { - if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { - seb(rd, rt); - } else { - DCHECK(IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)); - sll(rd, rt, 24); - sra(rd, rd, 24); - } -} - -void TurboAssembler::Seh(Register rd, Register rt) { - if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { - seh(rd, rt); - } else { - DCHECK(IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)); - sll(rd, rt, 16); - sra(rd, rd, 16); - } -} - -void TurboAssembler::Neg_s(FPURegister fd, FPURegister fs) { - if (IsMipsArchVariant(kMips32r6)) { - // r6 neg_s changes the sign for NaN-like operands as well. - neg_s(fd, fs); - } else { - DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || - IsMipsArchVariant(kLoongson)); - BlockTrampolinePoolScope block_trampoline_pool(this); - Label is_nan, done; - Register scratch1 = t8; - Register scratch2 = t9; - CompareIsNanF32(fs, fs); - BranchTrueShortF(&is_nan); - Branch(USE_DELAY_SLOT, &done); - // For NaN input, neg_s will return the same NaN value, - // while the sign has to be changed separately. - neg_s(fd, fs); // In delay slot. - bind(&is_nan); - mfc1(scratch1, fs); - li(scratch2, kBinary32SignMask); - Xor(scratch1, scratch1, scratch2); - mtc1(scratch1, fd); - bind(&done); - } -} - -void TurboAssembler::Neg_d(FPURegister fd, FPURegister fs) { - if (IsMipsArchVariant(kMips32r6)) { - // r6 neg_d changes the sign for NaN-like operands as well. - neg_d(fd, fs); - } else { - DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) || - IsMipsArchVariant(kLoongson)); - BlockTrampolinePoolScope block_trampoline_pool(this); - Label is_nan, done; - Register scratch1 = t8; - Register scratch2 = t9; - CompareIsNanF64(fs, fs); - BranchTrueShortF(&is_nan); - Branch(USE_DELAY_SLOT, &done); - // For NaN input, neg_d will return the same NaN value, - // while the sign has to be changed separately. - neg_d(fd, fs); // In delay slot. 
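On cores without the r2 bitfield instructions, Ext and Ins above fall back to shifts and masks: extraction shifts the field to the top of the word and back down zero-filled, insertion builds an all-ones mask of the field width. The same operations in plain C++, as a sketch under the same pos/size bounds the DCHECKs enforce:

#include <cassert>
#include <cstdint>

// Extract `size` bits of `x` starting at bit `pos`, as the pre-r2 Ext
// fallback does: shift the field to the top, then shift it down zero-filled.
// Requires 1 <= size and pos + size <= 32, like the assembler's DCHECKs.
uint32_t Ext(uint32_t x, unsigned pos, unsigned size) {
  uint32_t v = x << (32 - (pos + size));  // acts as a move if the shift is 0
  return v >> (32 - size);
}

// Insert the low `size` bits of `src` into `x` at bit `pos`, as the pre-r2
// Ins fallback does with an all-ones mask built from -1 >> (32 - size).
uint32_t Ins(uint32_t x, uint32_t src, unsigned pos, unsigned size) {
  uint32_t mask = 0xFFFFFFFFu >> (32 - size);
  return (x & ~(mask << pos)) | ((src & mask) << pos);
}

int main() {
  assert(Ext(0x12345678u, 8, 8) == 0x56u);
  assert(Ins(0x12345678u, 0xABu, 8, 8) == 0x1234AB78u);
}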
- bind(&is_nan); - Move(fd, fs); - Mfhc1(scratch1, fd); - li(scratch2, HeapNumber::kSignMask); - Xor(scratch1, scratch1, scratch2); - Mthc1(scratch1, fd); - bind(&done); - } -} - -void TurboAssembler::Cvt_d_uw(FPURegister fd, Register rs, - FPURegister scratch) { - // In FP64Mode we do conversion from long. - if (IsFp64Mode()) { - mtc1(rs, scratch); - Mthc1(zero_reg, scratch); - cvt_d_l(fd, scratch); - } else { - // Convert rs to a FP value in fd. - DCHECK(fd != scratch); - DCHECK(rs != at); - - Label msb_clear, conversion_done; - // For a value which is < 2^31, regard it as a signed positve word. - Branch(&msb_clear, ge, rs, Operand(zero_reg), USE_DELAY_SLOT); - mtc1(rs, fd); - { - UseScratchRegisterScope temps(this); - Register scratch1 = temps.Acquire(); - li(scratch1, 0x41F00000); // FP value: 2^32. - - // For unsigned inputs > 2^31, we convert to double as a signed int32, - // then add 2^32 to move it back to unsigned value in range 2^31..2^31-1. - mtc1(zero_reg, scratch); - Mthc1(scratch1, scratch); - } - - cvt_d_w(fd, fd); - - Branch(USE_DELAY_SLOT, &conversion_done); - add_d(fd, fd, scratch); - - bind(&msb_clear); - cvt_d_w(fd, fd); - - bind(&conversion_done); - } -} - -void TurboAssembler::Trunc_uw_d(FPURegister fd, FPURegister fs, - FPURegister scratch) { - BlockTrampolinePoolScope block_trampoline_pool(this); - Trunc_uw_d(t8, fs, scratch); - mtc1(t8, fd); -} - -void TurboAssembler::Trunc_uw_s(FPURegister fd, FPURegister fs, - FPURegister scratch) { - BlockTrampolinePoolScope block_trampoline_pool(this); - Trunc_uw_s(t8, fs, scratch); - mtc1(t8, fd); -} - -void TurboAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) { - if (IsMipsArchVariant(kLoongson) && fd == fs) { - BlockTrampolinePoolScope block_trampoline_pool(this); - Mfhc1(t8, fs); - trunc_w_d(fd, fs); - Mthc1(t8, fs); - } else { - trunc_w_d(fd, fs); - } -} - -void TurboAssembler::Round_w_d(FPURegister fd, FPURegister fs) { - if (IsMipsArchVariant(kLoongson) && fd == fs) { - BlockTrampolinePoolScope block_trampoline_pool(this); - Mfhc1(t8, fs); - round_w_d(fd, fs); - Mthc1(t8, fs); - } else { - round_w_d(fd, fs); - } -} - -void TurboAssembler::Floor_w_d(FPURegister fd, FPURegister fs) { - if (IsMipsArchVariant(kLoongson) && fd == fs) { - BlockTrampolinePoolScope block_trampoline_pool(this); - Mfhc1(t8, fs); - floor_w_d(fd, fs); - Mthc1(t8, fs); - } else { - floor_w_d(fd, fs); - } -} - -void TurboAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) { - if (IsMipsArchVariant(kLoongson) && fd == fs) { - BlockTrampolinePoolScope block_trampoline_pool(this); - Mfhc1(t8, fs); - ceil_w_d(fd, fs); - Mthc1(t8, fs); - } else { - ceil_w_d(fd, fs); - } -} - -void TurboAssembler::Trunc_uw_d(Register rd, FPURegister fs, - FPURegister scratch) { - DCHECK(fs != scratch); - DCHECK(rd != at); - - { - // Load 2^31 into scratch as its float representation. - UseScratchRegisterScope temps(this); - Register scratch1 = temps.Acquire(); - li(scratch1, 0x41E00000); - mtc1(zero_reg, scratch); - Mthc1(scratch1, scratch); - } - // Test if scratch > fs. - // If fs < 2^31 we can convert it normally. - Label simple_convert; - CompareF64(OLT, fs, scratch); - BranchTrueShortF(&simple_convert); - - // First we subtract 2^31 from fs, then trunc it to rd - // and add 2^31 to rd. - sub_d(scratch, fs, scratch); - trunc_w_d(scratch, scratch); - mfc1(rd, scratch); - Or(rd, rd, 1 << 31); - - Label done; - Branch(&done); - // Simple conversion. 
-  bind(&simple_convert);
-  trunc_w_d(scratch, fs);
-  mfc1(rd, scratch);
-
-  bind(&done);
-}
-
-void TurboAssembler::Trunc_uw_s(Register rd, FPURegister fs,
-                                FPURegister scratch) {
-  DCHECK(fs != scratch);
-  DCHECK(rd != at);
-
-  {
-    // Load 2^31 into scratch as its float representation.
-    UseScratchRegisterScope temps(this);
-    Register scratch1 = temps.Acquire();
-    li(scratch1, 0x4F000000);
-    mtc1(scratch1, scratch);
-  }
-  // Test if scratch > fs.
-  // If fs < 2^31 we can convert it normally.
-  Label simple_convert;
-  CompareF32(OLT, fs, scratch);
-  BranchTrueShortF(&simple_convert);
-
-  // First we subtract 2^31 from fs, then trunc it to rd
-  // and add 2^31 to rd.
-  sub_s(scratch, fs, scratch);
-  trunc_w_s(scratch, scratch);
-  mfc1(rd, scratch);
-  Or(rd, rd, 1 << 31);
-
-  Label done;
-  Branch(&done);
-  // Simple conversion.
-  bind(&simple_convert);
-  trunc_w_s(scratch, fs);
-  mfc1(rd, scratch);
-
-  bind(&done);
-}
-
-template <typename RoundFunc>
-void TurboAssembler::RoundDouble(FPURegister dst, FPURegister src,
-                                 FPURoundingMode mode, RoundFunc round) {
-  BlockTrampolinePoolScope block_trampoline_pool(this);
-  Register scratch = t8;
-  Register scratch2 = t9;
-  if (IsMipsArchVariant(kMips32r6)) {
-    cfc1(scratch, FCSR);
-    li(at, Operand(mode));
-    ctc1(at, FCSR);
-    rint_d(dst, src);
-    ctc1(scratch, FCSR);
-  } else {
-    Label done;
-    Mfhc1(scratch, src);
-    Ext(at, scratch, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
-    Branch(USE_DELAY_SLOT, &done, hs, at,
-           Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits));
-    mov_d(dst, src);
-    round(this, dst, src);
-    Move(at, scratch2, dst);
-    or_(at, at, scratch2);
-    Branch(USE_DELAY_SLOT, &done, ne, at, Operand(zero_reg));
-    cvt_d_l(dst, dst);
-    srl(at, scratch, 31);
-    sll(at, at, 31);
-    Mthc1(at, dst);
-    bind(&done);
-  }
-}
-
-void TurboAssembler::Floor_d_d(FPURegister dst, FPURegister src) {
-  RoundDouble(dst, src, mode_floor,
-              [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
-                tasm->floor_l_d(dst, src);
-              });
-}
-
-void TurboAssembler::Ceil_d_d(FPURegister dst, FPURegister src) {
-  RoundDouble(dst, src, mode_ceil,
-              [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
-                tasm->ceil_l_d(dst, src);
-              });
-}
-
-void TurboAssembler::Trunc_d_d(FPURegister dst, FPURegister src) {
-  RoundDouble(dst, src, mode_trunc,
-              [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
-                tasm->trunc_l_d(dst, src);
-              });
-}
-
-void TurboAssembler::Round_d_d(FPURegister dst, FPURegister src) {
-  RoundDouble(dst, src, mode_round,
-              [](TurboAssembler* tasm, FPURegister dst, FPURegister src) {
-                tasm->round_l_d(dst, src);
-              });
-}
-
-template <typename RoundFunc>
-void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src,
-                                FPURoundingMode mode, RoundFunc round) {
-  BlockTrampolinePoolScope block_trampoline_pool(this);
-  Register scratch = t8;
-  if (IsMipsArchVariant(kMips32r6)) {
-    cfc1(scratch, FCSR);
-    li(at, Operand(mode));
-    ctc1(at, FCSR);
-    rint_s(dst, src);
-    ctc1(scratch, FCSR);
-  } else {
-    int32_t kFloat32ExponentBias = 127;
-    int32_t kFloat32MantissaBits = 23;
-    int32_t kFloat32ExponentBits = 8;
-    Label done;
-    if (!IsDoubleZeroRegSet()) {
-      Move(kDoubleRegZero, 0.0);
-    }
-    mfc1(scratch, src);
-    Ext(at, scratch, kFloat32MantissaBits, kFloat32ExponentBits);
-    Branch(USE_DELAY_SLOT, &done, hs, at,
-           Operand(kFloat32ExponentBias + kFloat32MantissaBits));
-    // Canonicalize the result.
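Trunc_uw_d and Trunc_uw_s above compensate for the missing float-to-uint32 instruction: inputs below 2^31 go through the signed truncate directly, while larger inputs are biased down by 2^31 first and the top bit is OR'ed back into the integer result. The same arithmetic in portable C++ (a sketch; TruncUw is an invented name):

#include <cassert>
#include <cmath>
#include <cstdint>

// double -> uint32 truncation using only a signed 32-bit truncate, the same
// split the deleted Trunc_uw_d performs around 2^31.
uint32_t TruncUw(double x) {
  const double k2_31 = 2147483648.0;  // 2^31, the 0x41E00000 high-word constant
  if (x < k2_31) {
    return static_cast<uint32_t>(static_cast<int32_t>(std::trunc(x)));
  }
  // Subtract 2^31, truncate through the signed path, then set bit 31 again.
  return static_cast<uint32_t>(static_cast<int32_t>(std::trunc(x - k2_31))) |
         0x80000000u;
}

int main() {
  assert(TruncUw(12.9) == 12u);
  assert(TruncUw(3000000000.5) == 3000000000u);
}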
- sub_s(dst, src, kDoubleRegZero); - round(this, dst, src); - mfc1(at, dst); - Branch(USE_DELAY_SLOT, &done, ne, at, Operand(zero_reg)); - cvt_s_w(dst, dst); - srl(at, scratch, 31); - sll(at, at, 31); - mtc1(at, dst); - bind(&done); - } -} - -void TurboAssembler::Floor_s_s(FPURegister dst, FPURegister src) { - RoundFloat(dst, src, mode_floor, - [](TurboAssembler* tasm, FPURegister dst, FPURegister src) { - tasm->floor_w_s(dst, src); - }); -} - -void TurboAssembler::Ceil_s_s(FPURegister dst, FPURegister src) { - RoundFloat(dst, src, mode_ceil, - [](TurboAssembler* tasm, FPURegister dst, FPURegister src) { - tasm->ceil_w_s(dst, src); - }); -} - -void TurboAssembler::Trunc_s_s(FPURegister dst, FPURegister src) { - RoundFloat(dst, src, mode_trunc, - [](TurboAssembler* tasm, FPURegister dst, FPURegister src) { - tasm->trunc_w_s(dst, src); - }); -} - -void TurboAssembler::Round_s_s(FPURegister dst, FPURegister src) { - RoundFloat(dst, src, mode_round, - [](TurboAssembler* tasm, FPURegister dst, FPURegister src) { - tasm->round_w_s(dst, src); - }); -} - -void TurboAssembler::Mthc1(Register rt, FPURegister fs) { - if (IsFp32Mode()) { - mtc1(rt, fs.high()); - } else { - DCHECK(IsFp64Mode() || IsFpxxMode()); - DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); - mthc1(rt, fs); - } -} - -void TurboAssembler::Mfhc1(Register rt, FPURegister fs) { - if (IsFp32Mode()) { - mfc1(rt, fs.high()); - } else { - DCHECK(IsFp64Mode() || IsFpxxMode()); - DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); - mfhc1(rt, fs); - } -} - -void TurboAssembler::Madd_s(FPURegister fd, FPURegister fr, FPURegister fs, - FPURegister ft, FPURegister scratch) { - if (IsMipsArchVariant(kMips32r2)) { - madd_s(fd, fr, fs, ft); - } else { - DCHECK(fr != scratch && fs != scratch && ft != scratch); - mul_s(scratch, fs, ft); - add_s(fd, fr, scratch); - } -} - -void TurboAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs, - FPURegister ft, FPURegister scratch) { - if (IsMipsArchVariant(kMips32r2)) { - madd_d(fd, fr, fs, ft); - } else { - DCHECK(fr != scratch && fs != scratch && ft != scratch); - mul_d(scratch, fs, ft); - add_d(fd, fr, scratch); - } -} - -void TurboAssembler::Msub_s(FPURegister fd, FPURegister fr, FPURegister fs, - FPURegister ft, FPURegister scratch) { - if (IsMipsArchVariant(kMips32r2)) { - msub_s(fd, fr, fs, ft); - } else { - DCHECK(fr != scratch && fs != scratch && ft != scratch); - mul_s(scratch, fs, ft); - sub_s(fd, scratch, fr); - } -} - -void TurboAssembler::Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, - FPURegister ft, FPURegister scratch) { - if (IsMipsArchVariant(kMips32r2)) { - msub_d(fd, fr, fs, ft); - } else { - DCHECK(fr != scratch && fs != scratch && ft != scratch); - mul_d(scratch, fs, ft); - sub_d(fd, scratch, fr); - } -} - -void TurboAssembler::CompareF(SecondaryField sizeField, FPUCondition cc, - FPURegister cmp1, FPURegister cmp2) { - if (IsMipsArchVariant(kMips32r6)) { - sizeField = sizeField == D ? 
L : W;
-    DCHECK(cmp1 != kDoubleCompareReg && cmp2 != kDoubleCompareReg);
-    cmp(cc, sizeField, kDoubleCompareReg, cmp1, cmp2);
-  } else {
-    c(cc, sizeField, cmp1, cmp2);
-  }
-}
-
-void TurboAssembler::CompareIsNanF(SecondaryField sizeField, FPURegister cmp1,
-                                   FPURegister cmp2) {
-  CompareF(sizeField, UN, cmp1, cmp2);
-}
-
-void TurboAssembler::BranchTrueShortF(Label* target, BranchDelaySlot bd) {
-  if (IsMipsArchVariant(kMips32r6)) {
-    bc1nez(target, kDoubleCompareReg);
-  } else {
-    bc1t(target);
-  }
-  if (bd == PROTECT) {
-    nop();
-  }
-}
-
-void TurboAssembler::BranchFalseShortF(Label* target, BranchDelaySlot bd) {
-  if (IsMipsArchVariant(kMips32r6)) {
-    bc1eqz(target, kDoubleCompareReg);
-  } else {
-    bc1f(target);
-  }
-  if (bd == PROTECT) {
-    nop();
-  }
-}
-
-void TurboAssembler::BranchTrueF(Label* target, BranchDelaySlot bd) {
-  bool long_branch =
-      target->is_bound() ? !is_near(target) : is_trampoline_emitted();
-  if (long_branch) {
-    Label skip;
-    BranchFalseShortF(&skip);
-    BranchLong(target, bd);
-    bind(&skip);
-  } else {
-    BranchTrueShortF(target, bd);
-  }
-}
-
-void TurboAssembler::BranchFalseF(Label* target, BranchDelaySlot bd) {
-  bool long_branch =
-      target->is_bound() ? !is_near(target) : is_trampoline_emitted();
-  if (long_branch) {
-    Label skip;
-    BranchTrueShortF(&skip);
-    BranchLong(target, bd);
-    bind(&skip);
-  } else {
-    BranchFalseShortF(target, bd);
-  }
-}
-
-void TurboAssembler::BranchMSA(Label* target, MSABranchDF df,
-                               MSABranchCondition cond, MSARegister wt,
-                               BranchDelaySlot bd) {
-  {
-    BlockTrampolinePoolScope block_trampoline_pool(this);
-
-    if (target) {
-      bool long_branch =
-          target->is_bound() ? !is_near(target) : is_trampoline_emitted();
-      if (long_branch) {
-        Label skip;
-        MSABranchCondition neg_cond = NegateMSABranchCondition(cond);
-        BranchShortMSA(df, &skip, neg_cond, wt, bd);
-        BranchLong(target, bd);
-        bind(&skip);
-      } else {
-        BranchShortMSA(df, target, cond, wt, bd);
-      }
-    }
-  }
-}
-
-void TurboAssembler::BranchShortMSA(MSABranchDF df, Label* target,
-                                    MSABranchCondition cond, MSARegister wt,
-                                    BranchDelaySlot bd) {
-  if (IsMipsArchVariant(kMips32r6)) {
-    BlockTrampolinePoolScope block_trampoline_pool(this);
-    if (target) {
-      switch (cond) {
-        case all_not_zero:
-          switch (df) {
-            case MSA_BRANCH_D:
-              bnz_d(wt, target);
-              break;
-            case MSA_BRANCH_W:
-              bnz_w(wt, target);
-              break;
-            case MSA_BRANCH_H:
-              bnz_h(wt, target);
-              break;
-            case MSA_BRANCH_B:
-            default:
-              bnz_b(wt, target);
-          }
-          break;
-        case one_elem_not_zero:
-          bnz_v(wt, target);
-          break;
-        case one_elem_zero:
-          switch (df) {
-            case MSA_BRANCH_D:
-              bz_d(wt, target);
-              break;
-            case MSA_BRANCH_W:
-              bz_w(wt, target);
-              break;
-            case MSA_BRANCH_H:
-              bz_h(wt, target);
-              break;
-            case MSA_BRANCH_B:
-            default:
-              bz_b(wt, target);
-          }
-          break;
-        case all_zero:
-          bz_v(wt, target);
-          break;
-        default:
-          UNREACHABLE();
-      }
-    }
-  }
-  if (bd == PROTECT) {
-    nop();
-  }
-}
-
-void TurboAssembler::FmoveLow(FPURegister dst, Register src_low) {
-  if (IsFp32Mode()) {
-    mtc1(src_low, dst);
-  } else {
-    DCHECK(IsFp64Mode() || IsFpxxMode());
-    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
-    UseScratchRegisterScope temps(this);
-    Register scratch = temps.Acquire();
-    DCHECK(src_low != scratch);
-    mfhc1(scratch, dst);
-    mtc1(src_low, dst);
-    mthc1(scratch, dst);
-  }
-}
-
-void TurboAssembler::Move(FPURegister dst, uint32_t src) {
-  UseScratchRegisterScope temps(this);
-  Register scratch = temps.Acquire();
-  li(scratch, Operand(static_cast<int32_t>(src)));
-  mtc1(scratch, dst);
-}
-
-void TurboAssembler::Move(FPURegister dst, uint64_t src) {
-  // Handle special values first.
-  if (src == base::bit_cast<uint64_t>(0.0) && has_double_zero_reg_set_) {
-    mov_d(dst, kDoubleRegZero);
-  } else if (src == base::bit_cast<uint64_t>(-0.0) &&
-             has_double_zero_reg_set_) {
-    Neg_d(dst, kDoubleRegZero);
-  } else {
-    uint32_t lo = src & 0xFFFFFFFF;
-    uint32_t hi = src >> 32;
-    // Move the low part of the double into the lower of the corresponding FPU
-    // register of FPU register pair.
-    if (lo != 0) {
-      UseScratchRegisterScope temps(this);
-      Register scratch = temps.Acquire();
-      li(scratch, Operand(lo));
-      mtc1(scratch, dst);
-    } else {
-      mtc1(zero_reg, dst);
-    }
-    // Move the high part of the double into the higher of the corresponding FPU
-    // register of FPU register pair.
-    if (hi != 0) {
-      UseScratchRegisterScope temps(this);
-      Register scratch = temps.Acquire();
-      li(scratch, Operand(hi));
-      Mthc1(scratch, dst);
-    } else {
-      Mthc1(zero_reg, dst);
-    }
-    if (dst == kDoubleRegZero) has_double_zero_reg_set_ = true;
-  }
-}
-
-void TurboAssembler::LoadZeroOnCondition(Register rd, Register rs,
-                                         const Operand& rt, Condition cond) {
-  BlockTrampolinePoolScope block_trampoline_pool(this);
-  switch (cond) {
-    case cc_always:
-      mov(rd, zero_reg);
-      break;
-    case eq:
-      if (rs == zero_reg) {
-        if (rt.is_reg()) {
-          LoadZeroIfConditionZero(rd, rt.rm());
-        } else {
-          if (rt.immediate() == 0) {
-            mov(rd, zero_reg);
-          } else {
-            nop();
-          }
-        }
-      } else if (IsZero(rt)) {
-        LoadZeroIfConditionZero(rd, rs);
-      } else {
-        Subu(t9, rs, rt);
-        LoadZeroIfConditionZero(rd, t9);
-      }
-      break;
-    case ne:
-      if (rs == zero_reg) {
-        if (rt.is_reg()) {
-          LoadZeroIfConditionNotZero(rd, rt.rm());
-        } else {
-          if (rt.immediate() != 0) {
-            mov(rd, zero_reg);
-          } else {
-            nop();
-          }
-        }
-      } else if (IsZero(rt)) {
-        LoadZeroIfConditionNotZero(rd, rs);
-      } else {
-        Subu(t9, rs, rt);
-        LoadZeroIfConditionNotZero(rd, t9);
-      }
-      break;
-
-    // Signed comparison.
-    case greater:
-      Sgt(t9, rs, rt);
-      LoadZeroIfConditionNotZero(rd, t9);
-      break;
-    case greater_equal:
-      Sge(t9, rs, rt);
-      LoadZeroIfConditionNotZero(rd, t9);
-      // rs >= rt
-      break;
-    case less:
-      Slt(t9, rs, rt);
-      LoadZeroIfConditionNotZero(rd, t9);
-      // rs < rt
-      break;
-    case less_equal:
-      Sle(t9, rs, rt);
-      LoadZeroIfConditionNotZero(rd, t9);
-      // rs <= rt
-      break;
-
-    // Unsigned comparison.
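Move(FPURegister, uint64_t) above materializes a double constant by writing its bit pattern into the FPU register as two 32-bit words, mtc1 for the low word and Mthc1 for the high word, with ±0.0 special-cased against the cached zero register. The bit-level split, sketched in C++ (SplitDouble is an invented name):

#include <cassert>
#include <cstdint>
#include <cstring>

// Split a double's bit pattern into the {low, high} 32-bit words that the
// deleted Move() feeds to mtc1/Mthc1.
void SplitDouble(double d, uint32_t* lo, uint32_t* hi) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));  // portable stand-in for bit_cast
  *lo = static_cast<uint32_t>(bits & 0xFFFFFFFF);
  *hi = static_cast<uint32_t>(bits >> 32);
}

int main() {
  uint32_t lo, hi;
  SplitDouble(1.0, &lo, &hi);
  assert(lo == 0u && hi == 0x3FF00000u);  // IEEE-754 layout of 1.0
}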
- case Ugreater: - Sgtu(t9, rs, rt); - LoadZeroIfConditionNotZero(rd, t9); - // rs > rt - break; - - case Ugreater_equal: - Sgeu(t9, rs, rt); - LoadZeroIfConditionNotZero(rd, t9); - // rs >= rt - break; - case Uless: - Sltu(t9, rs, rt); - LoadZeroIfConditionNotZero(rd, t9); - // rs < rt - break; - case Uless_equal: - Sleu(t9, rs, rt); - LoadZeroIfConditionNotZero(rd, t9); - // rs <= rt - break; - default: - UNREACHABLE(); - } -} - -void TurboAssembler::LoadZeroIfConditionNotZero(Register dest, - Register condition) { - if (IsMipsArchVariant(kMips32r6)) { - seleqz(dest, dest, condition); - } else { - Movn(dest, zero_reg, condition); - } -} - -void TurboAssembler::LoadZeroIfConditionZero(Register dest, - Register condition) { - if (IsMipsArchVariant(kMips32r6)) { - selnez(dest, dest, condition); - } else { - Movz(dest, zero_reg, condition); - } -} - -void TurboAssembler::LoadZeroIfFPUCondition(Register dest) { - if (IsMipsArchVariant(kMips32r6)) { - mfc1(kScratchReg, kDoubleCompareReg); - LoadZeroIfConditionNotZero(dest, kScratchReg); - } else { - Movt(dest, zero_reg); - } -} - -void TurboAssembler::LoadZeroIfNotFPUCondition(Register dest) { - if (IsMipsArchVariant(kMips32r6)) { - mfc1(kScratchReg, kDoubleCompareReg); - LoadZeroIfConditionZero(dest, kScratchReg); - } else { - Movf(dest, zero_reg); - } -} - -void TurboAssembler::Movz(Register rd, Register rs, Register rt) { - if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) { - Label done; - Branch(&done, ne, rt, Operand(zero_reg)); - mov(rd, rs); - bind(&done); - } else { - movz(rd, rs, rt); - } -} - -void TurboAssembler::Movn(Register rd, Register rs, Register rt) { - if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) { - Label done; - Branch(&done, eq, rt, Operand(zero_reg)); - mov(rd, rs); - bind(&done); - } else { - movn(rd, rs, rt); - } -} - -void TurboAssembler::Movt(Register rd, Register rs, uint16_t cc) { - if (IsMipsArchVariant(kLoongson)) { - BlockTrampolinePoolScope block_trampoline_pool(this); - // Tests an FP condition code and then conditionally move rs to rd. - // We do not currently use any FPU cc bit other than bit 0. - DCHECK_EQ(cc, 0); - DCHECK(rs != t8 && rd != t8); - Label done; - Register scratch = t8; - // For testing purposes we need to fetch content of the FCSR register and - // than test its cc (floating point condition code) bit (for cc = 0, it is - // 24. bit of the FCSR). - cfc1(scratch, FCSR); - // For the MIPS I, II and III architectures, the contents of scratch is - // UNPREDICTABLE for the instruction immediately following CFC1. - nop(); - srl(scratch, scratch, 16); - andi(scratch, scratch, 0x0080); - Branch(&done, eq, scratch, Operand(zero_reg)); - mov(rd, rs); - bind(&done); - } else { - movt(rd, rs, cc); - } -} - -void TurboAssembler::Movf(Register rd, Register rs, uint16_t cc) { - if (IsMipsArchVariant(kLoongson)) { - BlockTrampolinePoolScope block_trampoline_pool(this); - // Tests an FP condition code and then conditionally move rs to rd. - // We do not currently use any FPU cc bit other than bit 0. - DCHECK_EQ(cc, 0); - DCHECK(rs != t8 && rd != t8); - Label done; - Register scratch = t8; - // For testing purposes we need to fetch content of the FCSR register and - // than test its cc (floating point condition code) bit (for cc = 0, it is - // 24. bit of the FCSR). - cfc1(scratch, FCSR); - // For the MIPS I, II and III architectures, the contents of scratch is - // UNPREDICTABLE for the instruction immediately following CFC1. 
- nop(); - srl(scratch, scratch, 16); - andi(scratch, scratch, 0x0080); - Branch(&done, ne, scratch, Operand(zero_reg)); - mov(rd, rs); - bind(&done); - } else { - movf(rd, rs, cc); - } -} - -void TurboAssembler::Clz(Register rd, Register rs) { - if (IsMipsArchVariant(kLoongson)) { - BlockTrampolinePoolScope block_trampoline_pool(this); - DCHECK(rd != t8 && rd != t9 && rs != t8 && rs != t9); - Register mask = t8; - Register scratch = t9; - Label loop, end; - { - UseScratchRegisterScope temps(this); - Register scratch1 = temps.Acquire(); - mov(scratch1, rs); - mov(rd, zero_reg); - lui(mask, 0x8000); - bind(&loop); - and_(scratch, scratch1, mask); - } - Branch(&end, ne, scratch, Operand(zero_reg)); - addiu(rd, rd, 1); - Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT); - srl(mask, mask, 1); - bind(&end); - } else { - clz(rd, rs); - } -} - -void TurboAssembler::Ctz(Register rd, Register rs) { - if (IsMipsArchVariant(kMips32r6)) { - // We don't have an instruction to count the number of trailing zeroes. - // Start by flipping the bits end-for-end so we can count the number of - // leading zeroes instead. - Ror(rd, rs, 16); - wsbh(rd, rd); - bitswap(rd, rd); - Clz(rd, rd); - } else { - // Convert trailing zeroes to trailing ones, and bits to their left - // to zeroes. - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - Addu(scratch, rs, -1); - Xor(rd, scratch, rs); - And(rd, rd, scratch); - // Count number of leading zeroes. - Clz(rd, rd); - // Subtract number of leading zeroes from 32 to get number of trailing - // ones. Remember that the trailing ones were formerly trailing zeroes. - li(scratch, 32); - Subu(rd, scratch, rd); - } -} - -void TurboAssembler::Popcnt(Register rd, Register rs) { - ASM_CODE_COMMENT(this); - // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel - // - // A generalization of the best bit counting method to integers of - // bit-widths up to 128 (parameterized by type T) is this: - // - // v = v - ((v >> 1) & (T)~(T)0/3); // temp - // v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3); // temp - // v = (v + (v >> 4)) & (T)~(T)0/255*15; // temp - // c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE; //count - // - // For comparison, for 32-bit quantities, this algorithm can be executed - // using 20 MIPS instructions (the calls to LoadConst32() generate two - // machine instructions each for the values being used in this algorithm). - // A(n unrolled) loop-based algorithm requires 25 instructions. - // - // For 64-bit quantities, this algorithm gets executed twice, (once - // for in_lo, and again for in_hi), but saves a few instructions - // because the mask values only have to be loaded once. Using this - // algorithm the count for a 64-bit operand can be performed in 29 - // instructions compared to a loop-based algorithm which requires 47 - // instructions. 
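For reference, the parallel bit count the comment above describes, written out in C++; it uses the same B0/B1/B2 masks and the final multiply-and-shift that the emitted code below loads with li and Mul:

#include <cassert>
#include <cstdint>

// SWAR popcount from the Stanford bithacks page, the algorithm the MIPS
// Popcnt emulation encodes with li/And/Addu/Mul.
uint32_t Popcnt(uint32_t v) {
  v = v - ((v >> 1) & 0x55555555u);                  // B0: count pairs
  v = (v & 0x33333333u) + ((v >> 2) & 0x33333333u);  // B1: count nibbles
  v = (v + (v >> 4)) & 0x0F0F0F0Fu;                  // B2: count bytes
  return (v * 0x01010101u) >> 24;                    // sum bytes into the top
}

int main() {
  assert(Popcnt(0u) == 0u);
  assert(Popcnt(0xFFFFFFFFu) == 32u);
  assert(Popcnt(0xF0F00001u) == 9u);
}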
- uint32_t B0 = 0x55555555; // (T)~(T)0/3 - uint32_t B1 = 0x33333333; // (T)~(T)0/15*3 - uint32_t B2 = 0x0F0F0F0F; // (T)~(T)0/255*15 - uint32_t value = 0x01010101; // (T)~(T)0/255 - uint32_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE - BlockTrampolinePoolScope block_trampoline_pool(this); - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - Register scratch2 = t8; - srl(scratch, rs, 1); - li(scratch2, B0); - And(scratch, scratch, scratch2); - Subu(scratch, rs, scratch); - li(scratch2, B1); - And(rd, scratch, scratch2); - srl(scratch, scratch, 2); - And(scratch, scratch, scratch2); - Addu(scratch, rd, scratch); - srl(rd, scratch, 4); - Addu(rd, rd, scratch); - li(scratch2, B2); - And(rd, rd, scratch2); - li(scratch, value); - Mul(rd, rd, scratch); - srl(rd, rd, shift); -} - -void TurboAssembler::TryInlineTruncateDoubleToI(Register result, - DoubleRegister double_input, - Label* done) { - BlockTrampolinePoolScope block_trampoline_pool(this); - DoubleRegister single_scratch = kScratchDoubleReg.low(); - Register scratch = t9; - - // Try a conversion to a signed integer. - trunc_w_d(single_scratch, double_input); - mfc1(result, single_scratch); - // Retrieve the FCSR. - cfc1(scratch, FCSR); - // Check for overflow and NaNs. - And(scratch, scratch, - kFCSROverflowCauseMask | kFCSRUnderflowCauseMask | - kFCSRInvalidOpCauseMask); - // If we had no exceptions we are done. - Branch(done, eq, scratch, Operand(zero_reg)); -} - -void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, - Register result, - DoubleRegister double_input, - StubCallMode stub_mode) { - Label done; - - TryInlineTruncateDoubleToI(result, double_input, &done); - - // If we fell through then inline version didn't succeed - call stub instead. - push(ra); - Subu(sp, sp, Operand(kDoubleSize)); // Put input on stack. - Sdc1(double_input, MemOperand(sp, 0)); - -#if V8_ENABLE_WEBASSEMBLY - if (stub_mode == StubCallMode::kCallWasmRuntimeStub) { - Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL); -#else - // For balance. - if (false) { -#endif // V8_ENABLE_WEBASSEMBLY - } else { - Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET); - } - lw(result, MemOperand(sp, 0)); - - Addu(sp, sp, Operand(kDoubleSize)); - pop(ra); - - bind(&done); -} - -// Emulated condtional branches do not emit a nop in the branch delay slot. -// -// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct. -#define BRANCH_ARGS_CHECK(cond, rs, rt) \ - DCHECK((cond == cc_always && rs == zero_reg && rt.rm() == zero_reg) || \ - (cond != cc_always && (rs != zero_reg || rt.rm() != zero_reg))) - -void TurboAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) { - DCHECK(IsMipsArchVariant(kMips32r6) ? 
is_int26(offset) : is_int16(offset)); - BranchShort(offset, bdslot); -} - -void TurboAssembler::Branch(int32_t offset, Condition cond, Register rs, - const Operand& rt, BranchDelaySlot bdslot) { - bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot); - DCHECK(is_near); - USE(is_near); -} - -void TurboAssembler::Branch(Label* L, BranchDelaySlot bdslot) { - if (L->is_bound()) { - if (is_near_branch(L)) { - BranchShort(L, bdslot); - } else { - BranchLong(L, bdslot); - } - } else { - if (is_trampoline_emitted()) { - BranchLong(L, bdslot); - } else { - BranchShort(L, bdslot); - } - } -} - -void TurboAssembler::Branch(Label* L, Condition cond, Register rs, - const Operand& rt, BranchDelaySlot bdslot) { - if (L->is_bound()) { - if (!BranchShortCheck(0, L, cond, rs, rt, bdslot)) { - if (cond != cc_always) { - Label skip; - Condition neg_cond = NegateCondition(cond); - BranchShort(&skip, neg_cond, rs, rt); - BranchLong(L, bdslot); - bind(&skip); - } else { - BranchLong(L, bdslot); - } - } - } else { - if (is_trampoline_emitted()) { - if (cond != cc_always) { - Label skip; - Condition neg_cond = NegateCondition(cond); - BranchShort(&skip, neg_cond, rs, rt); - BranchLong(L, bdslot); - bind(&skip); - } else { - BranchLong(L, bdslot); - } - } else { - BranchShort(L, cond, rs, rt, bdslot); - } - } -} - -void TurboAssembler::Branch(Label* L, Condition cond, Register rs, - RootIndex index, BranchDelaySlot bdslot) { - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - LoadRoot(scratch, index); - Branch(L, cond, rs, Operand(scratch), bdslot); -} - -void TurboAssembler::BranchShortHelper(int16_t offset, Label* L, - BranchDelaySlot bdslot) { - DCHECK(L == nullptr || offset == 0); - offset = GetOffset(offset, L, OffsetSize::kOffset16); - b(offset); - - // Emit a nop in the branch delay slot if required. 
- if (bdslot == PROTECT) nop(); -} - -void TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L) { - DCHECK(L == nullptr || offset == 0); - offset = GetOffset(offset, L, OffsetSize::kOffset26); - bc(offset); -} - -void TurboAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) { - if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) { - DCHECK(is_int26(offset)); - BranchShortHelperR6(offset, nullptr); - } else { - DCHECK(is_int16(offset)); - BranchShortHelper(offset, nullptr, bdslot); - } -} - -void TurboAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) { - if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) { - BranchShortHelperR6(0, L); - } else { - BranchShortHelper(0, L, bdslot); - } -} - -int32_t TurboAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) { - if (L) { - offset = branch_offset_helper(L, bits) >> 2; - } else { - DCHECK(is_intn(offset, bits)); - } - return offset; -} - -Register TurboAssembler::GetRtAsRegisterHelper(const Operand& rt, - Register scratch) { - Register r2 = no_reg; - if (rt.is_reg()) { - r2 = rt.rm(); - } else { - r2 = scratch; - li(r2, rt); - } - - return r2; -} - -bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, - OffsetSize bits) { - if (!is_near(L, bits)) return false; - *offset = GetOffset(*offset, L, bits); - return true; -} - -bool TurboAssembler::CalculateOffset(Label* L, int32_t* offset, OffsetSize bits, - Register* scratch, const Operand& rt) { - if (!is_near(L, bits)) return false; - *scratch = GetRtAsRegisterHelper(rt, *scratch); - *offset = GetOffset(*offset, L, bits); - return true; -} - -bool TurboAssembler::BranchShortHelperR6(int32_t offset, Label* L, - Condition cond, Register rs, - const Operand& rt) { - DCHECK(L == nullptr || offset == 0); - UseScratchRegisterScope temps(this); - Register scratch = temps.hasAvailable() ? temps.Acquire() : t8; - - // Be careful to always use shifted_branch_offset only just before the - // branch instruction, as the location will be remember for patching the - // target. - { - BlockTrampolinePoolScope block_trampoline_pool(this); - switch (cond) { - case cc_always: - if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; - bc(offset); - break; - case eq: - if (rt.is_reg() && rs.code() == rt.rm().code()) { - // Pre R6 beq is used here to make the code patchable. Otherwise bc - // should be used which has no condition field so is not patchable. - if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) - return false; - beq(rs, scratch, offset); - nop(); - } else if (IsZero(rt)) { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false; - beqzc(rs, offset); - } else { - // We don't want any other register but scratch clobbered. - if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) - return false; - beqc(rs, scratch, offset); - } - break; - case ne: - if (rt.is_reg() && rs.code() == rt.rm().code()) { - // Pre R6 bne is used here to make the code patchable. Otherwise we - // should not generate any instruction. - if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) - return false; - bne(rs, scratch, offset); - nop(); - } else if (IsZero(rt)) { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false; - bnezc(rs, offset); - } else { - // We don't want any other register but scratch clobbered. - if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) - return false; - bnec(rs, scratch, offset); - } - break; - - // Signed comparison. 
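GetOffset above converts a byte offset into an instruction-word offset (the >> 2) before it is range-checked against the branch's immediate field, which is why the helpers test is_int16 for classic branches and is_int26 for r6 compact branches. A sketch of that fit test in C++ (FitsBranchField is an invented name):

#include <cassert>
#include <cstdint>

// True if a byte offset, once scaled to instruction words, fits the signed
// immediate field of a MIPS branch (16 bits classic, 26 bits r6 compact).
bool FitsBranchField(int64_t byte_offset, int bits) {
  if (byte_offset & 3) return false;  // branch targets are word aligned
  int64_t words = byte_offset >> 2;   // what GetOffset's >> 2 does
  int64_t limit = int64_t{1} << (bits - 1);
  return words >= -limit && words < limit;
}

int main() {
  assert(FitsBranchField(131068, 16));   // 32767 words: last is_int16 offset
  assert(!FitsBranchField(131072, 16));  // 32768 words: needs a trampoline
  assert(FitsBranchField(131072, 26));   // fine for an r6 bc/balc
}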
- case greater: - // rs > rt - if (rt.is_reg() && rs.code() == rt.rm().code()) { - break; // No code needs to be emitted. - } else if (rs == zero_reg) { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) - return false; - bltzc(scratch, offset); - } else if (IsZero(rt)) { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; - bgtzc(rs, offset); - } else { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) - return false; - DCHECK(rs != scratch); - bltc(scratch, rs, offset); - } - break; - case greater_equal: - // rs >= rt - if (rt.is_reg() && rs.code() == rt.rm().code()) { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; - bc(offset); - } else if (rs == zero_reg) { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) - return false; - blezc(scratch, offset); - } else if (IsZero(rt)) { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; - bgezc(rs, offset); - } else { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) - return false; - DCHECK(rs != scratch); - bgec(rs, scratch, offset); - } - break; - case less: - // rs < rt - if (rt.is_reg() && rs.code() == rt.rm().code()) { - break; // No code needs to be emitted. - } else if (rs == zero_reg) { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) - return false; - bgtzc(scratch, offset); - } else if (IsZero(rt)) { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; - bltzc(rs, offset); - } else { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) - return false; - DCHECK(rs != scratch); - bltc(rs, scratch, offset); - } - break; - case less_equal: - // rs <= rt - if (rt.is_reg() && rs.code() == rt.rm().code()) { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; - bc(offset); - } else if (rs == zero_reg) { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) - return false; - bgezc(scratch, offset); - } else if (IsZero(rt)) { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; - blezc(rs, offset); - } else { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) - return false; - DCHECK(rs != scratch); - bgec(scratch, rs, offset); - } - break; - - // Unsigned comparison. - case Ugreater: - // rs > rt - if (rt.is_reg() && rs.code() == rt.rm().code()) { - break; // No code needs to be emitted. 
- } else if (rs == zero_reg) { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt)) - return false; - bnezc(scratch, offset); - } else if (IsZero(rt)) { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false; - bnezc(rs, offset); - } else { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) - return false; - DCHECK(rs != scratch); - bltuc(scratch, rs, offset); - } - break; - case Ugreater_equal: - // rs >= rt - if (rt.is_reg() && rs.code() == rt.rm().code()) { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; - bc(offset); - } else if (rs == zero_reg) { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt)) - return false; - beqzc(scratch, offset); - } else if (IsZero(rt)) { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; - bc(offset); - } else { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) - return false; - DCHECK(rs != scratch); - bgeuc(rs, scratch, offset); - } - break; - case Uless: - // rs < rt - if (rt.is_reg() && rs.code() == rt.rm().code()) { - break; // No code needs to be emitted. - } else if (rs == zero_reg) { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset21, &scratch, rt)) - return false; - bnezc(scratch, offset); - } else if (IsZero(rt)) { - break; // No code needs to be emitted. - } else { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) - return false; - DCHECK(rs != scratch); - bltuc(rs, scratch, offset); - } - break; - case Uless_equal: - // rs <= rt - if (rt.is_reg() && rs.code() == rt.rm().code()) { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; - bc(offset); - } else if (rs == zero_reg) { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset26, &scratch, rt)) - return false; - bc(offset); - } else if (IsZero(rt)) { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset21)) return false; - beqzc(rs, offset); - } else { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) - return false; - DCHECK(rs != scratch); - bgeuc(scratch, rs, offset); - } - break; - default: - UNREACHABLE(); - } - } - CheckTrampolinePoolQuick(1); - return true; -} - -bool TurboAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond, - Register rs, const Operand& rt, - BranchDelaySlot bdslot) { - DCHECK(L == nullptr || offset == 0); - if (!is_near(L, OffsetSize::kOffset16)) return false; - - UseScratchRegisterScope temps(this); - Register scratch = temps.hasAvailable() ? temps.Acquire() : t8; - int32_t offset32; - - // Be careful to always use shifted_branch_offset only just before the - // branch instruction, as the location will be remember for patching the - // target. - { - BlockTrampolinePoolScope block_trampoline_pool(this); - switch (cond) { - case cc_always: - offset32 = GetOffset(offset, L, OffsetSize::kOffset16); - b(offset32); - break; - case eq: - if (IsZero(rt)) { - offset32 = GetOffset(offset, L, OffsetSize::kOffset16); - beq(rs, zero_reg, offset32); - } else { - // We don't want any other register but scratch clobbered. - scratch = GetRtAsRegisterHelper(rt, scratch); - offset32 = GetOffset(offset, L, OffsetSize::kOffset16); - beq(rs, scratch, offset32); - } - break; - case ne: - if (IsZero(rt)) { - offset32 = GetOffset(offset, L, OffsetSize::kOffset16); - bne(rs, zero_reg, offset32); - } else { - // We don't want any other register but scratch clobbered. 
- scratch = GetRtAsRegisterHelper(rt, scratch); - offset32 = GetOffset(offset, L, OffsetSize::kOffset16); - bne(rs, scratch, offset32); - } - break; - - // Signed comparison. - case greater: - if (IsZero(rt)) { - offset32 = GetOffset(offset, L, OffsetSize::kOffset16); - bgtz(rs, offset32); - } else { - Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs); - offset32 = GetOffset(offset, L, OffsetSize::kOffset16); - bne(scratch, zero_reg, offset32); - } - break; - case greater_equal: - if (IsZero(rt)) { - offset32 = GetOffset(offset, L, OffsetSize::kOffset16); - bgez(rs, offset32); - } else { - Slt(scratch, rs, rt); - offset32 = GetOffset(offset, L, OffsetSize::kOffset16); - beq(scratch, zero_reg, offset32); - } - break; - case less: - if (IsZero(rt)) { - offset32 = GetOffset(offset, L, OffsetSize::kOffset16); - bltz(rs, offset32); - } else { - Slt(scratch, rs, rt); - offset32 = GetOffset(offset, L, OffsetSize::kOffset16); - bne(scratch, zero_reg, offset32); - } - break; - case less_equal: - if (IsZero(rt)) { - offset32 = GetOffset(offset, L, OffsetSize::kOffset16); - blez(rs, offset32); - } else { - Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs); - offset32 = GetOffset(offset, L, OffsetSize::kOffset16); - beq(scratch, zero_reg, offset32); - } - break; - - // Unsigned comparison. - case Ugreater: - if (IsZero(rt)) { - offset32 = GetOffset(offset, L, OffsetSize::kOffset16); - bne(rs, zero_reg, offset32); - } else { - Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs); - offset32 = GetOffset(offset, L, OffsetSize::kOffset16); - bne(scratch, zero_reg, offset32); - } - break; - case Ugreater_equal: - if (IsZero(rt)) { - offset32 = GetOffset(offset, L, OffsetSize::kOffset16); - b(offset32); - } else { - Sltu(scratch, rs, rt); - offset32 = GetOffset(offset, L, OffsetSize::kOffset16); - beq(scratch, zero_reg, offset32); - } - break; - case Uless: - if (IsZero(rt)) { - return true; // No code needs to be emitted. - } else { - Sltu(scratch, rs, rt); - offset32 = GetOffset(offset, L, OffsetSize::kOffset16); - bne(scratch, zero_reg, offset32); - } - break; - case Uless_equal: - if (IsZero(rt)) { - offset32 = GetOffset(offset, L, OffsetSize::kOffset16); - beq(rs, zero_reg, offset32); - } else { - Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs); - offset32 = GetOffset(offset, L, OffsetSize::kOffset16); - beq(scratch, zero_reg, offset32); - } - break; - default: - UNREACHABLE(); - } - } - // Emit a nop in the branch delay slot if required. 
- if (bdslot == PROTECT) nop(); - - return true; -} - -bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond, - Register rs, const Operand& rt, - BranchDelaySlot bdslot) { - BRANCH_ARGS_CHECK(cond, rs, rt); - if (!L) { - if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) { - DCHECK(is_int26(offset)); - return BranchShortHelperR6(offset, nullptr, cond, rs, rt); - } else { - DCHECK(is_int16(offset)); - return BranchShortHelper(offset, nullptr, cond, rs, rt, bdslot); - } - } else { - DCHECK_EQ(offset, 0); - if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) { - return BranchShortHelperR6(0, L, cond, rs, rt); - } else { - return BranchShortHelper(0, L, cond, rs, rt, bdslot); - } - } -} - -void TurboAssembler::BranchShort(int32_t offset, Condition cond, Register rs, - const Operand& rt, BranchDelaySlot bdslot) { - BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot); -} - -void TurboAssembler::BranchShort(Label* L, Condition cond, Register rs, - const Operand& rt, BranchDelaySlot bdslot) { - BranchShortCheck(0, L, cond, rs, rt, bdslot); -} - -void TurboAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) { - BranchAndLinkShort(offset, bdslot); -} - -void TurboAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs, - const Operand& rt, BranchDelaySlot bdslot) { - bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt, bdslot); - DCHECK(is_near); - USE(is_near); -} - -void TurboAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) { - if (L->is_bound()) { - if (is_near_branch(L)) { - BranchAndLinkShort(L, bdslot); - } else { - BranchAndLinkLong(L, bdslot); - } - } else { - if (is_trampoline_emitted()) { - BranchAndLinkLong(L, bdslot); - } else { - BranchAndLinkShort(L, bdslot); - } - } -} - -void TurboAssembler::BranchAndLink(Label* L, Condition cond, Register rs, - const Operand& rt, BranchDelaySlot bdslot) { - if (L->is_bound()) { - if (!BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot)) { - Label skip; - Condition neg_cond = NegateCondition(cond); - BranchShort(&skip, neg_cond, rs, rt); - BranchAndLinkLong(L, bdslot); - bind(&skip); - } - } else { - if (is_trampoline_emitted()) { - Label skip; - Condition neg_cond = NegateCondition(cond); - BranchShort(&skip, neg_cond, rs, rt); - BranchAndLinkLong(L, bdslot); - bind(&skip); - } else { - BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot); - } - } -} - -void TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L, - BranchDelaySlot bdslot) { - DCHECK(L == nullptr || offset == 0); - offset = GetOffset(offset, L, OffsetSize::kOffset16); - bal(offset); - - // Emit a nop in the branch delay slot if required. 
- if (bdslot == PROTECT) nop(); -} - -void TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) { - DCHECK(L == nullptr || offset == 0); - offset = GetOffset(offset, L, OffsetSize::kOffset26); - balc(offset); -} - -void TurboAssembler::BranchAndLinkShort(int32_t offset, - BranchDelaySlot bdslot) { - if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) { - DCHECK(is_int26(offset)); - BranchAndLinkShortHelperR6(offset, nullptr); - } else { - DCHECK(is_int16(offset)); - BranchAndLinkShortHelper(offset, nullptr, bdslot); - } -} - -void TurboAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) { - if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) { - BranchAndLinkShortHelperR6(0, L); - } else { - BranchAndLinkShortHelper(0, L, bdslot); - } -} - -bool TurboAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L, - Condition cond, Register rs, - const Operand& rt) { - DCHECK(L == nullptr || offset == 0); - UseScratchRegisterScope temps(this); - Register scratch = temps.hasAvailable() ? temps.Acquire() : t8; - OffsetSize bits = OffsetSize::kOffset16; - - BlockTrampolinePoolScope block_trampoline_pool(this); - DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset)); - switch (cond) { - case cc_always: - if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; - balc(offset); - break; - case eq: - if (!is_near(L, bits)) return false; - Subu(scratch, rs, rt); - offset = GetOffset(offset, L, bits); - beqzalc(scratch, offset); - break; - case ne: - if (!is_near(L, bits)) return false; - Subu(scratch, rs, rt); - offset = GetOffset(offset, L, bits); - bnezalc(scratch, offset); - break; - - // Signed comparison. - case greater: - // rs > rt - if (rs.code() == rt.rm().code()) { - break; // No code needs to be emitted. - } else if (rs == zero_reg) { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) - return false; - bltzalc(scratch, offset); - } else if (IsZero(rt)) { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; - bgtzalc(rs, offset); - } else { - if (!is_near(L, bits)) return false; - Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs); - offset = GetOffset(offset, L, bits); - bnezalc(scratch, offset); - } - break; - case greater_equal: - // rs >= rt - if (rs.code() == rt.rm().code()) { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; - balc(offset); - } else if (rs == zero_reg) { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) - return false; - blezalc(scratch, offset); - } else if (IsZero(rt)) { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; - bgezalc(rs, offset); - } else { - if (!is_near(L, bits)) return false; - Slt(scratch, rs, rt); - offset = GetOffset(offset, L, bits); - beqzalc(scratch, offset); - } - break; - case less: - // rs < rt - if (rs.code() == rt.rm().code()) { - break; // No code needs to be emitted. 
- } else if (rs == zero_reg) { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) - return false; - bgtzalc(scratch, offset); - } else if (IsZero(rt)) { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; - bltzalc(rs, offset); - } else { - if (!is_near(L, bits)) return false; - Slt(scratch, rs, rt); - offset = GetOffset(offset, L, bits); - bnezalc(scratch, offset); - } - break; - case less_equal: - // rs <= r2 - if (rs.code() == rt.rm().code()) { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset26)) return false; - balc(offset); - } else if (rs == zero_reg) { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset16, &scratch, rt)) - return false; - bgezalc(scratch, offset); - } else if (IsZero(rt)) { - if (!CalculateOffset(L, &offset, OffsetSize::kOffset16)) return false; - blezalc(rs, offset); - } else { - if (!is_near(L, bits)) return false; - Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs); - offset = GetOffset(offset, L, bits); - beqzalc(scratch, offset); - } - break; - - // Unsigned comparison. - case Ugreater: - // rs > r2 - if (!is_near(L, bits)) return false; - Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs); - offset = GetOffset(offset, L, bits); - bnezalc(scratch, offset); - break; - case Ugreater_equal: - // rs >= r2 - if (!is_near(L, bits)) return false; - Sltu(scratch, rs, rt); - offset = GetOffset(offset, L, bits); - beqzalc(scratch, offset); - break; - case Uless: - // rs < r2 - if (!is_near(L, bits)) return false; - Sltu(scratch, rs, rt); - offset = GetOffset(offset, L, bits); - bnezalc(scratch, offset); - break; - case Uless_equal: - // rs <= r2 - if (!is_near(L, bits)) return false; - Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs); - offset = GetOffset(offset, L, bits); - beqzalc(scratch, offset); - break; - default: - UNREACHABLE(); - } - return true; -} - -// Pre r6 we need to use a bgezal or bltzal, but they can't be used directly -// with the slt instructions. We could use sub or add instead but we would miss -// overflow cases, so we keep slt and add an intermediate third instruction. -bool TurboAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L, - Condition cond, Register rs, - const Operand& rt, - BranchDelaySlot bdslot) { - DCHECK(L == nullptr || offset == 0); - if (!is_near(L, OffsetSize::kOffset16)) return false; - - Register scratch = t8; - BlockTrampolinePoolScope block_trampoline_pool(this); - - switch (cond) { - case cc_always: - offset = GetOffset(offset, L, OffsetSize::kOffset16); - bal(offset); - break; - case eq: - bne(rs, GetRtAsRegisterHelper(rt, scratch), 2); - nop(); - offset = GetOffset(offset, L, OffsetSize::kOffset16); - bal(offset); - break; - case ne: - beq(rs, GetRtAsRegisterHelper(rt, scratch), 2); - nop(); - offset = GetOffset(offset, L, OffsetSize::kOffset16); - bal(offset); - break; - - // Signed comparison. 
- case greater: - Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs); - addiu(scratch, scratch, -1); - offset = GetOffset(offset, L, OffsetSize::kOffset16); - bgezal(scratch, offset); - break; - case greater_equal: - Slt(scratch, rs, rt); - addiu(scratch, scratch, -1); - offset = GetOffset(offset, L, OffsetSize::kOffset16); - bltzal(scratch, offset); - break; - case less: - Slt(scratch, rs, rt); - addiu(scratch, scratch, -1); - offset = GetOffset(offset, L, OffsetSize::kOffset16); - bgezal(scratch, offset); - break; - case less_equal: - Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs); - addiu(scratch, scratch, -1); - offset = GetOffset(offset, L, OffsetSize::kOffset16); - bltzal(scratch, offset); - break; - - // Unsigned comparison. - case Ugreater: - Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs); - addiu(scratch, scratch, -1); - offset = GetOffset(offset, L, OffsetSize::kOffset16); - bgezal(scratch, offset); - break; - case Ugreater_equal: - Sltu(scratch, rs, rt); - addiu(scratch, scratch, -1); - offset = GetOffset(offset, L, OffsetSize::kOffset16); - bltzal(scratch, offset); - break; - case Uless: - Sltu(scratch, rs, rt); - addiu(scratch, scratch, -1); - offset = GetOffset(offset, L, OffsetSize::kOffset16); - bgezal(scratch, offset); - break; - case Uless_equal: - Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs); - addiu(scratch, scratch, -1); - offset = GetOffset(offset, L, OffsetSize::kOffset16); - bltzal(scratch, offset); - break; - - default: - UNREACHABLE(); - } - - // Emit a nop in the branch delay slot if required. - if (bdslot == PROTECT) nop(); - - return true; -} - -bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L, - Condition cond, Register rs, - const Operand& rt, - BranchDelaySlot bdslot) { - BRANCH_ARGS_CHECK(cond, rs, rt); - - if (!L) { - if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) { - DCHECK(is_int26(offset)); - return BranchAndLinkShortHelperR6(offset, nullptr, cond, rs, rt); - } else { - DCHECK(is_int16(offset)); - return BranchAndLinkShortHelper(offset, nullptr, cond, rs, rt, bdslot); - } - } else { - DCHECK_EQ(offset, 0); - if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) { - return BranchAndLinkShortHelperR6(0, L, cond, rs, rt); - } else { - return BranchAndLinkShortHelper(0, L, cond, rs, rt, bdslot); - } - } -} - -void TurboAssembler::LoadFromConstantsTable(Register destination, - int constant_index) { - ASM_CODE_COMMENT(this); - DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable)); - LoadRoot(destination, RootIndex::kBuiltinsConstantsTable); - lw(destination, - FieldMemOperand(destination, - FixedArray::kHeaderSize + constant_index * kPointerSize)); -} - -void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) { - lw(destination, MemOperand(kRootRegister, offset)); -} - -void TurboAssembler::LoadRootRegisterOffset(Register destination, - intptr_t offset) { - if (offset == 0) { - Move(destination, kRootRegister); - } else { - Addu(destination, kRootRegister, offset); - } -} - -void TurboAssembler::Jump(Register target, int16_t offset, Condition cond, - Register rs, const Operand& rt, BranchDelaySlot bd) { - BlockTrampolinePoolScope block_trampoline_pool(this); - DCHECK(is_int16(offset)); - if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) { - if (cond == cc_always) { - jic(target, offset); - } else { - BRANCH_ARGS_CHECK(cond, rs, rt); - Branch(2, NegateCondition(cond), rs, rt); - jic(target, offset); - } - } else { - if (offset != 0) { - Addu(target, target, 
offset); - } - if (cond == cc_always) { - jr(target); - } else { - BRANCH_ARGS_CHECK(cond, rs, rt); - Branch(2, NegateCondition(cond), rs, rt); - jr(target); - } - // Emit a nop in the branch delay slot if required. - if (bd == PROTECT) nop(); - } -} - -void TurboAssembler::Jump(Register target, Register base, int16_t offset, - Condition cond, Register rs, const Operand& rt, - BranchDelaySlot bd) { - DCHECK(is_int16(offset)); - BlockTrampolinePoolScope block_trampoline_pool(this); - if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) { - if (cond == cc_always) { - jic(base, offset); - } else { - BRANCH_ARGS_CHECK(cond, rs, rt); - Branch(2, NegateCondition(cond), rs, rt); - jic(base, offset); - } - } else { - if (offset != 0) { - Addu(target, base, offset); - } else { // Call through target - if (target != base) mov(target, base); - } - if (cond == cc_always) { - jr(target); - } else { - BRANCH_ARGS_CHECK(cond, rs, rt); - Branch(2, NegateCondition(cond), rs, rt); - jr(target); - } - // Emit a nop in the branch delay slot if required. - if (bd == PROTECT) nop(); - } -} - -void TurboAssembler::Jump(Register target, const Operand& offset, - Condition cond, Register rs, const Operand& rt, - BranchDelaySlot bd) { - BlockTrampolinePoolScope block_trampoline_pool(this); - if (IsMipsArchVariant(kMips32r6) && bd == PROTECT && - !is_int16(offset.immediate())) { - uint32_t aui_offset, jic_offset; - Assembler::UnpackTargetAddressUnsigned(offset.immediate(), &aui_offset, - &jic_offset); - RecordRelocInfo(RelocInfo::EXTERNAL_REFERENCE, offset.immediate()); - aui(target, target, aui_offset); - if (cond == cc_always) { - jic(target, jic_offset); - } else { - BRANCH_ARGS_CHECK(cond, rs, rt); - Branch(2, NegateCondition(cond), rs, rt); - jic(target, jic_offset); - } - } else { - if (offset.immediate() != 0) { - Addu(target, target, offset); - } - if (cond == cc_always) { - jr(target); - } else { - BRANCH_ARGS_CHECK(cond, rs, rt); - Branch(2, NegateCondition(cond), rs, rt); - jr(target); - } - // Emit a nop in the branch delay slot if required. - if (bd == PROTECT) nop(); - } -} - -void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, - Condition cond, Register rs, const Operand& rt, - BranchDelaySlot bd) { - BlockTrampolinePoolScope block_trampoline_pool(this); - Label skip; - if (cond != cc_always) { - Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt); - } - // The first instruction of 'li' may be placed in the delay slot. - // This is not an issue, t9 is expected to be clobbered anyway. 
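- // On r6 with a protected slot, the 32-bit target is split into a
- // lui/jic pair; otherwise the full address is built in t9 and jumped
- // through.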
- if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
- uint32_t lui_offset, jic_offset;
- UnpackTargetAddressUnsigned(target, &lui_offset, &jic_offset);
- if (MustUseReg(rmode)) {
- RecordRelocInfo(rmode, target);
- }
- lui(t9, lui_offset);
- Jump(t9, jic_offset, al, zero_reg, Operand(zero_reg), bd);
- } else {
- li(t9, Operand(target, rmode));
- Jump(t9, 0, al, zero_reg, Operand(zero_reg), bd);
- }
- bind(&skip);
-}
-
-void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
- Register rs, const Operand& rt, BranchDelaySlot bd) {
- DCHECK(!RelocInfo::IsCodeTarget(rmode));
- Jump(static_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
-}
-
-void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
- Condition cond, Register rs, const Operand& rt,
- BranchDelaySlot bd) {
- DCHECK(RelocInfo::IsCodeTarget(rmode));
- BlockTrampolinePoolScope block_trampoline_pool(this);
-
- Builtin builtin = Builtin::kNoBuiltinId;
- bool target_is_isolate_independent_builtin =
- isolate()->builtins()->IsBuiltinHandle(code, &builtin) &&
- Builtins::IsIsolateIndependent(builtin);
- if (target_is_isolate_independent_builtin &&
- options().use_pc_relative_calls_and_jumps) {
- int32_t code_target_index = AddCodeTarget(code);
- Label skip;
- BlockTrampolinePoolScope block_trampoline_pool(this);
- if (cond != cc_always) {
- // By using delay slot, we always execute first instruction of
- // GenPcRelativeJump (which is or_(t8, ra, zero_reg)).
- Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
- }
- GenPCRelativeJump(t8, t9, code_target_index,
- RelocInfo::RELATIVE_CODE_TARGET, bd);
- bind(&skip);
- return;
- } else if (root_array_available_ && options().isolate_independent_code) {
- IndirectLoadConstant(t9, code);
- Jump(t9, Code::kHeaderSize - kHeapObjectTag, cond, rs, rt, bd);
- return;
- } else if (target_is_isolate_independent_builtin &&
- options().inline_offheap_trampolines) {
- // Inline the trampoline.
- RecordCommentForOffHeapTrampoline(builtin);
- li(t9, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
- Jump(t9, 0, cond, rs, rt, bd);
- RecordComment("]");
- return;
- }
-
- Jump(static_cast<intptr_t>(code.address()), rmode, cond, rs, rt, bd);
-}
-
-void TurboAssembler::Jump(const ExternalReference& reference) {
- li(t9, reference);
- Jump(t9);
-}
-
-void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
- unsigned higher_limit,
- Label* on_in_range) {
- ASM_CODE_COMMENT(this);
- if (lower_limit != 0) {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- Subu(scratch, value, Operand(lower_limit));
- Branch(on_in_range, ls, scratch, Operand(higher_limit - lower_limit));
- } else {
- Branch(on_in_range, ls, value, Operand(higher_limit - lower_limit));
- }
-}
-
-// Note: To call gcc-compiled C code on mips, you must call through t9.
-void TurboAssembler::Call(Register target, int16_t offset, Condition cond,
- Register rs, const Operand& rt, BranchDelaySlot bd) {
- DCHECK(is_int16(offset));
- BlockTrampolinePoolScope block_trampoline_pool(this);
- if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
- if (cond == cc_always) {
- jialc(target, offset);
- } else {
- BRANCH_ARGS_CHECK(cond, rs, rt);
- Branch(2, NegateCondition(cond), rs, rt);
- jialc(target, offset);
- }
- } else {
- if (offset != 0) {
- Addu(target, target, offset);
- }
- if (cond == cc_always) {
- jalr(target);
- } else {
- BRANCH_ARGS_CHECK(cond, rs, rt);
- Branch(2, NegateCondition(cond), rs, rt);
- jalr(target);
- }
- // Emit a nop in the branch delay slot if required.
- if (bd == PROTECT) nop();
- }
- set_pc_for_safepoint();
-}
-
-// Note: To call gcc-compiled C code on mips, you must call through t9.
-void TurboAssembler::Call(Register target, Register base, int16_t offset,
- Condition cond, Register rs, const Operand& rt,
- BranchDelaySlot bd) {
- DCHECK(is_uint16(offset));
- BlockTrampolinePoolScope block_trampoline_pool(this);
- if (IsMipsArchVariant(kMips32r6) && bd == PROTECT) {
- if (cond == cc_always) {
- jialc(base, offset);
- } else {
- BRANCH_ARGS_CHECK(cond, rs, rt);
- Branch(2, NegateCondition(cond), rs, rt);
- jialc(base, offset);
- }
- } else {
- if (offset != 0) {
- Addu(target, base, offset);
- } else { // Call through target
- if (target != base) mov(target, base);
- }
- if (cond == cc_always) {
- jalr(target);
- } else {
- BRANCH_ARGS_CHECK(cond, rs, rt);
- Branch(2, NegateCondition(cond), rs, rt);
- jalr(target);
- }
- // Emit a nop in the branch delay slot if required.
- if (bd == PROTECT) nop();
- }
- set_pc_for_safepoint();
-}
-
-void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
- Register rs, const Operand& rt, BranchDelaySlot bd) {
- CheckBuffer();
- BlockTrampolinePoolScope block_trampoline_pool(this);
- int32_t target_int = static_cast<int32_t>(target);
- if (IsMipsArchVariant(kMips32r6) && bd == PROTECT && cond == cc_always) {
- uint32_t lui_offset, jialc_offset;
- UnpackTargetAddressUnsigned(target_int, &lui_offset, &jialc_offset);
- if (MustUseReg(rmode)) {
- RecordRelocInfo(rmode, target_int);
- }
- lui(t9, lui_offset);
- Call(t9, jialc_offset, cond, rs, rt, bd);
- } else {
- li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
- Call(t9, 0, cond, rs, rt, bd);
- }
-}
-
-void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
- Condition cond, Register rs, const Operand& rt,
- BranchDelaySlot bd) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
-
- Builtin builtin = Builtin::kNoBuiltinId;
- bool target_is_isolate_independent_builtin =
- isolate()->builtins()->IsBuiltinHandle(code, &builtin) &&
- Builtins::IsIsolateIndependent(builtin);
- if (target_is_isolate_independent_builtin &&
- options().use_pc_relative_calls_and_jumps) {
- int32_t code_target_index = AddCodeTarget(code);
- Label skip;
- BlockTrampolinePoolScope block_trampoline_pool(this);
- if (cond != cc_always) {
- Branch(PROTECT, &skip, NegateCondition(cond), rs, rt);
- }
- GenPCRelativeJumpAndLink(t8, code_target_index,
- RelocInfo::RELATIVE_CODE_TARGET, bd);
- bind(&skip);
- return;
- } else if (root_array_available_ && options().isolate_independent_code) {
- IndirectLoadConstant(t9, code);
- Call(t9, Code::kHeaderSize - kHeapObjectTag, cond, rs, rt, bd);
- return;
- } else if (target_is_isolate_independent_builtin &&
- options().inline_offheap_trampolines) {
- // Inline the trampoline.
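- // Load the off-heap entry point into t9 and call through it.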
- RecordCommentForOffHeapTrampoline(builtin);
- li(t9, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
- Call(t9, 0, cond, rs, rt, bd);
- RecordComment("]");
- return;
- }
-
- DCHECK(RelocInfo::IsCodeTarget(rmode));
- DCHECK(code->IsExecutable());
- Call(code.address(), rmode, cond, rs, rt, bd);
-}
-
-void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
- ASM_CODE_COMMENT(this);
- static_assert(kSystemPointerSize == 4);
- static_assert(kSmiShiftSize == 0);
- static_assert(kSmiTagSize == 1);
- static_assert(kSmiTag == 0);
-
- // The builtin_index register contains the builtin index as a Smi.
- SmiUntag(builtin_index, builtin_index);
- Lsa(builtin_index, kRootRegister, builtin_index, kSystemPointerSizeLog2);
- lw(builtin_index,
- MemOperand(builtin_index, IsolateData::builtin_entry_table_offset()));
-}
-void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin,
- Register destination) {
- Lw(destination, EntryFromBuiltinAsOperand(builtin));
-}
-MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
- DCHECK(root_array_available());
- return MemOperand(kRootRegister,
- IsolateData::BuiltinEntrySlotOffset(builtin));
-}
-
-void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
- ASM_CODE_COMMENT(this);
- LoadEntryFromBuiltinIndex(builtin_index);
- Call(builtin_index);
-}
-void TurboAssembler::CallBuiltin(Builtin builtin) {
- RecordCommentForOffHeapTrampoline(builtin);
- Call(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET);
- RecordComment("]");
-}
-
-void TurboAssembler::PatchAndJump(Address target) {
- if (kArchVariant != kMips32r6) {
- ASM_CODE_COMMENT(this);
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- mov(scratch, ra);
- bal(1); // jump to lw
- nop(); // in the delay slot
- lw(t9, MemOperand(ra, kInstrSize * 3)); // ra == pc_
- jr(t9);
- mov(ra, scratch); // in delay slot
- DCHECK_EQ(reinterpret_cast<uintptr_t>(pc_) % 8, 0);
- *reinterpret_cast<uint32_t*>(pc_) = target;
- pc_ += sizeof(uint32_t);
- } else {
- // TODO(mips r6): Implement.
- UNIMPLEMENTED();
- }
-}
-
-void TurboAssembler::StoreReturnAddressAndCall(Register target) {
- ASM_CODE_COMMENT(this);
- // This generates the final instruction sequence for calls to C functions
- // once an exit frame has been constructed.
- //
- // Note that this assumes the caller code (i.e. the Code object currently
- // being generated) is immovable or that the callee function cannot trigger
- // GC, since the callee function will return to it.
-
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(this);
- static constexpr int kNumInstructionsToJump = 4;
- Label find_ra;
- // Adjust the value in ra to point to the correct return location, 2nd
- // instruction past the real call into C code (the jalr(t9)), and push it.
- // This is the return address of the exit frame.
- if (kArchVariant >= kMips32r6) {
- addiupc(ra, kNumInstructionsToJump + 1);
- } else {
- // This no-op-and-link sequence saves PC + 8 in ra register on pre-r6 MIPS
- nal(); // nal has branch delay slot.
- Addu(ra, ra, kNumInstructionsToJump * kInstrSize);
- }
- bind(&find_ra);
-
- // This spot was reserved in EnterExitFrame.
- sw(ra, MemOperand(sp));
- // Stack space reservation moved to the branch delay slot below.
- // Stack is still aligned.
-
- // Call the C routine.
- mov(t9, target); // Function pointer to t9 to conform to ABI for PIC.
- jalr(t9);
- // Set up sp in the delay slot.
- addiu(sp, sp, -kCArgsSlotsSize);
- // Make sure the stored 'ra' points to this position.
- DCHECK_EQ(kNumInstructionsToJump, InstructionsGeneratedSince(&find_ra)); -} - -void TurboAssembler::Ret(Condition cond, Register rs, const Operand& rt, - BranchDelaySlot bd) { - Jump(ra, 0, cond, rs, rt, bd); -} - -void TurboAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) { - if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT && - (!L->is_bound() || is_near_r6(L))) { - BranchShortHelperR6(0, L); - } else { - // Generate position independent long branch. - BlockTrampolinePoolScope block_trampoline_pool(this); - int32_t imm32; - imm32 = branch_long_offset(L); - GenPCRelativeJump(t8, t9, imm32, RelocInfo::NO_INFO, bdslot); - } -} - -void TurboAssembler::BranchLong(int32_t offset, BranchDelaySlot bdslot) { - if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT && (is_int26(offset))) { - BranchShortHelperR6(offset, nullptr); - } else { - // Generate position independent long branch. - BlockTrampolinePoolScope block_trampoline_pool(this); - GenPCRelativeJump(t8, t9, offset, RelocInfo::NO_INFO, bdslot); - } -} - -void TurboAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) { - if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT && - (!L->is_bound() || is_near_r6(L))) { - BranchAndLinkShortHelperR6(0, L); - } else { - // Generate position independent long branch and link. - BlockTrampolinePoolScope block_trampoline_pool(this); - int32_t imm32; - imm32 = branch_long_offset(L); - GenPCRelativeJumpAndLink(t8, imm32, RelocInfo::NO_INFO, bdslot); - } -} - -void TurboAssembler::DropArguments(Register count, ArgumentsCountType type, - ArgumentsCountMode mode) { - switch (type) { - case kCountIsInteger: { - Lsa(sp, sp, count, kPointerSizeLog2); - break; - } - case kCountIsSmi: { - static_assert(kSmiTagSize == 1 && kSmiTag == 0); - Lsa(sp, sp, count, kPointerSizeLog2 - kSmiTagSize, count); - break; - } - case kCountIsBytes: { - Addu(sp, sp, count); - break; - } - } - if (mode == kCountExcludesReceiver) { - Addu(sp, sp, kSystemPointerSize); - } -} - -void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc, - Register receiver, - ArgumentsCountType type, - ArgumentsCountMode mode) { - DCHECK(!AreAliased(argc, receiver)); - if (mode == kCountExcludesReceiver) { - // Drop arguments without receiver and override old receiver. - DropArguments(argc, type, kCountIncludesReceiver); - sw(receiver, MemOperand(sp)); - } else { - DropArguments(argc, type, mode); - push(receiver); - } -} - -void TurboAssembler::DropAndRet(int drop) { - int32_t drop_size = drop * kSystemPointerSize; - DCHECK(is_int31(drop_size)); - - if (is_int16(drop_size)) { - Ret(USE_DELAY_SLOT); - addiu(sp, sp, drop_size); - } else { - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - li(scratch, drop_size); - Ret(USE_DELAY_SLOT); - addu(sp, sp, scratch); - } -} - -void TurboAssembler::DropAndRet(int drop, Condition cond, Register r1, - const Operand& r2) { - // Both Drop and Ret need to be conditional. 
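- // Branch past both operations when the condition does not hold.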
- Label skip;
- if (cond != cc_always) {
- Branch(&skip, NegateCondition(cond), r1, r2);
- }
-
- Drop(drop);
- Ret();
-
- if (cond != cc_always) {
- bind(&skip);
- }
-}
-
-void TurboAssembler::Drop(int count, Condition cond, Register reg,
- const Operand& op) {
- if (count <= 0) {
- return;
- }
-
- Label skip;
-
- if (cond != al) {
- Branch(&skip, NegateCondition(cond), reg, op);
- }
-
- Addu(sp, sp, Operand(count * kPointerSize));
-
- if (cond != al) {
- bind(&skip);
- }
-}
-
-void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) {
- if (scratch == no_reg) {
- Xor(reg1, reg1, Operand(reg2));
- Xor(reg2, reg2, Operand(reg1));
- Xor(reg1, reg1, Operand(reg2));
- } else {
- mov(scratch, reg1);
- mov(reg1, reg2);
- mov(reg2, scratch);
- }
-}
-
-void TurboAssembler::Call(Label* target) { BranchAndLink(target); }
-
-void TurboAssembler::LoadAddress(Register dst, Label* target) {
- uint32_t address = jump_address(target);
- li(dst, address);
-}
-
-void TurboAssembler::Push(Handle<HeapObject> handle) {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- li(scratch, Operand(handle));
- push(scratch);
-}
-
-void TurboAssembler::Push(Smi smi) {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- li(scratch, Operand(smi));
- push(scratch);
-}
-
-void TurboAssembler::PushArray(Register array, Register size, Register scratch,
- Register scratch2, PushArrayOrder order) {
- DCHECK(!AreAliased(array, size, scratch, scratch2));
- Label loop, entry;
- if (order == PushArrayOrder::kReverse) {
- mov(scratch, zero_reg);
- jmp(&entry);
- bind(&loop);
- Lsa(scratch2, array, scratch, kPointerSizeLog2);
- Lw(scratch2, MemOperand(scratch2));
- push(scratch2);
- Addu(scratch, scratch, Operand(1));
- bind(&entry);
- Branch(&loop, less, scratch, Operand(size));
- } else {
- mov(scratch, size);
- jmp(&entry);
- bind(&loop);
- Lsa(scratch2, array, scratch, kPointerSizeLog2);
- Lw(scratch2, MemOperand(scratch2));
- push(scratch2);
- bind(&entry);
- Addu(scratch, scratch, Operand(-1));
- Branch(&loop, greater_equal, scratch, Operand(zero_reg));
- }
-}
-
-// ---------------------------------------------------------------------------
-// Exception handling.
-
-void MacroAssembler::PushStackHandler() {
- // Adjust this code if not the case.
- static_assert(StackHandlerConstants::kSize == 2 * kPointerSize);
- static_assert(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
-
- Push(Smi::zero()); // Padding.
-
- // Link the current handler as the next handler.
- li(t2,
- ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
- lw(t1, MemOperand(t2));
- push(t1);
-
- // Set this new handler as the current one.
- sw(sp, MemOperand(t2));
-}
-
-void MacroAssembler::PopStackHandler() {
- static_assert(StackHandlerConstants::kNextOffset == 0);
- pop(a1);
- Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- li(scratch,
- ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
- sw(a1, MemOperand(scratch));
-}
-
-void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
- const DoubleRegister src) {
- sub_d(dst, src, kDoubleRegZero);
-}
-
-void TurboAssembler::MovFromFloatResult(DoubleRegister dst) {
- if (IsMipsSoftFloatABI) {
- if (kArchEndian == kLittle) {
- Move(dst, v0, v1);
- } else {
- Move(dst, v1, v0);
- }
- } else {
- Move(dst, f0); // Reg f0 is o32 ABI FP return value.
- }
-}
-
-void TurboAssembler::MovFromFloatParameter(DoubleRegister dst) {
- if (IsMipsSoftFloatABI) {
- if (kArchEndian == kLittle) {
- Move(dst, a0, a1);
- } else {
- Move(dst, a1, a0);
- }
- } else {
- Move(dst, f12); // Reg f12 is o32 ABI FP first argument value.
- }
-}
-
-void TurboAssembler::MovToFloatParameter(DoubleRegister src) {
- if (!IsMipsSoftFloatABI) {
- Move(f12, src);
- } else {
- if (kArchEndian == kLittle) {
- Move(a0, a1, src);
- } else {
- Move(a1, a0, src);
- }
- }
-}
-
-void TurboAssembler::MovToFloatResult(DoubleRegister src) {
- if (!IsMipsSoftFloatABI) {
- Move(f0, src);
- } else {
- if (kArchEndian == kLittle) {
- Move(v0, v1, src);
- } else {
- Move(v1, v0, src);
- }
- }
-}
-
-void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
- DoubleRegister src2) {
- if (!IsMipsSoftFloatABI) {
- if (src2 == f12) {
- DCHECK(src1 != f14);
- Move(f14, src2);
- Move(f12, src1);
- } else {
- Move(f12, src1);
- Move(f14, src2);
- }
- } else {
- if (kArchEndian == kLittle) {
- Move(a0, a1, src1);
- Move(a2, a3, src2);
- } else {
- Move(a1, a0, src1);
- Move(a3, a2, src2);
- }
- }
-}
-
-// -----------------------------------------------------------------------------
-// JavaScript invokes.
-
-void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
- ASM_CODE_COMMENT(this);
- DCHECK(root_array_available());
- Isolate* isolate = this->isolate();
- ExternalReference limit =
- kind == StackLimitKind::kRealStackLimit
- ? ExternalReference::address_of_real_jslimit(isolate)
- : ExternalReference::address_of_jslimit(isolate);
- DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
-
- intptr_t offset =
- TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
- CHECK(is_int32(offset));
- Lw(destination, MemOperand(kRootRegister, static_cast<int32_t>(offset)));
-}
-
-void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1,
- Register scratch2,
- Label* stack_overflow) {
- ASM_CODE_COMMENT(this);
- // Check the stack for overflow. We are not trying to catch
- // interruptions (e.g. debug break and preemption) here, so the "real stack
- // limit" is checked.
-
- LoadStackLimit(scratch1, StackLimitKind::kRealStackLimit);
- // Make scratch1 the space we have left. The stack might already be overflowed
- // here which will cause scratch1 to become negative.
- subu(scratch1, sp, scratch1);
- // Check if the arguments will overflow the stack.
- sll(scratch2, num_args, kPointerSizeLog2);
- // Signed comparison.
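- // Overflow if the remaining space is not strictly larger than the
- // required argument area.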
- Branch(stack_overflow, le, scratch1, Operand(scratch2));
-}
-
-void MacroAssembler::TestCodeTIsMarkedForDeoptimizationAndJump(Register codet,
- Register scratch,
- Condition cond,
- Label* target) {
- Lw(scratch, FieldMemOperand(codet, Code::kCodeDataContainerOffset));
- Lw(scratch,
- FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset));
- And(scratch, scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
- Branch(target, cond, scratch, Operand(zero_reg));
-}
-
-Operand MacroAssembler::ClearedValue() const {
- return Operand(
- static_cast<int32_t>(HeapObjectReference::ClearedValue(isolate()).ptr()));
-}
-
-void MacroAssembler::InvokePrologue(Register expected_parameter_count,
- Register actual_parameter_count,
- Label* done, InvokeType type) {
- ASM_CODE_COMMENT(this);
- Label regular_invoke;
-
- // a0: actual arguments count
- // a1: function (passed through to callee)
- // a2: expected arguments count
-
- DCHECK_EQ(actual_parameter_count, a0);
- DCHECK_EQ(expected_parameter_count, a2);
-
- // If the expected parameter count is equal to the adaptor sentinel, no need
- // to push undefined value as arguments.
- if (kDontAdaptArgumentsSentinel != 0) {
- Branch(&regular_invoke, eq, expected_parameter_count,
- Operand(kDontAdaptArgumentsSentinel));
- }
-
- // If overapplication or if the actual argument count is equal to the
- // formal parameter count, no need to push extra undefined values.
- Subu(expected_parameter_count, expected_parameter_count,
- actual_parameter_count);
- Branch(&regular_invoke, le, expected_parameter_count, Operand(zero_reg));
-
- Label stack_overflow;
- StackOverflowCheck(expected_parameter_count, t0, t1, &stack_overflow);
- // Underapplication. Move the arguments already in the stack, including the
- // receiver and the return address.
- {
- Label copy;
- Register src = t3, dest = t4;
- mov(src, sp);
- sll(t0, expected_parameter_count, kSystemPointerSizeLog2);
- Subu(sp, sp, Operand(t0));
- // Update stack pointer.
- mov(dest, sp);
- mov(t0, a0);
- bind(&copy);
- Lw(t1, MemOperand(src, 0));
- Sw(t1, MemOperand(dest, 0));
- Subu(t0, t0, Operand(1));
- Addu(src, src, Operand(kSystemPointerSize));
- Addu(dest, dest, Operand(kSystemPointerSize));
- Branch(&copy, gt, t0, Operand(zero_reg));
- }
-
- // Fill remaining expected arguments with undefined values.
- LoadRoot(t0, RootIndex::kUndefinedValue);
- {
- Label loop;
- bind(&loop);
- Sw(t0, MemOperand(t4, 0));
- Subu(expected_parameter_count, expected_parameter_count, Operand(1));
- Addu(t4, t4, Operand(kSystemPointerSize));
- Branch(&loop, gt, expected_parameter_count, Operand(zero_reg));
- }
- b(&regular_invoke);
- nop();
-
- bind(&stack_overflow);
- {
- FrameScope frame(
- this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
- CallRuntime(Runtime::kThrowStackOverflow);
- break_(0xCC);
- }
-
- bind(&regular_invoke);
-}
-
-void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
- Register expected_parameter_count,
- Register actual_parameter_count) {
- Label skip_hook;
- li(t0, ExternalReference::debug_hook_on_function_call_address(isolate()));
- lb(t0, MemOperand(t0));
- Branch(&skip_hook, eq, t0, Operand(zero_reg));
-
- {
- // Load receiver to pass it later to DebugOnFunctionCall hook.
- LoadReceiver(t0, actual_parameter_count);
-
- FrameScope frame(
- this, has_frame() ?
StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL); - SmiTag(expected_parameter_count); - Push(expected_parameter_count); - - SmiTag(actual_parameter_count); - Push(actual_parameter_count); - - if (new_target.is_valid()) { - Push(new_target); - } - Push(fun); - Push(fun); - Push(t0); - CallRuntime(Runtime::kDebugOnFunctionCall); - Pop(fun); - if (new_target.is_valid()) { - Pop(new_target); - } - - Pop(actual_parameter_count); - SmiUntag(actual_parameter_count); - - Pop(expected_parameter_count); - SmiUntag(expected_parameter_count); - } - bind(&skip_hook); -} - -void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, - Register expected_parameter_count, - Register actual_parameter_count, - InvokeType type) { - // You can't call a function without a valid frame. - DCHECK_IMPLIES(type == InvokeType::kCall, has_frame()); - DCHECK_EQ(function, a1); - DCHECK_IMPLIES(new_target.is_valid(), new_target == a3); - - // On function call, call into the debugger if necessary. - CheckDebugHook(function, new_target, expected_parameter_count, - actual_parameter_count); - - // Clear the new.target register if not given. - if (!new_target.is_valid()) { - LoadRoot(a3, RootIndex::kUndefinedValue); - } - - Label done; - InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type); - // We call indirectly through the code field in the function to - // allow recompilation to take effect without changing any of the - // call sites. - Register code = kJavaScriptCallCodeStartRegister; - lw(code, FieldMemOperand(function, JSFunction::kCodeOffset)); - switch (type) { - case InvokeType::kCall: - Addu(code, code, Code::kHeaderSize - kHeapObjectTag); - Call(code); - break; - case InvokeType::kJump: - Addu(code, code, Code::kHeaderSize - kHeapObjectTag); - Jump(code); - break; - } - - // Continue here if InvokePrologue does handle the invocation due to - // mismatched parameter counts. - bind(&done); -} - -void MacroAssembler::InvokeFunctionWithNewTarget( - Register function, Register new_target, Register actual_parameter_count, - InvokeType type) { - ASM_CODE_COMMENT(this); - // You can't call a function without a valid frame. - DCHECK_IMPLIES(type == InvokeType::kCall, has_frame()); - - // Contract with called JS functions requires that function is passed in a1. - DCHECK_EQ(function, a1); - Register expected_reg = a2; - Register temp_reg = t0; - - lw(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); - lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); - lhu(expected_reg, - FieldMemOperand(temp_reg, - SharedFunctionInfo::kFormalParameterCountOffset)); - - InvokeFunctionCode(function, new_target, expected_reg, actual_parameter_count, - type); -} - -void MacroAssembler::InvokeFunction(Register function, - Register expected_parameter_count, - Register actual_parameter_count, - InvokeType type) { - ASM_CODE_COMMENT(this); - // You can't call a function without a valid frame. - DCHECK_IMPLIES(type == InvokeType::kCall, has_frame()); - - // Contract with called JS functions requires that function is passed in a1. - DCHECK_EQ(function, a1); - - // Get the function and setup the context. - lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); - - InvokeFunctionCode(a1, no_reg, expected_parameter_count, - actual_parameter_count, type); -} - -// --------------------------------------------------------------------------- -// Support functions. 
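- // Map and instance-type helpers used by the type checks and asserts below.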
- -void MacroAssembler::GetObjectType(Register object, Register map, - Register type_reg) { - LoadMap(map, object); - lhu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); -} - -void MacroAssembler::GetInstanceTypeRange(Register map, Register type_reg, - InstanceType lower_limit, - Register range) { - lhu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); - Subu(range, type_reg, Operand(lower_limit)); -} - -// ----------------------------------------------------------------------------- -// Runtime calls. - -void TurboAssembler::AddOverflow(Register dst, Register left, - const Operand& right, Register overflow) { - ASM_CODE_COMMENT(this); - BlockTrampolinePoolScope block_trampoline_pool(this); - Register right_reg = no_reg; - Register scratch = t8; - if (!right.is_reg()) { - li(at, Operand(right)); - right_reg = at; - } else { - right_reg = right.rm(); - } - - DCHECK(left != scratch && right_reg != scratch && dst != scratch && - overflow != scratch); - DCHECK(overflow != left && overflow != right_reg); - - if (dst == left || dst == right_reg) { - addu(scratch, left, right_reg); - xor_(overflow, scratch, left); - xor_(at, scratch, right_reg); - and_(overflow, overflow, at); - mov(dst, scratch); - } else { - addu(dst, left, right_reg); - xor_(overflow, dst, left); - xor_(at, dst, right_reg); - and_(overflow, overflow, at); - } -} - -void TurboAssembler::SubOverflow(Register dst, Register left, - const Operand& right, Register overflow) { - ASM_CODE_COMMENT(this); - BlockTrampolinePoolScope block_trampoline_pool(this); - Register right_reg = no_reg; - Register scratch = t8; - if (!right.is_reg()) { - li(at, Operand(right)); - right_reg = at; - } else { - right_reg = right.rm(); - } - - DCHECK(left != scratch && right_reg != scratch && dst != scratch && - overflow != scratch); - DCHECK(overflow != left && overflow != right_reg); - - if (dst == left || dst == right_reg) { - subu(scratch, left, right_reg); - xor_(overflow, left, scratch); - xor_(at, left, right_reg); - and_(overflow, overflow, at); - mov(dst, scratch); - } else { - subu(dst, left, right_reg); - xor_(overflow, left, dst); - xor_(at, left, right_reg); - and_(overflow, overflow, at); - } -} - -void TurboAssembler::MulOverflow(Register dst, Register left, - const Operand& right, Register overflow) { - ASM_CODE_COMMENT(this); - BlockTrampolinePoolScope block_trampoline_pool(this); - Register right_reg = no_reg; - Register scratch = t8; - Register scratch2 = t9; - if (!right.is_reg()) { - li(at, Operand(right)); - right_reg = at; - } else { - right_reg = right.rm(); - } - - DCHECK(left != scratch && right_reg != scratch && dst != scratch && - overflow != scratch); - DCHECK(overflow != left && overflow != right_reg); - - if (dst == left || dst == right_reg) { - Mul(overflow, scratch2, left, right_reg); - sra(scratch, scratch2, 31); - xor_(overflow, overflow, scratch); - mov(dst, scratch2); - } else { - Mul(overflow, dst, left, right_reg); - sra(scratch, dst, 31); - xor_(overflow, overflow, scratch); - } -} - -void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments, - SaveFPRegsMode save_doubles) { - ASM_CODE_COMMENT(this); - // All parameters are on the stack. v0 has the return value after call. - - // If the expected number of arguments of the runtime function is - // constant, we check that the actual number of arguments match the - // expectation. 
- CHECK(f->nargs < 0 || f->nargs == num_arguments);
-
- // TODO(1236192): Most runtime routines don't need the number of
- // arguments passed in because it is constant. At some point we
- // should remove this need and make the runtime routine entry code
- // smarter.
- PrepareCEntryArgs(num_arguments);
- PrepareCEntryFunction(ExternalReference::Create(f));
- Handle<Code> code =
- CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
- Call(code, RelocInfo::CODE_TARGET);
-}
-
-void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
- ASM_CODE_COMMENT(this);
- const Runtime::Function* function = Runtime::FunctionForId(fid);
- DCHECK_EQ(1, function->result_size);
- if (function->nargs >= 0) {
- PrepareCEntryArgs(function->nargs);
- }
- JumpToExternalReference(ExternalReference::Create(fid));
-}
-
-void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
- BranchDelaySlot bd,
- bool builtin_exit_frame) {
- PrepareCEntryFunction(builtin);
- Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
- ArgvMode::kStack, builtin_exit_frame);
- Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg), bd);
-}
-
-void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
- li(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
- Jump(kOffHeapTrampolineRegister);
-}
-
-void MacroAssembler::LoadWeakValue(Register out, Register in,
- Label* target_if_cleared) {
- Branch(target_if_cleared, eq, in, Operand(kClearedWeakHeapObjectLower32));
-
- And(out, in, Operand(~kWeakHeapObjectMask));
-}
-
-void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value,
- Register scratch1,
- Register scratch2) {
- DCHECK_GT(value, 0);
- if (v8_flags.native_code_counters && counter->Enabled()) {
- ASM_CODE_COMMENT(this);
- li(scratch2, ExternalReference::Create(counter));
- lw(scratch1, MemOperand(scratch2));
- Addu(scratch1, scratch1, Operand(value));
- sw(scratch1, MemOperand(scratch2));
- }
-}
-
-void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
- Register scratch1,
- Register scratch2) {
- DCHECK_GT(value, 0);
- if (v8_flags.native_code_counters && counter->Enabled()) {
- ASM_CODE_COMMENT(this);
- li(scratch2, ExternalReference::Create(counter));
- lw(scratch1, MemOperand(scratch2));
- Subu(scratch1, scratch1, Operand(value));
- sw(scratch1, MemOperand(scratch2));
- }
-}
-
-// -----------------------------------------------------------------------------
-// Debugging.
-
-void TurboAssembler::Trap() { stop(); }
-void TurboAssembler::DebugBreak() { stop(); }
-
-void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs,
- Operand rt) {
- if (v8_flags.debug_code) Check(cc, reason, rs, rt);
-}
-
-void TurboAssembler::Check(Condition cc, AbortReason reason, Register rs,
- Operand rt) {
- Label L;
- Branch(&L, cc, rs, rt);
- Abort(reason);
- // Will not return here.
- bind(&L);
-}
-
-void TurboAssembler::Abort(AbortReason reason) {
- Label abort_start;
- bind(&abort_start);
- if (v8_flags.code_comments) {
- const char* msg = GetAbortReason(reason);
- RecordComment("Abort message: ");
- RecordComment(msg);
- }
-
- // Avoid emitting call to builtin if requested.
- if (trap_on_abort()) {
- stop();
- return;
- }
-
- if (should_abort_hard()) {
- // We don't care if we constructed a frame. Just pretend we did.
- FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
- PrepareCallCFunction(0, a0);
- li(a0, Operand(static_cast<int>(reason)));
- CallCFunction(ExternalReference::abort_with_reason(), 1);
- return;
- }
-
- Move(a0, Smi::FromInt(static_cast<int>(reason)));
-
- // Disable stub call restrictions to always allow calls to abort.
- if (!has_frame_) {
- // We don't actually want to generate a pile of code for this, so just
- // claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
- Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
- } else {
- Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
- }
- // Will not return here.
- if (is_trampoline_pool_blocked()) {
- // If the calling code cares about the exact number of
- // instructions generated, we insert padding here to keep the size
- // of the Abort macro constant.
- // Currently in debug mode with debug_code enabled the number of
- // generated instructions is 10, so we use this as a maximum value.
- static const int kExpectedAbortInstructions = 10;
- int abort_instructions = InstructionsGeneratedSince(&abort_start);
- DCHECK_LE(abort_instructions, kExpectedAbortInstructions);
- while (abort_instructions++ < kExpectedAbortInstructions) {
- nop();
- }
- }
-}
-
-void TurboAssembler::LoadMap(Register destination, Register object) {
- Lw(destination, FieldMemOperand(object, HeapObject::kMapOffset));
-}
-
-void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
- LoadMap(dst, cp);
- Lw(dst,
- FieldMemOperand(dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
- Lw(dst, MemOperand(dst, Context::SlotOffset(index)));
-}
-
-void TurboAssembler::StubPrologue(StackFrame::Type type) {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- li(scratch, Operand(StackFrame::TypeToMarker(type)));
- PushCommonFrame(scratch);
-}
-
-void TurboAssembler::Prologue() { PushStandardFrame(a1); }
-
-void TurboAssembler::EnterFrame(StackFrame::Type type) {
- ASM_CODE_COMMENT(this);
- BlockTrampolinePoolScope block_trampoline_pool(this);
- Push(ra, fp);
- Move(fp, sp);
- if (!StackFrame::IsJavaScript(type)) {
- li(kScratchReg, Operand(StackFrame::TypeToMarker(type)));
- Push(kScratchReg);
- }
-#if V8_ENABLE_WEBASSEMBLY
- if (type == StackFrame::WASM) Push(kWasmInstanceRegister);
-#endif // V8_ENABLE_WEBASSEMBLY
-}
-
-void TurboAssembler::LeaveFrame(StackFrame::Type type) {
- ASM_CODE_COMMENT(this);
- addiu(sp, fp, 2 * kPointerSize);
- lw(ra, MemOperand(fp, 1 * kPointerSize));
- lw(fp, MemOperand(fp, 0 * kPointerSize));
-}
-
-void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
- StackFrame::Type frame_type) {
- ASM_CODE_COMMENT(this);
- BlockTrampolinePoolScope block_trampoline_pool(this);
- DCHECK(frame_type == StackFrame::EXIT ||
- frame_type == StackFrame::BUILTIN_EXIT);
-
- // Set up the frame structure on the stack.
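- // The asserts below pin the layout encoded in ExitFrameConstants.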
- static_assert(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
- static_assert(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
- static_assert(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
-
- // This is how the stack will look:
- // fp + 2 (==kCallerSPDisplacement) - old stack's end
- // [fp + 1 (==kCallerPCOffset)] - saved old ra
- // [fp + 0 (==kCallerFPOffset)] - saved old fp
- // [fp - 1] - StackFrame::EXIT Smi
- // [fp - 2 (==kSPOffset)] - sp of the called function
- // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
- // new stack (will contain saved ra)
-
- // Save registers and reserve room for saved entry sp.
- addiu(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
- sw(ra, MemOperand(sp, 3 * kPointerSize));
- sw(fp, MemOperand(sp, 2 * kPointerSize));
- {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- li(scratch, Operand(StackFrame::TypeToMarker(frame_type)));
- sw(scratch, MemOperand(sp, 1 * kPointerSize));
- }
- // Set up new frame pointer.
- addiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
-
- if (v8_flags.debug_code) {
- sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
- }
-
- // Save the frame pointer and the context in top.
- li(t8,
- ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate()));
- sw(fp, MemOperand(t8));
- li(t8,
- ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
- sw(cp, MemOperand(t8));
-
- const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
- if (save_doubles) {
- // The stack must be aligned to 0 modulo 8 for stores with sdc1.
- DCHECK_EQ(kDoubleSize, frame_alignment);
- if (frame_alignment > 0) {
- DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
- And(sp, sp, Operand(-frame_alignment)); // Align stack.
- }
- int space = FPURegister::kNumRegisters * kDoubleSize;
- Subu(sp, sp, Operand(space));
- // Remember: we only need to save every 2nd double FPU value.
- for (int i = 0; i < FPURegister::kNumRegisters; i += 2) {
- FPURegister reg = FPURegister::from_code(i);
- Sdc1(reg, MemOperand(sp, i * kDoubleSize));
- }
- }
-
- // Reserve place for the return address, stack space and an optional slot
- // (used by DirectCEntry to hold the return value if a struct is
- // returned) and align the frame preparing for calling the runtime function.
- DCHECK_GE(stack_space, 0);
- Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
- if (frame_alignment > 0) {
- DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
- And(sp, sp, Operand(-frame_alignment)); // Align stack.
- }
-
- // Set the exit frame sp value to point just before the return address
- // location.
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- addiu(scratch, sp, kPointerSize);
- sw(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
-}
-
-void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
- bool do_return,
- bool argument_count_is_length) {
- ASM_CODE_COMMENT(this);
- BlockTrampolinePoolScope block_trampoline_pool(this);
- // Optionally restore all double registers.
- if (save_doubles) {
- // Remember: we only need to restore every 2nd double FPU value.
- lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
- for (int i = 0; i < FPURegister::kNumRegisters; i += 2) {
- FPURegister reg = FPURegister::from_code(i);
- Ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize));
- }
- }
-
- // Clear top frame.
- li(t8, - ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate())); - sw(zero_reg, MemOperand(t8)); - - // Restore current context from top and clear it in debug mode. - li(t8, - ExternalReference::Create(IsolateAddressId::kContextAddress, isolate())); - lw(cp, MemOperand(t8)); - -#ifdef DEBUG - li(t8, - ExternalReference::Create(IsolateAddressId::kContextAddress, isolate())); - sw(a3, MemOperand(t8)); -#endif - - // Pop the arguments, restore registers, and return. - mov(sp, fp); // Respect ABI stack constraint. - lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset)); - lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset)); - - if (argument_count.is_valid()) { - if (argument_count_is_length) { - addu(sp, sp, argument_count); - } else { - Lsa(sp, sp, argument_count, kPointerSizeLog2, t8); - } - } - - if (do_return) { - Ret(USE_DELAY_SLOT); - // If returning, the instruction in the delay slot will be the addiu below. - } - addiu(sp, sp, 8); -} - -int TurboAssembler::ActivationFrameAlignment() { -#if V8_HOST_ARCH_MIPS - // Running on the real platform. Use the alignment as mandated by the local - // environment. - // Note: This will break if we ever start generating snapshots on one Mips - // platform for another Mips platform with a different alignment. - return base::OS::ActivationFrameAlignment(); -#else // V8_HOST_ARCH_MIPS - // If we are using the simulator then we should always align to the expected - // alignment. As the simulator is used to generate snapshots we do not know - // if the target platform will need alignment, so this is controlled from a - // flag. - return v8_flags.sim_stack_alignment; -#endif // V8_HOST_ARCH_MIPS -} - -void MacroAssembler::AssertStackIsAligned() { - if (v8_flags.debug_code) { - ASM_CODE_COMMENT(this); - const int frame_alignment = ActivationFrameAlignment(); - const int frame_alignment_mask = frame_alignment - 1; - - if (frame_alignment > kPointerSize) { - Label alignment_as_expected; - DCHECK(base::bits::IsPowerOfTwo(frame_alignment)); - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - andi(scratch, sp, frame_alignment_mask); - Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg)); - // Don't use Check here, as it will call Runtime_Abort re-entering here. 
- stop(); - bind(&alignment_as_expected); - } - } -} - -void TurboAssembler::JumpIfSmi(Register value, Label* smi_label, - BranchDelaySlot bd) { - DCHECK_EQ(0, kSmiTag); - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - andi(scratch, value, kSmiTagMask); - Branch(bd, smi_label, eq, scratch, Operand(zero_reg)); -} - -void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label, - BranchDelaySlot bd) { - DCHECK_EQ(0, kSmiTag); - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - andi(scratch, value, kSmiTagMask); - Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg)); -} - -void MacroAssembler::AssertNotSmi(Register object) { - if (v8_flags.debug_code) { - ASM_CODE_COMMENT(this); - static_assert(kSmiTag == 0); - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - andi(scratch, object, kSmiTagMask); - Check(ne, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg)); - } -} - -void MacroAssembler::AssertSmi(Register object) { - if (v8_flags.debug_code) { - ASM_CODE_COMMENT(this); - static_assert(kSmiTag == 0); - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - andi(scratch, object, kSmiTagMask); - Check(eq, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg)); - } -} - -void MacroAssembler::AssertConstructor(Register object) { - if (v8_flags.debug_code) { - ASM_CODE_COMMENT(this); - BlockTrampolinePoolScope block_trampoline_pool(this); - static_assert(kSmiTag == 0); - SmiTst(object, t8); - Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, t8, - Operand(zero_reg)); - - LoadMap(t8, object); - lbu(t8, FieldMemOperand(t8, Map::kBitFieldOffset)); - And(t8, t8, Operand(Map::Bits1::IsConstructorBit::kMask)); - Check(ne, AbortReason::kOperandIsNotAConstructor, t8, Operand(zero_reg)); - } -} - -void MacroAssembler::AssertFunction(Register object) { - if (v8_flags.debug_code) { - ASM_CODE_COMMENT(this); - BlockTrampolinePoolScope block_trampoline_pool(this); - static_assert(kSmiTag == 0); - SmiTst(object, t8); - Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, t8, - Operand(zero_reg)); - push(object); - LoadMap(object, object); - GetInstanceTypeRange(object, object, FIRST_JS_FUNCTION_TYPE, t8); - Check(ls, AbortReason::kOperandIsNotAFunction, t8, - Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE)); - pop(object); - } -} - -void MacroAssembler::AssertCallableFunction(Register object) { - if (v8_flags.debug_code) { - ASM_CODE_COMMENT(this); - BlockTrampolinePoolScope block_trampoline_pool(this); - static_assert(kSmiTag == 0); - SmiTst(object, t8); - Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, t8, - Operand(zero_reg)); - push(object); - LoadMap(object, object); - GetInstanceTypeRange(object, object, FIRST_CALLABLE_JS_FUNCTION_TYPE, t8); - Check(ls, AbortReason::kOperandIsNotACallableFunction, t8, - Operand(LAST_CALLABLE_JS_FUNCTION_TYPE - - FIRST_CALLABLE_JS_FUNCTION_TYPE)); - pop(object); - } -} - -void MacroAssembler::AssertBoundFunction(Register object) { - if (v8_flags.debug_code) { - ASM_CODE_COMMENT(this); - BlockTrampolinePoolScope block_trampoline_pool(this); - static_assert(kSmiTag == 0); - SmiTst(object, t8); - Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, t8, - Operand(zero_reg)); - GetObjectType(object, t8, t8); - Check(eq, AbortReason::kOperandIsNotABoundFunction, t8, - Operand(JS_BOUND_FUNCTION_TYPE)); - } -} - -void MacroAssembler::AssertGeneratorObject(Register object) { - if (!v8_flags.debug_code) return; - 
ASM_CODE_COMMENT(this); - BlockTrampolinePoolScope block_trampoline_pool(this); - static_assert(kSmiTag == 0); - SmiTst(object, t8); - Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, t8, - Operand(zero_reg)); - - GetObjectType(object, t8, t8); - - Label done; - - // Check if JSGeneratorObject - Branch(&done, eq, t8, Operand(JS_GENERATOR_OBJECT_TYPE)); - - // Check if JSAsyncFunctionObject (See MacroAssembler::CompareInstanceType) - Branch(&done, eq, t8, Operand(JS_ASYNC_FUNCTION_OBJECT_TYPE)); - - // Check if JSAsyncGeneratorObject - Branch(&done, eq, t8, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE)); - - Abort(AbortReason::kOperandIsNotAGeneratorObject); - - bind(&done); -} - -void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, - Register scratch) { - if (v8_flags.debug_code) { - ASM_CODE_COMMENT(this); - Label done_checking; - AssertNotSmi(object); - LoadRoot(scratch, RootIndex::kUndefinedValue); - Branch(&done_checking, eq, object, Operand(scratch)); - GetObjectType(object, scratch, scratch); - Assert(eq, AbortReason::kExpectedUndefinedOrCell, scratch, - Operand(ALLOCATION_SITE_TYPE)); - bind(&done_checking); - } -} - -void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1, - FPURegister src2, Label* out_of_line) { - ASM_CODE_COMMENT(this); - if (src1 == src2) { - Move_s(dst, src1); - return; - } - - // Check if one of operands is NaN. - CompareIsNanF32(src1, src2); - BranchTrueF(out_of_line); - - if (IsMipsArchVariant(kMips32r6)) { - max_s(dst, src1, src2); - } else { - Label return_left, return_right, done; - - CompareF32(OLT, src1, src2); - BranchTrueShortF(&return_right); - CompareF32(OLT, src2, src1); - BranchTrueShortF(&return_left); - - // Operands are equal, but check for +/-0. - { - BlockTrampolinePoolScope block_trampoline_pool(this); - mfc1(t8, src1); - Branch(&return_left, eq, t8, Operand(zero_reg)); - Branch(&return_right); - } - - bind(&return_right); - if (src2 != dst) { - Move_s(dst, src2); - } - Branch(&done); - - bind(&return_left); - if (src1 != dst) { - Move_s(dst, src1); - } - - bind(&done); - } -} - -void TurboAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1, - FPURegister src2) { - add_s(dst, src1, src2); -} - -void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1, - FPURegister src2, Label* out_of_line) { - ASM_CODE_COMMENT(this); - if (src1 == src2) { - Move_s(dst, src1); - return; - } - - // Check if one of operands is NaN. - CompareIsNanF32(src1, src2); - BranchTrueF(out_of_line); - - if (IsMipsArchVariant(kMips32r6)) { - min_s(dst, src1, src2); - } else { - Label return_left, return_right, done; - - CompareF32(OLT, src1, src2); - BranchTrueShortF(&return_left); - CompareF32(OLT, src2, src1); - BranchTrueShortF(&return_right); - - // Left equals right => check for -0. - { - BlockTrampolinePoolScope block_trampoline_pool(this); - mfc1(t8, src1); - Branch(&return_right, eq, t8, Operand(zero_reg)); - Branch(&return_left); - } - - bind(&return_right); - if (src2 != dst) { - Move_s(dst, src2); - } - Branch(&done); - - bind(&return_left); - if (src1 != dst) { - Move_s(dst, src1); - } - - bind(&done); - } -} - -void TurboAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1, - FPURegister src2) { - add_s(dst, src1, src2); -} - -void TurboAssembler::Float64Max(DoubleRegister dst, DoubleRegister src1, - DoubleRegister src2, Label* out_of_line) { - ASM_CODE_COMMENT(this); - if (src1 == src2) { - Move_d(dst, src1); - return; - } - - // Check if one of operands is NaN. 
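- // NaNs take the caller-provided out-of-line path; Float64MaxOutOfLine
- // adds the operands, which propagates the NaN into dst.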
- CompareIsNanF64(src1, src2); - BranchTrueF(out_of_line); - - if (IsMipsArchVariant(kMips32r6)) { - max_d(dst, src1, src2); - } else { - Label return_left, return_right, done; - - CompareF64(OLT, src1, src2); - BranchTrueShortF(&return_right); - CompareF64(OLT, src2, src1); - BranchTrueShortF(&return_left); - - // Left equals right => check for -0. - { - BlockTrampolinePoolScope block_trampoline_pool(this); - Mfhc1(t8, src1); - Branch(&return_left, eq, t8, Operand(zero_reg)); - Branch(&return_right); - } - - bind(&return_right); - if (src2 != dst) { - Move_d(dst, src2); - } - Branch(&done); - - bind(&return_left); - if (src1 != dst) { - Move_d(dst, src1); - } - - bind(&done); - } -} - -void TurboAssembler::Float64MaxOutOfLine(DoubleRegister dst, - DoubleRegister src1, - DoubleRegister src2) { - add_d(dst, src1, src2); -} - -void TurboAssembler::Float64Min(DoubleRegister dst, DoubleRegister src1, - DoubleRegister src2, Label* out_of_line) { - ASM_CODE_COMMENT(this); - if (src1 == src2) { - Move_d(dst, src1); - return; - } - - // Check if one of operands is NaN. - CompareIsNanF64(src1, src2); - BranchTrueF(out_of_line); - - if (IsMipsArchVariant(kMips32r6)) { - min_d(dst, src1, src2); - } else { - Label return_left, return_right, done; - - CompareF64(OLT, src1, src2); - BranchTrueShortF(&return_left); - CompareF64(OLT, src2, src1); - BranchTrueShortF(&return_right); - - // Left equals right => check for -0. - { - BlockTrampolinePoolScope block_trampoline_pool(this); - Mfhc1(t8, src1); - Branch(&return_right, eq, t8, Operand(zero_reg)); - Branch(&return_left); - } - - bind(&return_right); - if (src2 != dst) { - Move_d(dst, src2); - } - Branch(&done); - - bind(&return_left); - if (src1 != dst) { - Move_d(dst, src1); - } - - bind(&done); - } -} - -void TurboAssembler::Float64MinOutOfLine(DoubleRegister dst, - DoubleRegister src1, - DoubleRegister src2) { - add_d(dst, src1, src2); -} - -static const int kRegisterPassedArguments = 4; - -int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments, - int num_double_arguments) { - int stack_passed_words = 0; - num_reg_arguments += 2 * num_double_arguments; - - // Up to four simple arguments are passed in registers a0..a3. - if (num_reg_arguments > kRegisterPassedArguments) { - stack_passed_words += num_reg_arguments - kRegisterPassedArguments; - } - stack_passed_words += kCArgSlotCount; - return stack_passed_words; -} - -void TurboAssembler::PrepareCallCFunction(int num_reg_arguments, - int num_double_arguments, - Register scratch) { - ASM_CODE_COMMENT(this); - int frame_alignment = ActivationFrameAlignment(); - - // Up to four simple arguments are passed in registers a0..a3. - // Those four arguments must have reserved argument slots on the stack for - // mips, even though those argument slots are not normally used. - // Remaining arguments are pushed on the stack, above (higher address than) - // the argument slots. - int stack_passed_arguments = - CalculateStackPassedWords(num_reg_arguments, num_double_arguments); - if (frame_alignment > kPointerSize) { - // Make stack end at alignment and make room for num_arguments - 4 words - // and the original value of sp. 
- mov(scratch, sp);
- Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
- DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
- And(sp, sp, Operand(-frame_alignment));
- sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
- } else {
- Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
- }
-}
-
-void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
- Register scratch) {
- PrepareCallCFunction(num_reg_arguments, 0, scratch);
-}
-
-void TurboAssembler::CallCFunction(ExternalReference function,
- int num_reg_arguments,
- int num_double_arguments) {
- ASM_CODE_COMMENT(this);
- // Linux/MIPS convention demands that register t9 contains
- // the address of the function being called, in the case of
- // position independent code.
- BlockTrampolinePoolScope block_trampoline_pool(this);
- li(t9, function);
- CallCFunctionHelper(t9, 0, num_reg_arguments, num_double_arguments);
-}
-
-void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
- int num_double_arguments) {
- ASM_CODE_COMMENT(this);
- CallCFunctionHelper(function, 0, num_reg_arguments, num_double_arguments);
-}
-
-void TurboAssembler::CallCFunction(ExternalReference function,
- int num_arguments) {
- CallCFunction(function, num_arguments, 0);
-}
-
-void TurboAssembler::CallCFunction(Register function, int num_arguments) {
- CallCFunction(function, num_arguments, 0);
-}
-
-void TurboAssembler::CallCFunctionHelper(Register function_base,
- int16_t function_offset,
- int num_reg_arguments,
- int num_double_arguments) {
- DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
- DCHECK(has_frame());
- // Make sure that the stack is aligned before calling a C function unless
- // running in the simulator. The simulator has its own alignment check which
- // provides more information.
- // The argument slots are presumed to have been set up by
- // PrepareCallCFunction. The C function must be called via t9, for mips ABI.
-
-#if V8_HOST_ARCH_MIPS
- if (v8_flags.debug_code) {
- int frame_alignment = base::OS::ActivationFrameAlignment();
- int frame_alignment_mask = frame_alignment - 1;
- if (frame_alignment > kPointerSize) {
- DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
- Label alignment_as_expected;
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- And(scratch, sp, Operand(frame_alignment_mask));
- Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
- // Don't use Check here, as it will call Runtime_Abort possibly
- // re-entering here.
- stop();
- bind(&alignment_as_expected);
- }
- }
-#endif // V8_HOST_ARCH_MIPS
-
- // Just call directly. The function called cannot cause a GC, or
- // allow preemption, so the return address in the link register
- // stays correct.
-
- {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- if (function_base != t9) {
- mov(t9, function_base);
- function_base = t9;
- }
-
- if (function_offset != 0) {
- addiu(t9, t9, function_offset);
- function_offset = 0;
- }
-
- // Save the frame pointer and PC so that the stack layout remains iterable,
- // even without an ExitFrame which normally exists between JS and C frames.
- // 't' registers are caller-saved so this is safe as a scratch register.
- Register pc_scratch = t4;
- Register scratch = t5;
- DCHECK(!AreAliased(pc_scratch, scratch, function_base));
-
- mov(scratch, ra);
- nal();
- mov(pc_scratch, ra);
- mov(ra, scratch);
-
- // See x64 code for reasoning about how to address the isolate data fields.
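- // Record the caller PC and FP either through the root register or via
- // external references, depending on what is available.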
- if (root_array_available()) { - sw(pc_scratch, MemOperand(kRootRegister, - IsolateData::fast_c_call_caller_pc_offset())); - sw(fp, MemOperand(kRootRegister, - IsolateData::fast_c_call_caller_fp_offset())); - } else { - DCHECK_NOT_NULL(isolate()); - li(scratch, ExternalReference::fast_c_call_caller_pc_address(isolate())); - sw(pc_scratch, MemOperand(scratch)); - li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate())); - sw(fp, MemOperand(scratch)); - } - - Call(function_base, function_offset); - - // We don't unset the PC; the FP is the source of truth. - if (root_array_available()) { - sw(zero_reg, MemOperand(kRootRegister, - IsolateData::fast_c_call_caller_fp_offset())); - } else { - DCHECK_NOT_NULL(isolate()); - li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate())); - sw(zero_reg, MemOperand(scratch)); - } - - int stack_passed_arguments = - CalculateStackPassedWords(num_reg_arguments, num_double_arguments); - - if (base::OS::ActivationFrameAlignment() > kPointerSize) { - lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize)); - } else { - Addu(sp, sp, Operand(stack_passed_arguments * kPointerSize)); - } - - set_pc_for_safepoint(); - } -} - -#undef BRANCH_ARGS_CHECK - -void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask, - Condition cc, Label* condition_met) { - ASM_CODE_COMMENT(this); - And(scratch, object, Operand(~kPageAlignmentMask)); - lw(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset)); - And(scratch, scratch, Operand(mask)); - Branch(condition_met, cc, scratch, Operand(zero_reg)); -} - -Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3, - Register reg4, Register reg5, - Register reg6) { - RegList regs = {reg1, reg2, reg3, reg4, reg5, reg6}; - - const RegisterConfiguration* config = RegisterConfiguration::Default(); - for (int i = 0; i < config->num_allocatable_general_registers(); ++i) { - int code = config->GetAllocatableGeneralCode(i); - Register candidate = Register::from_code(code); - if (regs.has(candidate)) continue; - return candidate; - } - UNREACHABLE(); -} - -void TurboAssembler::ComputeCodeStartAddress(Register dst) { - // This push on ra and the pop below together ensure that we restore the - // register ra, which is needed while computing the code start address. - push(ra); - - // The nal instruction puts the address of the current instruction into - // the return address (ra) register, which we can use later on. - if (IsMipsArchVariant(kMips32r6)) { - addiupc(ra, 1); - } else { - nal(); - nop(); - } - int pc = pc_offset(); - li(dst, pc); - subu(dst, ra, dst); - - pop(ra); // Restore ra -} - -void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, - DeoptimizeKind kind, Label* ret, - Label*) { - ASM_CODE_COMMENT(this); - BlockTrampolinePoolScope block_trampoline_pool(this); - Lw(t9, - MemOperand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(target))); - Call(t9); - DCHECK_EQ(SizeOfCodeGeneratedSince(exit), - (kind == DeoptimizeKind::kLazy) ? Deoptimizer::kLazyDeoptExitSize - : Deoptimizer::kEagerDeoptExitSize); -} - -void TurboAssembler::LoadCodeObjectEntry(Register destination, - Register code_object) { - ASM_CODE_COMMENT(this); - // Code objects are called differently depending on whether we are generating - // builtin code (which will later be embedded into the binary) or compiling - // user JS code at runtime. - // * Builtin code runs in --jitless mode and thus must not call into on-heap - // Code targets. 
Instead, we dispatch through the builtins entry table. - // * Codegen at runtime does not have this restriction and we can use the - // shorter, branchless instruction sequence. The assumption here is that - // targets are usually generated code and not builtin Code objects. - if (options().isolate_independent_code) { - DCHECK(root_array_available()); - Label if_code_is_off_heap, out; - - Register scratch = kScratchReg; - DCHECK(!AreAliased(destination, scratch)); - DCHECK(!AreAliased(code_object, scratch)); - - // Check whether the Code object is an off-heap trampoline. If so, call its - // (off-heap) entry point directly without going through the (on-heap) - // trampoline. Otherwise, just call the Code object as always. - Lw(scratch, FieldMemOperand(code_object, Code::kFlagsOffset)); - And(scratch, scratch, Operand(Code::IsOffHeapTrampoline::kMask)); - Branch(&if_code_is_off_heap, ne, scratch, Operand(zero_reg)); - - // Not an off-heap trampoline object, the entry point is at - // Code::raw_instruction_start(). - Addu(destination, code_object, Code::kHeaderSize - kHeapObjectTag); - Branch(&out); - - // An off-heap trampoline, the entry point is loaded from the builtin entry - // table. - bind(&if_code_is_off_heap); - Lw(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset)); - Lsa(destination, kRootRegister, scratch, kSystemPointerSizeLog2); - Lw(destination, - MemOperand(destination, IsolateData::builtin_entry_table_offset())); - - bind(&out); - } else { - Addu(destination, code_object, Code::kHeaderSize - kHeapObjectTag); - } -} - -void TurboAssembler::CallCodeObject(Register code_object) { - ASM_CODE_COMMENT(this); - LoadCodeObjectEntry(code_object, code_object); - Call(code_object); -} -void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) { - ASM_CODE_COMMENT(this); - DCHECK_EQ(JumpMode::kJump, jump_mode); - LoadCodeObjectEntry(code_object, code_object); - Jump(code_object); -} - -} // namespace internal -} // namespace v8 - -#endif // V8_TARGET_ARCH_MIPS diff --git a/src/codegen/mips/macro-assembler-mips.h b/src/codegen/mips/macro-assembler-mips.h deleted file mode 100644 index df6fc49197..0000000000 --- a/src/codegen/mips/macro-assembler-mips.h +++ /dev/null @@ -1,1211 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H -#error This header must be included via macro-assembler.h -#endif - -#ifndef V8_CODEGEN_MIPS_MACRO_ASSEMBLER_MIPS_H_ -#define V8_CODEGEN_MIPS_MACRO_ASSEMBLER_MIPS_H_ - -#include "src/codegen/assembler.h" -#include "src/codegen/mips/assembler-mips.h" -#include "src/common/globals.h" -#include "src/objects/contexts.h" -#include "src/objects/tagged-index.h" - -namespace v8 { -namespace internal { - -// Forward declarations -enum class AbortReason : uint8_t; - -// Reserved Register Usage Summary. -// -// Registers t8, t9, and at are reserved for use by the MacroAssembler. -// -// The programmer should know that the MacroAssembler may clobber these three, -// but won't touch other registers except in special cases. -// -// Per the MIPS ABI, register t9 must be used for indirect function call -// via 'jalr t9' or 'jr t9' instructions. This is relied upon by gcc when -// trying to update gp register for position-independent-code. Whenever -// MIPS generated code calls C code, it must be via t9 register. - -// Flags used for LeaveExitFrame function. 
-enum LeaveExitFrameMode { EMIT_RETURN = true, NO_EMIT_RETURN = false };
-
-// Flags used for the li macro-assembler function.
-enum LiFlags {
- // If the constant value can be represented in just 16 bits, then
- // optimize the li to use a single instruction, rather than lui/ori pair.
- OPTIMIZE_SIZE = 0,
- // Always use 2 instructions (lui/ori pair), even if the constant could
- // be loaded with just one, so that this value is patchable later.
- CONSTANT_SIZE = 1
-};
-
-enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
-
-Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
- Register reg3 = no_reg,
- Register reg4 = no_reg,
- Register reg5 = no_reg,
- Register reg6 = no_reg);
-
-// -----------------------------------------------------------------------------
-// Static helper functions.
-// Generate a MemOperand for loading a field from an object.
-inline MemOperand FieldMemOperand(Register object, int offset) {
- return MemOperand(object, offset - kHeapObjectTag);
-}
-
-// Generate a MemOperand for storing arguments 5..N on the stack
-// when calling CallCFunction().
-inline MemOperand CFunctionArgumentOperand(int index) {
- DCHECK_GT(index, kCArgSlotCount);
- // Argument 5 takes the slot just past the four Arg-slots.
- int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
- return MemOperand(sp, offset);
-}
-
-class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
- public:
- using TurboAssemblerBase::TurboAssemblerBase;
-
- // Activation support.
- void EnterFrame(StackFrame::Type type);
- void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
- // Out-of-line constant pool not implemented on mips.
- UNREACHABLE();
- }
- void LeaveFrame(StackFrame::Type type);
-
- void AllocateStackSpace(Register bytes) { Subu(sp, sp, bytes); }
- void AllocateStackSpace(int bytes) {
- DCHECK_GE(bytes, 0);
- if (bytes == 0) return;
- Subu(sp, sp, Operand(bytes));
- }
-
- // Generates function and stub prologue code.
- void StubPrologue(StackFrame::Type type);
- void Prologue();
-
- void InitializeRootRegister() {
- ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
- li(kRootRegister, Operand(isolate_root));
- }
-
- // Jump unconditionally to given label.
- // We NEED a nop in the branch delay slot, as it is used by v8, for example
- // in CodeGenerator::ProcessDeferred().
- // Currently the branch delay slot is filled by the MacroAssembler.
- // Rather, use b(Label) for code generation.
- void jmp(Label* L) { Branch(L); }
-
- // -------------------------------------------------------------------------
- // Debugging.
-
- void Trap();
- void DebugBreak();
-
- // Calls Abort(msg) if the condition cc is not satisfied.
- // Use --debug_code to enable.
- void Assert(Condition cc, AbortReason reason, Register rs, Operand rt);
-
- // Like Assert(), but always enabled.
- void Check(Condition cc, AbortReason reason, Register rs, Operand rt);
-
- // Print a message to stdout and abort execution.
- void Abort(AbortReason msg);
-
- // Arguments macros.
-#define COND_TYPED_ARGS Condition cond, Register r1, const Operand &r2
-#define COND_ARGS cond, r1, r2
-
- // Cases when relocation is not needed.
-#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
- void Name(target_type target, BranchDelaySlot bd = PROTECT); \
- inline void Name(BranchDelaySlot bd, target_type target) { \
- Name(target, bd); \
- } \
- void Name(target_type target, COND_TYPED_ARGS, \
- BranchDelaySlot bd = PROTECT); \
- inline void Name(BranchDelaySlot bd, target_type target, COND_TYPED_ARGS) { \
- Name(target, COND_ARGS, bd); \
- }
-
-#define DECLARE_BRANCH_PROTOTYPES(Name) \
- DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
- DECLARE_NORELOC_PROTOTYPE(Name, int32_t)
-
- DECLARE_BRANCH_PROTOTYPES(Branch)
- DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
- DECLARE_BRANCH_PROTOTYPES(BranchShort)
-
-#undef DECLARE_BRANCH_PROTOTYPES
-#undef COND_TYPED_ARGS
-#undef COND_ARGS
-
- // Floating point branches
- void CompareF32(FPUCondition cc, FPURegister cmp1, FPURegister cmp2) {
- CompareF(S, cc, cmp1, cmp2);
- }
-
- void CompareIsNanF32(FPURegister cmp1, FPURegister cmp2) {
- CompareIsNanF(S, cmp1, cmp2);
- }
-
- void CompareF64(FPUCondition cc, FPURegister cmp1, FPURegister cmp2) {
- CompareF(D, cc, cmp1, cmp2);
- }
-
- void CompareIsNanF64(FPURegister cmp1, FPURegister cmp2) {
- CompareIsNanF(D, cmp1, cmp2);
- }
-
- void BranchTrueShortF(Label* target, BranchDelaySlot bd = PROTECT);
- void BranchFalseShortF(Label* target, BranchDelaySlot bd = PROTECT);
-
- void BranchTrueF(Label* target, BranchDelaySlot bd = PROTECT);
- void BranchFalseF(Label* target, BranchDelaySlot bd = PROTECT);
-
- // MSA Branches
- void BranchMSA(Label* target, MSABranchDF df, MSABranchCondition cond,
- MSARegister wt, BranchDelaySlot bd = PROTECT);
-
- void BranchLong(int32_t offset, BranchDelaySlot bdslot = PROTECT);
- void Branch(Label* L, Condition cond, Register rs, RootIndex index,
- BranchDelaySlot bdslot = PROTECT);
-
- // Load int32 in the rd register.
- void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
- inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
- li(rd, Operand(j), mode);
- }
- void li(Register dst, Handle<HeapObject> value, LiFlags mode = OPTIMIZE_SIZE);
- void li(Register dst, ExternalReference value, LiFlags mode = OPTIMIZE_SIZE);
- void li(Register dst, const StringConstantBase* string,
- LiFlags mode = OPTIMIZE_SIZE);
-
- void LoadFromConstantsTable(Register destination, int constant_index) final;
- void LoadRootRegisterOffset(Register destination, intptr_t offset) final;
- void LoadRootRelative(Register destination, int32_t offset) final;
-
- inline void Move(Register output, MemOperand operand) { Lw(output, operand); }
-
-// Jump, Call, and Ret pseudo instructions implementing inter-working.
-#define COND_ARGS \
- Condition cond = al, Register rs = zero_reg, \
- const Operand &rt = Operand(zero_reg), \
- BranchDelaySlot bd = PROTECT
-
- void Jump(Register target, int16_t offset = 0, COND_ARGS);
- void Jump(Register target, Register base, int16_t offset = 0, COND_ARGS);
- void Jump(Register target, const Operand& offset, COND_ARGS);
- void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
- void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
- // Differs from li: this method saves the target to memory and then loads
- // it into a register using lw; it can be used in a wasm jump table for
- // concurrent patching.
- void PatchAndJump(Address target);
- void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
- void Jump(const ExternalReference& reference);
- void Call(Register target, int16_t offset = 0, COND_ARGS);
- void Call(Register target, Register base, int16_t offset = 0, COND_ARGS);
- void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
- void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- COND_ARGS);
- void Call(Label* target);
- void LoadAddress(Register dst, Label* target);
-
- // Load the builtin given by the Smi in |builtin| into the same
- // register.
- void LoadEntryFromBuiltinIndex(Register builtin);
- void LoadEntryFromBuiltin(Builtin builtin, Register destination);
- MemOperand EntryFromBuiltinAsOperand(Builtin builtin);
-
- void CallBuiltinByIndex(Register builtin_index);
- void CallBuiltin(Builtin builtin);
-
- void LoadCodeObjectEntry(Register destination, Register code_object);
- void CallCodeObject(Register code_object);
-
- void JumpCodeObject(Register code_object,
- JumpMode jump_mode = JumpMode::kJump);
-
- // Generates an instruction sequence s.t. the return address points to the
- // instruction following the call.
- // The return address on the stack is used by frame iteration.
- void StoreReturnAddressAndCall(Register target);
-
- void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
- DeoptimizeKind kind, Label* ret,
- Label* jump_deoptimization_entry_label);
-
- void Ret(COND_ARGS);
- inline void Ret(BranchDelaySlot bd, Condition cond = al,
- Register rs = zero_reg,
- const Operand& rt = Operand(zero_reg)) {
- Ret(cond, rs, rt, bd);
- }
-
- // Emit code to discard a non-negative number of pointer-sized elements
- // from the stack, clobbering only the sp register.
- void Drop(int count, Condition cond = cc_always, Register reg = no_reg,
- const Operand& op = Operand(no_reg));
-
- // We assume the size of the arguments is the pointer size.
- // An optional mode argument is passed, which can indicate we need to
- // explicitly add the receiver to the count.
- enum ArgumentsCountMode { kCountIncludesReceiver, kCountExcludesReceiver };
- enum ArgumentsCountType { kCountIsInteger, kCountIsSmi, kCountIsBytes };
- void DropArguments(Register count, ArgumentsCountType type,
- ArgumentsCountMode mode);
- void DropArgumentsAndPushNewReceiver(Register argc, Register receiver,
- ArgumentsCountType type,
- ArgumentsCountMode mode);
-
- // Trivial case of DropAndRet that utilizes the delay slot.
- void DropAndRet(int drop);
-
- void DropAndRet(int drop, Condition cond, Register reg, const Operand& op);
-
- void Lw(Register rd, const MemOperand& rs);
- void Sw(Register rd, const MemOperand& rs);
-
- void push(Register src) {
- Addu(sp, sp, Operand(-kPointerSize));
- sw(src, MemOperand(sp, 0));
- }
-
- void Push(Register src) { push(src); }
- void Push(Handle<HeapObject> handle);
- void Push(Smi smi);
-
- // Push two registers. Pushes leftmost register first (to highest address).
- void Push(Register src1, Register src2) {
- Subu(sp, sp, Operand(2 * kPointerSize));
- sw(src1, MemOperand(sp, 1 * kPointerSize));
- sw(src2, MemOperand(sp, 0 * kPointerSize));
- }
-
- // Push three registers. Pushes leftmost register first (to highest address).
- void Push(Register src1, Register src2, Register src3) {
- Subu(sp, sp, Operand(3 * kPointerSize));
- sw(src1, MemOperand(sp, 2 * kPointerSize));
- sw(src2, MemOperand(sp, 1 * kPointerSize));
- sw(src3, MemOperand(sp, 0 * kPointerSize));
- }
-
- // Push four registers.
Pushes leftmost register first (to highest address).
- void Push(Register src1, Register src2, Register src3, Register src4) {
- Subu(sp, sp, Operand(4 * kPointerSize));
- sw(src1, MemOperand(sp, 3 * kPointerSize));
- sw(src2, MemOperand(sp, 2 * kPointerSize));
- sw(src3, MemOperand(sp, 1 * kPointerSize));
- sw(src4, MemOperand(sp, 0 * kPointerSize));
- }
-
- // Push five registers. Pushes leftmost register first (to highest address).
- void Push(Register src1, Register src2, Register src3, Register src4,
- Register src5) {
- Subu(sp, sp, Operand(5 * kPointerSize));
- sw(src1, MemOperand(sp, 4 * kPointerSize));
- sw(src2, MemOperand(sp, 3 * kPointerSize));
- sw(src3, MemOperand(sp, 2 * kPointerSize));
- sw(src4, MemOperand(sp, 1 * kPointerSize));
- sw(src5, MemOperand(sp, 0 * kPointerSize));
- }
-
- void Push(Register src, Condition cond, Register tst1, Register tst2) {
- // Since we don't have conditional execution we use a Branch.
- Branch(3, cond, tst1, Operand(tst2));
- Subu(sp, sp, Operand(kPointerSize));
- sw(src, MemOperand(sp, 0));
- }
-
- enum PushArrayOrder { kNormal, kReverse };
- void PushArray(Register array, Register size, Register scratch,
- Register scratch2, PushArrayOrder order = kNormal);
-
- void MaybeSaveRegisters(RegList registers);
- void MaybeRestoreRegisters(RegList registers);
-
- void CallEphemeronKeyBarrier(Register object, Register slot_address,
- SaveFPRegsMode fp_mode);
-
- void CallRecordWriteStubSaveRegisters(
- Register object, Register slot_address,
- RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
- StubCallMode mode = StubCallMode::kCallBuiltinPointer);
- void CallRecordWriteStub(
- Register object, Register slot_address,
- RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
- StubCallMode mode = StubCallMode::kCallBuiltinPointer);
-
- // Push multiple registers on the stack.
- // Registers are saved in numerical order, with higher numbered registers
- // saved in higher memory addresses.
- void MultiPush(RegList regs);
- void MultiPushFPU(DoubleRegList regs);
-
- // Calculate how much stack space (in bytes) is required to store caller
- // registers excluding those specified in the arguments.
- int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
- Register exclusion1 = no_reg,
- Register exclusion2 = no_reg,
- Register exclusion3 = no_reg) const;
-
- // Push caller saved registers on the stack, and return the number of bytes
- // the stack pointer is adjusted by.
- int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
- Register exclusion2 = no_reg,
- Register exclusion3 = no_reg);
- // Restore caller saved registers from the stack, and return the number of
- // bytes the stack pointer is adjusted by.
- int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
- Register exclusion2 = no_reg,
- Register exclusion3 = no_reg);
-
- void pop(Register dst) {
- lw(dst, MemOperand(sp, 0));
- Addu(sp, sp, Operand(kPointerSize));
- }
-
- void Pop(Register dst) { pop(dst); }
-
- // Pop two registers. Pops rightmost register first (from lower address).
- void Pop(Register src1, Register src2) {
- DCHECK(src1 != src2);
- lw(src2, MemOperand(sp, 0 * kPointerSize));
- lw(src1, MemOperand(sp, 1 * kPointerSize));
- Addu(sp, sp, 2 * kPointerSize);
- }
-
- // Pop three registers. Pops rightmost register first (from lower address).
- void Pop(Register src1, Register src2, Register src3) {
- lw(src3, MemOperand(sp, 0 * kPointerSize));
- lw(src2, MemOperand(sp, 1 * kPointerSize));
- lw(src1, MemOperand(sp, 2 * kPointerSize));
- Addu(sp, sp, 3 * kPointerSize);
- }
-
- void Pop(uint32_t count = 1) { Addu(sp, sp, Operand(count * kPointerSize)); }
-
- // Pops multiple values from the stack and loads them into the
- // registers specified in regs. Pop order is the opposite of MultiPush.
- void MultiPop(RegList regs);
- void MultiPopFPU(DoubleRegList regs);
-
- // Load Scaled Address instructions. Parameter sa (shift argument) must be
- // between [1, 31] (inclusive). On pre-r6 architectures the scratch register
- // may be clobbered.
- void Lsa(Register rd, Register rs, Register rt, uint8_t sa,
- Register scratch = at);
-
-#define DEFINE_INSTRUCTION(instr) \
- void instr(Register rd, Register rs, const Operand& rt); \
- void instr(Register rd, Register rs, Register rt) { \
- instr(rd, rs, Operand(rt)); \
- } \
- void instr(Register rs, Register rt, int32_t j) { instr(rs, rt, Operand(j)); }
-
-#define DEFINE_INSTRUCTION2(instr) \
- void instr(Register rs, const Operand& rt); \
- void instr(Register rs, Register rt) { instr(rs, Operand(rt)); } \
- void instr(Register rs, int32_t j) { instr(rs, Operand(j)); }
-
-#define DEFINE_INSTRUCTION3(instr) \
- void instr(Register rd_hi, Register rd_lo, Register rs, const Operand& rt); \
- void instr(Register rd_hi, Register rd_lo, Register rs, Register rt) { \
- instr(rd_hi, rd_lo, rs, Operand(rt)); \
- } \
- void instr(Register rd_hi, Register rd_lo, Register rs, int32_t j) { \
- instr(rd_hi, rd_lo, rs, Operand(j)); \
- }
-
- DEFINE_INSTRUCTION(Addu)
- DEFINE_INSTRUCTION(Subu)
- DEFINE_INSTRUCTION(Mul)
- DEFINE_INSTRUCTION(Div)
- DEFINE_INSTRUCTION(Divu)
- DEFINE_INSTRUCTION(Mod)
- DEFINE_INSTRUCTION(Modu)
- DEFINE_INSTRUCTION(Mulh)
- DEFINE_INSTRUCTION2(Mult)
- DEFINE_INSTRUCTION(Mulhu)
- DEFINE_INSTRUCTION2(Multu)
- DEFINE_INSTRUCTION2(Div)
- DEFINE_INSTRUCTION2(Divu)
-
- DEFINE_INSTRUCTION3(Div)
- DEFINE_INSTRUCTION3(Mul)
- DEFINE_INSTRUCTION3(Mulu)
-
- DEFINE_INSTRUCTION(And)
- DEFINE_INSTRUCTION(Or)
- DEFINE_INSTRUCTION(Xor)
- DEFINE_INSTRUCTION(Nor)
- DEFINE_INSTRUCTION2(Neg)
-
- DEFINE_INSTRUCTION(Slt)
- DEFINE_INSTRUCTION(Sltu)
- DEFINE_INSTRUCTION(Sle)
- DEFINE_INSTRUCTION(Sleu)
- DEFINE_INSTRUCTION(Sgt)
- DEFINE_INSTRUCTION(Sgtu)
- DEFINE_INSTRUCTION(Sge)
- DEFINE_INSTRUCTION(Sgeu)
-
- // MIPS32 R2 instruction macro.
- DEFINE_INSTRUCTION(Ror)
-
-#undef DEFINE_INSTRUCTION
-#undef DEFINE_INSTRUCTION2
-#undef DEFINE_INSTRUCTION3
-
- void SmiUntag(Register reg) { sra(reg, reg, kSmiTagSize); }
-
- void SmiUntag(Register dst, Register src) { sra(dst, src, kSmiTagSize); }
-
- void SmiToInt32(Register smi) { SmiUntag(smi); }
-
- int CalculateStackPassedWords(int num_reg_arguments,
- int num_double_arguments);
-
- // Before calling a C-function from generated code, align arguments on stack
- // and add space for the four mips argument slots.
- // After aligning the frame, non-register arguments must be stored on the
- // stack, after the argument-slots using helper: CFunctionArgumentOperand().
- // The argument count assumes all arguments are word sized.
- // Some compilers/platforms require the stack to be aligned when calling
- // C++ code.
- // Needs a scratch register to do some arithmetic. This register will be
- // trashed.
- void PrepareCallCFunction(int num_reg_arguments, int num_double_arguments,
- Register scratch);
- void PrepareCallCFunction(int num_reg_arguments, Register scratch);
-
- // Arguments 1-4 are placed in registers a0 through a3 respectively.
- // Arguments 5..n are stored to stack using the following:
- // sw(t0, CFunctionArgumentOperand(5));
-
- // Calls a C function and cleans up the space for arguments allocated
- // by PrepareCallCFunction. The called function is not allowed to trigger a
- // garbage collection, since that might move the code and invalidate the
- // return address (unless this is somehow accounted for by the called
- // function).
- void CallCFunction(ExternalReference function, int num_arguments);
- void CallCFunction(Register function, int num_arguments);
- void CallCFunction(ExternalReference function, int num_reg_arguments,
- int num_double_arguments);
- void CallCFunction(Register function, int num_reg_arguments,
- int num_double_arguments);
- void MovFromFloatResult(DoubleRegister dst);
- void MovFromFloatParameter(DoubleRegister dst);
-
- // There are two ways of passing double arguments on MIPS, depending on
- // whether soft or hard floating point ABI is used. These functions
- // abstract parameter passing for the three different ways we call
- // C functions from generated code.
- void MovToFloatParameter(DoubleRegister src);
- void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
- void MovToFloatResult(DoubleRegister src);
-
- // See comments at the beginning of Builtins::Generate_CEntry.
- inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); }
- inline void PrepareCEntryFunction(const ExternalReference& ref) {
- li(a1, ref);
- }
-
- void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
- Label* condition_met);
-#undef COND_ARGS
-
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
- // Exits with 'result' holding the answer.
- void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
- DoubleRegister double_input, StubCallMode stub_mode);
-
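A minimal usage sketch for the calling-convention helpers above (illustrative only; `masm` is a hypothetical TurboAssembler* and `ref` a hypothetical ExternalReference to a C function taking five word-sized arguments):

  // Five word arguments: a0..a3 plus one stack word past the four
  // reserved O32 argument slots.
  UseScratchRegisterScope temps(masm);
  Register scratch = temps.Acquire();
  masm->PrepareCallCFunction(5, scratch);      // aligns sp, reserves slots
  masm->li(a0, Operand(1));
  masm->li(a1, Operand(2));
  masm->li(a2, Operand(3));
  masm->li(a3, Operand(4));
  masm->li(scratch, Operand(5));
  masm->sw(scratch, CFunctionArgumentOperand(5));  // argument 5 on the stack
  masm->CallCFunction(ref, 5);                 // restores the stack afterwards

The argument count passed to both helpers counts register and stack words together, matching CalculateStackPassedWords above.

- // Conditional move.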
- void Movz(Register rd, Register rs, Register rt); - void Movn(Register rd, Register rs, Register rt); - void Movt(Register rd, Register rs, uint16_t cc = 0); - void Movf(Register rd, Register rs, uint16_t cc = 0); - - void LoadZeroIfFPUCondition(Register dest); - void LoadZeroIfNotFPUCondition(Register dest); - - void LoadZeroIfConditionNotZero(Register dest, Register condition); - void LoadZeroIfConditionZero(Register dest, Register condition); - void LoadZeroOnCondition(Register rd, Register rs, const Operand& rt, - Condition cond); - - void Clz(Register rd, Register rs); - void Ctz(Register rd, Register rs); - void Popcnt(Register rd, Register rs); - - // Int64Lowering instructions - void AddPair(Register dst_low, Register dst_high, Register left_low, - Register left_high, Register right_low, Register right_high, - Register scratch1, Register scratch2); - - void AddPair(Register dst_low, Register dst_high, Register left_low, - Register left_high, int32_t imm, Register scratch1, - Register scratch2); - - void SubPair(Register dst_low, Register dst_high, Register left_low, - Register left_high, Register right_low, Register right_high, - Register scratch1, Register scratch2); - - void AndPair(Register dst_low, Register dst_high, Register left_low, - Register left_high, Register right_low, Register right_high); - - void OrPair(Register dst_low, Register dst_high, Register left_low, - Register left_high, Register right_low, Register right_high); - - void XorPair(Register dst_low, Register dst_high, Register left_low, - Register left_high, Register right_low, Register right_high); - - void MulPair(Register dst_low, Register dst_high, Register left_low, - Register left_high, Register right_low, Register right_high, - Register scratch1, Register scratch2); - - void ShlPair(Register dst_low, Register dst_high, Register src_low, - Register src_high, Register shift, Register scratch1, - Register scratch2); - - void ShlPair(Register dst_low, Register dst_high, Register src_low, - Register src_high, uint32_t shift, Register scratch); - - void ShrPair(Register dst_low, Register dst_high, Register src_low, - Register src_high, Register shift, Register scratch1, - Register scratch2); - - void ShrPair(Register dst_low, Register dst_high, Register src_low, - Register src_high, uint32_t shift, Register scratch); - - void SarPair(Register dst_low, Register dst_high, Register src_low, - Register src_high, Register shift, Register scratch1, - Register scratch2); - - void SarPair(Register dst_low, Register dst_high, Register src_low, - Register src_high, uint32_t shift, Register scratch); - - // MIPS32 R2 instruction macro. - void Ins(Register rt, Register rs, uint16_t pos, uint16_t size); - void Ext(Register rt, Register rs, uint16_t pos, uint16_t size); - void ExtractBits(Register dest, Register source, Register pos, int size, - bool sign_extend = false); - void InsertBits(Register dest, Register source, Register pos, int size); - - void Seb(Register rd, Register rt); - void Seh(Register rd, Register rt); - void Neg_s(FPURegister fd, FPURegister fs); - void Neg_d(FPURegister fd, FPURegister fs); - - // MIPS32 R6 instruction macros. - void Bovc(Register rt, Register rs, Label* L); - void Bnvc(Register rt, Register rs, Label* L); - - // Convert single to unsigned word. 
- void Trunc_uw_s(FPURegister fd, FPURegister fs, FPURegister scratch); - void Trunc_uw_s(Register rd, FPURegister fs, FPURegister scratch); - - void Trunc_w_d(FPURegister fd, FPURegister fs); - void Round_w_d(FPURegister fd, FPURegister fs); - void Floor_w_d(FPURegister fd, FPURegister fs); - void Ceil_w_d(FPURegister fd, FPURegister fs); - - // Round double functions - void Trunc_d_d(FPURegister fd, FPURegister fs); - void Round_d_d(FPURegister fd, FPURegister fs); - void Floor_d_d(FPURegister fd, FPURegister fs); - void Ceil_d_d(FPURegister fd, FPURegister fs); - - // Round float functions - void Trunc_s_s(FPURegister fd, FPURegister fs); - void Round_s_s(FPURegister fd, FPURegister fs); - void Floor_s_s(FPURegister fd, FPURegister fs); - void Ceil_s_s(FPURegister fd, FPURegister fs); - - // FP32 mode: Move the general purpose register into - // the high part of the double-register pair. - // FP64 mode: Move the general-purpose register into - // the higher 32 bits of the 64-bit coprocessor register, - // while leaving the low bits unchanged. - void Mthc1(Register rt, FPURegister fs); - - // FP32 mode: move the high part of the double-register pair into - // general purpose register. - // FP64 mode: Move the higher 32 bits of the 64-bit coprocessor register into - // general-purpose register. - void Mfhc1(Register rt, FPURegister fs); - - void Madd_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft, - FPURegister scratch); - void Madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft, - FPURegister scratch); - void Msub_s(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft, - FPURegister scratch); - void Msub_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft, - FPURegister scratch); - - // Change endianness - void ByteSwapSigned(Register dest, Register src, int operand_size); - void ByteSwapUnsigned(Register dest, Register src, int operand_size); - - void Ulh(Register rd, const MemOperand& rs); - void Ulhu(Register rd, const MemOperand& rs); - void Ush(Register rd, const MemOperand& rs, Register scratch); - - void Ulw(Register rd, const MemOperand& rs); - void Usw(Register rd, const MemOperand& rs); - - void Ulwc1(FPURegister fd, const MemOperand& rs, Register scratch); - void Uswc1(FPURegister fd, const MemOperand& rs, Register scratch); - - void Uldc1(FPURegister fd, const MemOperand& rs, Register scratch); - void Usdc1(FPURegister fd, const MemOperand& rs, Register scratch); - - void Ldc1(FPURegister fd, const MemOperand& src); - void Sdc1(FPURegister fs, const MemOperand& dst); - - void Ll(Register rd, const MemOperand& rs); - void Sc(Register rd, const MemOperand& rs); - - // Perform a floating-point min or max operation with the - // (IEEE-754-compatible) semantics of MIPS32's Release 6 MIN.fmt/MAX.fmt. - // Some cases, typically NaNs or +/-0.0, are expected to be rare and are - // handled in out-of-line code. The specific behaviour depends on supported - // instructions. - // - // These functions assume (and assert) that src1!=src2. It is permitted - // for the result to alias either input register. 
- void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2,
- Label* out_of_line);
- void Float32Min(FPURegister dst, FPURegister src1, FPURegister src2,
- Label* out_of_line);
- void Float64Max(DoubleRegister dst, DoubleRegister src1, DoubleRegister src2,
- Label* out_of_line);
- void Float64Min(DoubleRegister dst, DoubleRegister src1, DoubleRegister src2,
- Label* out_of_line);
-
- // Generate out-of-line cases for the macros above.
- void Float32MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
- void Float32MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
- void Float64MaxOutOfLine(DoubleRegister dst, DoubleRegister src1,
- DoubleRegister src2);
- void Float64MinOutOfLine(DoubleRegister dst, DoubleRegister src1,
- DoubleRegister src2);
-
- bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }
-
- void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
-
- inline void Move(Register dst, Handle<HeapObject> handle) { li(dst, handle); }
- inline void Move(Register dst, Smi smi) { li(dst, Operand(smi)); }
-
- inline void Move(Register dst, Register src) {
- if (dst != src) {
- mov(dst, src);
- }
- }
-
- inline void Move_d(FPURegister dst, FPURegister src) {
- if (dst != src) {
- mov_d(dst, src);
- }
- }
-
- inline void Move_s(FPURegister dst, FPURegister src) {
- if (dst != src) {
- mov_s(dst, src);
- }
- }
-
- inline void Move(FPURegister dst, FPURegister src) { Move_d(dst, src); }
-
- inline void Move(Register dst_low, Register dst_high, FPURegister src) {
- mfc1(dst_low, src);
- Mfhc1(dst_high, src);
- }
-
- inline void FmoveHigh(Register dst_high, FPURegister src) {
- Mfhc1(dst_high, src);
- }
-
- inline void FmoveHigh(FPURegister dst, Register src_high) {
- Mthc1(src_high, dst);
- }
-
- inline void FmoveLow(Register dst_low, FPURegister src) {
- mfc1(dst_low, src);
- }
-
- void FmoveLow(FPURegister dst, Register src_low);
-
- inline void Move(FPURegister dst, Register src_low, Register src_high) {
- mtc1(src_low, dst);
- Mthc1(src_high, dst);
- }
-
- void Move(FPURegister dst, float imm) {
- Move(dst, base::bit_cast<uint32_t>(imm));
- }
- void Move(FPURegister dst, double imm) {
- Move(dst, base::bit_cast<uint64_t>(imm));
- }
- void Move(FPURegister dst, uint32_t src);
- void Move(FPURegister dst, uint64_t src);
-
- // -------------------------------------------------------------------------
- // Overflow operations.
-
- // AddOverflow sets overflow register to a negative value if
- // overflow occurred, otherwise it is zero or positive
- void AddOverflow(Register dst, Register left, const Operand& right,
- Register overflow);
- // SubOverflow sets overflow register to a negative value if
- // overflow occurred, otherwise it is zero or positive
- void SubOverflow(Register dst, Register left, const Operand& right,
- Register overflow);
- // MulOverflow sets overflow register to zero if no overflow occurred
- void MulOverflow(Register dst, Register left, const Operand& right,
- Register overflow);
-
-// Number of instructions needed for calculation of switch table entry address
-#ifdef _MIPS_ARCH_MIPS32R6
- static constexpr int kSwitchTablePrologueSize = 5;
-#else
- static constexpr int kSwitchTablePrologueSize = 10;
-#endif
- // GetLabelFunction must be lambda '[](size_t index) -> Label*' or a
- // functor/function with 'Label *func(size_t index)' declaration.
- template <typename Func>
- void GenerateSwitchTable(Register index, size_t case_count,
- Func GetLabelFunction);
-
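An illustrative sketch of the GetLabelFunction contract just described (assuming a hypothetical `masm` pointer and a register `index` holding an in-range case number):

  Label cases[3];
  masm->GenerateSwitchTable(index, 3,
                            [&cases](size_t i) { return &cases[i]; });
  for (size_t i = 0; i < 3; ++i) {
    masm->bind(&cases[i]);
    // ... code for case i, ending in a branch out of the switch ...
  }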
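Similarly, the Float64Max/Min helpers declared above pair with their OutOfLine counterparts roughly as follows (a sketch with arbitrary register choices; the macros assert src1 != src2):

  Label out_of_line, done;
  masm->Float64Max(f0, f2, f4, &out_of_line);  // fast path
  masm->Branch(&done);
  masm->bind(&out_of_line);                    // reached when an input is NaN
  masm->Float64MaxOutOfLine(f0, f2, f4);       // add_d propagates the NaN
  masm->bind(&done);

- // Load an object from the root table.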
- void LoadRoot(Register destination, RootIndex index) final;
- void LoadRoot(Register destination, RootIndex index, Condition cond,
- Register src1, const Operand& src2);
-
- void LoadMap(Register destination, Register object);
-
- // If the value is a NaN, canonicalize the value; else, do nothing.
- void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
-
- // ---------------------------------------------------------------------------
- // FPU macros. These do not handle special cases like NaN or +- inf.
-
- // Convert unsigned word to double.
- void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);
-
- // Convert double to unsigned word.
- void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
- void Trunc_uw_d(Register rd, FPURegister fs, FPURegister scratch);
-
- // Jump if the register contains a smi.
- void JumpIfSmi(Register value, Label* smi_label,
- BranchDelaySlot bd = PROTECT);
-
- void JumpIfEqual(Register a, int32_t b, Label* dest) {
- li(kScratchReg, Operand(b));
- Branch(dest, eq, a, Operand(kScratchReg));
- }
-
- void JumpIfLessThan(Register a, int32_t b, Label* dest) {
- li(kScratchReg, Operand(b));
- Branch(dest, lt, a, Operand(kScratchReg));
- }
-
- // Push a standard frame, consisting of ra, fp, context and JS function.
- void PushStandardFrame(Register function_reg);
-
- // Get the actual activation frame alignment for target environment.
- static int ActivationFrameAlignment();
-
- // Compute the start of the generated instruction stream from the current PC.
- // This is an alternative to embedding the {CodeObject} handle as a reference.
- void ComputeCodeStartAddress(Register dst);
-
- // Control-flow integrity:
-
- // Define a function entrypoint. This doesn't emit any code for this
- // architecture, as control-flow integrity is not supported for it.
- void CodeEntry() {}
- // Define an exception handler.
- void ExceptionHandler() {}
- // Define an exception handler and bind a label.
- void BindExceptionHandler(Label* label) { bind(label); }
-
- protected:
- void BranchLong(Label* L, BranchDelaySlot bdslot);
-
- inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
-
- inline int32_t GetOffset(int32_t offset, Label* L, OffsetSize bits);
-
- private:
- bool has_double_zero_reg_set_ = false;
-
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
- // succeeds, otherwise falls through if result is saturated. On return
- // 'result' either holds answer, or is clobbered on fall through.
- void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
- Label* done);
-
- void CallCFunctionHelper(Register function_base, int16_t function_offset,
- int num_reg_arguments, int num_double_arguments);
-
- void CompareF(SecondaryField sizeField, FPUCondition cc, FPURegister cmp1,
- FPURegister cmp2);
-
- void CompareIsNanF(SecondaryField sizeField, FPURegister cmp1,
- FPURegister cmp2);
-
- void BranchShortMSA(MSABranchDF df, Label* target, MSABranchCondition cond,
- MSARegister wt, BranchDelaySlot bd = PROTECT);
-
- // TODO(mips) Reorder parameters so out parameters come last.
- bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits);
- bool CalculateOffset(Label* L, int32_t* offset, OffsetSize bits,
- Register* scratch, const Operand& rt);
-
- void BranchShortHelperR6(int32_t offset, Label* L);
- void BranchShortHelper(int16_t offset, Label* L, BranchDelaySlot bdslot);
- bool BranchShortHelperR6(int32_t offset, Label* L, Condition cond,
- Register rs, const Operand& rt);
- bool BranchShortHelper(int16_t offset, Label* L, Condition cond, Register rs,
- const Operand& rt, BranchDelaySlot bdslot);
- bool BranchShortCheck(int32_t offset, Label* L, Condition cond, Register rs,
- const Operand& rt, BranchDelaySlot bdslot);
-
- void BranchAndLinkShortHelperR6(int32_t offset, Label* L);
- void BranchAndLinkShortHelper(int16_t offset, Label* L,
- BranchDelaySlot bdslot);
- void BranchAndLinkShort(int32_t offset, BranchDelaySlot bdslot = PROTECT);
- void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
- bool BranchAndLinkShortHelperR6(int32_t offset, Label* L, Condition cond,
- Register rs, const Operand& rt);
- bool BranchAndLinkShortHelper(int16_t offset, Label* L, Condition cond,
- Register rs, const Operand& rt,
- BranchDelaySlot bdslot);
- bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
- Register rs, const Operand& rt,
- BranchDelaySlot bdslot);
- void BranchAndLinkLong(Label* L, BranchDelaySlot bdslot);
-
- template <typename RoundFunc>
- void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode,
- RoundFunc round);
-
- template <typename RoundFunc>
- void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode,
- RoundFunc round);
-
- // Push a fixed frame, consisting of ra, fp.
- void PushCommonFrame(Register marker_reg = no_reg);
-};
-
-// MacroAssembler implements a collection of frequently used macros.
-class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
- public:
- using TurboAssembler::TurboAssembler;
-
- // It assumes that the arguments are located below the stack pointer.
- // argc is the number of arguments not including the receiver.
- // TODO(victorgomes): Remove this function once we stick with the reversed
- // arguments order.
- void LoadReceiver(Register dest, Register argc) {
- Lw(dest, MemOperand(sp, 0));
- }
-
- void StoreReceiver(Register rec, Register argc, Register scratch) {
- Sw(rec, MemOperand(sp, 0));
- }
-
- // Swap two registers. If the scratch register is omitted then a slightly
- // less efficient form using xor instead of mov is emitted.
- void Swap(Register reg1, Register reg2, Register scratch = no_reg);
-
- void TestCodeTIsMarkedForDeoptimizationAndJump(Register codet,
- Register scratch,
- Condition cond, Label* target);
- Operand ClearedValue() const;
-
- void PushRoot(RootIndex index) {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- LoadRoot(scratch, index);
- Push(scratch);
- }
-
- // Compare the object in a register to a value and jump if they are equal.
- void JumpIfRoot(Register with, RootIndex index, Label* if_equal) {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- LoadRoot(scratch, index);
- Branch(if_equal, eq, with, Operand(scratch));
- }
-
- // Compare the object in a register to a value and jump if they are not equal.
- void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal) {
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- LoadRoot(scratch, index);
- Branch(if_not_equal, ne, with, Operand(scratch));
- }
-
- // Checks if value is in range [lower_limit, higher_limit] using a single
- // comparison.
- void JumpIfIsInRange(Register value, unsigned lower_limit,
- unsigned higher_limit, Label* on_in_range);
-
- // ---------------------------------------------------------------------------
- // GC Support
-
- // Notify the garbage collector that we wrote a pointer into an object.
- // |object| is the object being stored into, |value| is the object being
- // stored. value and scratch registers are clobbered by the operation.
- // The offset is the offset from the start of the object, not the offset from
- // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
- void RecordWriteField(
- Register object, int offset, Register value, Register scratch,
- RAStatus ra_status, SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
- SmiCheck smi_check = SmiCheck::kInline);
-
- // For a given |object| notify the garbage collector that the slot |address|
- // has been written. |value| is the object being stored. The value and
- // address registers are clobbered by the operation.
- void RecordWrite(
- Register object, Register address, Register value, RAStatus ra_status,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
- SmiCheck smi_check = SmiCheck::kInline);
-
- void Pref(int32_t hint, const MemOperand& rs);
-
- // Enter exit frame.
- // argc - argument count to be dropped by LeaveExitFrame.
- // save_doubles - saves FPU registers on stack, currently disabled.
- // stack_space - extra stack space.
- void EnterExitFrame(bool save_doubles, int stack_space = 0,
- StackFrame::Type frame_type = StackFrame::EXIT);
-
- // Leave the current exit frame.
- void LeaveExitFrame(bool save_doubles, Register arg_count,
- bool do_return = NO_EMIT_RETURN,
- bool argument_count_is_length = false);
-
- // Make sure the stack is aligned. Only emits code in debug mode.
- void AssertStackIsAligned();
-
- // Load the global proxy from the current context.
- void LoadGlobalProxy(Register dst) {
- LoadNativeContextSlot(dst, Context::GLOBAL_PROXY_INDEX);
- }
-
- void LoadNativeContextSlot(Register dst, int index);
-
- // -------------------------------------------------------------------------
- // JavaScript invokes.
-
- // Invoke the JavaScript function code by either calling or jumping.
- void InvokeFunctionCode(Register function, Register new_target,
- Register expected_parameter_count,
- Register actual_parameter_count, InvokeType type);
-
- // On function call, call into the debugger if necessary.
- void CheckDebugHook(Register fun, Register new_target,
- Register expected_parameter_count,
- Register actual_parameter_count);
-
- // Invoke the JavaScript function in the given register. Changes the
- // current context to the context in the function before invoking.
- void InvokeFunctionWithNewTarget(Register function, Register new_target,
- Register actual_parameter_count,
- InvokeType type);
-
- void InvokeFunction(Register function, Register expected_parameter_count,
- Register actual_parameter_count, InvokeType type);
-
- // Exception handling.
-
- // Push a new stack handler and link into stack handler chain.
- void PushStackHandler(); - - // Unlink the stack handler on top of the stack from the stack handler chain. - // Must preserve the result register. - void PopStackHandler(); - - // ------------------------------------------------------------------------- - // Support functions. - - void GetObjectType(Register function, Register map, Register type_reg); - - void GetInstanceTypeRange(Register map, Register type_reg, - InstanceType lower_limit, Register range); - - // ------------------------------------------------------------------------- - // Runtime calls. - - // Call a runtime routine. - void CallRuntime(const Runtime::Function* f, int num_arguments, - SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore); - - // Convenience function: Same as above, but takes the fid instead. - void CallRuntime(Runtime::FunctionId fid, - SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) { - const Runtime::Function* function = Runtime::FunctionForId(fid); - CallRuntime(function, function->nargs, save_doubles); - } - - // Convenience function: Same as above, but takes the fid instead. - void CallRuntime(Runtime::FunctionId id, int num_arguments, - SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) { - CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles); - } - - // Convenience function: tail call a runtime routine (jump). - void TailCallRuntime(Runtime::FunctionId fid); - - // Jump to the builtin routine. - void JumpToExternalReference(const ExternalReference& builtin, - BranchDelaySlot bd = PROTECT, - bool builtin_exit_frame = false); - - // Generates a trampoline to jump to the off-heap instruction stream. - void JumpToOffHeapInstructionStream(Address entry); - - // --------------------------------------------------------------------------- - // In-place weak references. - void LoadWeakValue(Register out, Register in, Label* target_if_cleared); - - // ------------------------------------------------------------------------- - // StatsCounter support. - - void IncrementCounter(StatsCounter* counter, int value, Register scratch1, - Register scratch2) { - if (!v8_flags.native_code_counters) return; - EmitIncrementCounter(counter, value, scratch1, scratch2); - } - void EmitIncrementCounter(StatsCounter* counter, int value, Register scratch1, - Register scratch2); - void DecrementCounter(StatsCounter* counter, int value, Register scratch1, - Register scratch2) { - if (!v8_flags.native_code_counters) return; - EmitDecrementCounter(counter, value, scratch1, scratch2); - } - void EmitDecrementCounter(StatsCounter* counter, int value, Register scratch1, - Register scratch2); - - // ------------------------------------------------------------------------- - // Stack limit utilities - - enum StackLimitKind { kInterruptStackLimit, kRealStackLimit }; - void LoadStackLimit(Register destination, StackLimitKind kind); - void StackOverflowCheck(Register num_args, Register scratch1, - Register scratch2, Label* stack_overflow); - - // --------------------------------------------------------------------------- - // Smi utilities. - - void SmiTag(Register reg) { Addu(reg, reg, reg); } - - void SmiTag(Register dst, Register src) { Addu(dst, src, src); } - - // Test if the register contains a smi. - inline void SmiTst(Register value, Register scratch) { - And(scratch, value, Operand(kSmiTagMask)); - } - - // Jump if the register contains a non-smi. 
- void JumpIfNotSmi(Register value, Label* not_smi_label,
- BranchDelaySlot bd = PROTECT);
-
- // Abort execution if argument is a smi, enabled via --debug-code.
- void AssertNotSmi(Register object);
- void AssertSmi(Register object);
-
- // Abort execution if argument is not a Constructor, enabled via --debug-code.
- void AssertConstructor(Register object);
-
- // Abort execution if argument is not a JSFunction, enabled via --debug-code.
- void AssertFunction(Register object);
-
- // Abort execution if argument is not a callable JSFunction, enabled via
- // --debug-code.
- void AssertCallableFunction(Register object);
-
- // Abort execution if argument is not a JSBoundFunction,
- // enabled via --debug-code.
- void AssertBoundFunction(Register object);
-
- // Abort execution if argument is not a JSGeneratorObject (or subclass),
- // enabled via --debug-code.
- void AssertGeneratorObject(Register object);
-
- // Abort execution if argument is not undefined or an AllocationSite, enabled
- // via --debug-code.
- void AssertUndefinedOrAllocationSite(Register object, Register scratch);
-
- template <typename Field>
- void DecodeField(Register dst, Register src) {
- Ext(dst, src, Field::kShift, Field::kSize);
- }
-
- template <typename Field>
- void DecodeField(Register reg) {
- DecodeField<Field>(reg, reg);
- }
-
- private:
- // Helper functions for generating invokes.
- void InvokePrologue(Register expected_parameter_count,
- Register actual_parameter_count, Label* done,
- InvokeType type);
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
-};
-
-template <typename Func>
-void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
- Func GetLabelFunction) {
- Label here;
- BlockTrampolinePoolFor(case_count + kSwitchTablePrologueSize);
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
- if (kArchVariant >= kMips32r6) {
- addiupc(scratch, 5);
- Lsa(scratch, scratch, index, kPointerSizeLog2);
- lw(scratch, MemOperand(scratch));
- } else {
- push(ra);
- bal(&here);
- sll(scratch, index, kPointerSizeLog2); // Branch delay slot.
- bind(&here);
- addu(scratch, scratch, ra);
- pop(ra);
- lw(scratch, MemOperand(scratch, 6 * v8::internal::kInstrSize));
- }
- jr(scratch);
- nop(); // Branch delay slot nop.
- for (size_t index = 0; index < case_count; ++index) {
- dd(GetLabelFunction(index));
- }
-}
-
-#define ACCESS_MASM(masm) masm->
-
-} // namespace internal
-} // namespace v8
-
-#endif // V8_CODEGEN_MIPS_MACRO_ASSEMBLER_MIPS_H_
diff --git a/src/codegen/mips/register-mips.h b/src/codegen/mips/register-mips.h
deleted file mode 100644
index 26f04401b9..0000000000
--- a/src/codegen/mips/register-mips.h
+++ /dev/null
@@ -1,299 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CODEGEN_MIPS_REGISTER_MIPS_H_
-#define V8_CODEGEN_MIPS_REGISTER_MIPS_H_
-
-#include "src/codegen/mips/constants-mips.h"
-#include "src/codegen/register-base.h"
-
-namespace v8 {
-namespace internal {
-
-// clang-format off
-#define GENERAL_REGISTERS(V) \
- V(zero_reg) V(at) V(v0) V(v1) V(a0) V(a1) V(a2) V(a3) \
- V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) V(t6) V(t7) \
- V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) V(t8) V(t9) \
- V(k0) V(k1) V(gp) V(sp) V(fp) V(ra)
-
-#define ALLOCATABLE_GENERAL_REGISTERS(V) \
- V(a0) V(a1) V(a2) V(a3) \
- V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) V(t6) V(s7) \
- V(v0) V(v1)
-
-#define DOUBLE_REGISTERS(V) \
- V(f0) V(f1) V(f2) V(f3) V(f4) V(f5) V(f6) V(f7) \
- V(f8) V(f9) V(f10) V(f11) V(f12) V(f13) V(f14) V(f15) \
- V(f16) V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) \
- V(f24) V(f25) V(f26) V(f27) V(f28) V(f29) V(f30) V(f31)
-
-// Currently, MIPS just uses even floating-point registers, except
-// for C function param registers.
-#define DOUBLE_USE_REGISTERS(V) \
- V(f0) V(f2) V(f4) V(f6) V(f8) V(f10) V(f12) V(f13) \
- V(f14) V(f15) V(f16) V(f18) V(f20) V(f22) V(f24) V(f26) \
- V(f28) V(f30)
-
-#define FLOAT_REGISTERS DOUBLE_REGISTERS
-#define SIMD128_REGISTERS(V) \
- V(w0) V(w1) V(w2) V(w3) V(w4) V(w5) V(w6) V(w7) \
- V(w8) V(w9) V(w10) V(w11) V(w12) V(w13) V(w14) V(w15) \
- V(w16) V(w17) V(w18) V(w19) V(w20) V(w21) V(w22) V(w23) \
- V(w24) V(w25) V(w26) V(w27) V(w28) V(w29) V(w30) V(w31)
-
-#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
- V(f0) V(f2) V(f4) V(f6) V(f8) V(f10) V(f12) V(f14) \
- V(f16) V(f18) V(f20) V(f22) V(f24)
-// clang-format on
-
-// Register lists.
-// Note that the bit values must match those used in actual instruction
-// encoding.
-const int kNumRegs = 32;
-
-// CPU Registers.
-//
-// 1) We would prefer to use an enum, but enum values are assignment-
-// compatible with int, which has caused code-generation bugs.
-//
-// 2) We would prefer to use a class instead of a struct but we don't like
-// the register initialization to depend on the particular initialization
-// order (which appears to be different on OS X, Linux, and Windows for the
-// installed versions of C++ we tried). Using a struct permits C-style
-// "initialization". Also, the Register objects cannot be const as this
-// forces initialization stubs in MSVC, making us dependent on initialization
-// order.
-//
-// 3) By not using an enum, we are possibly preventing the compiler from
-// doing certain constant folds, which may significantly reduce the
-// code generated for some assembly instructions (because they boil down
-// to a few constants). If this is a problem, we could change the code
-// such that we use an enum in optimized mode, and the struct in debug
-// mode. This way we get the compile-time error checking in debug mode
-// and best performance in optimized code.
-
-// -----------------------------------------------------------------------------
-// Implementation of Register and FPURegister.
-
-enum RegisterCode {
-#define REGISTER_CODE(R) kRegCode_##R,
- GENERAL_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kRegAfterLast
-};
-
-class Register : public RegisterBase<Register, kRegAfterLast> {
- public:
-#if defined(V8_TARGET_LITTLE_ENDIAN)
- static constexpr int kMantissaOffset = 0;
- static constexpr int kExponentOffset = 4;
-#elif defined(V8_TARGET_BIG_ENDIAN)
- static constexpr int kMantissaOffset = 4;
- static constexpr int kExponentOffset = 0;
-#else
-#error Unknown endianness
-#endif
-
- private:
- friend class RegisterBase<Register, kRegAfterLast>;
- explicit constexpr Register(int code) : RegisterBase(code) {}
-};
-
-// s7: context register
-// s3: scratch register
-// s4: scratch register 2
-#define DECLARE_REGISTER(R) \
- constexpr Register R = Register::from_code(kRegCode_##R);
-GENERAL_REGISTERS(DECLARE_REGISTER)
-#undef DECLARE_REGISTER
-constexpr Register no_reg = Register::no_reg();
-
-int ToNumber(Register reg);
-
-Register ToRegister(int num);
-
-// Returns the number of padding slots needed for stack pointer alignment.
-constexpr int ArgumentPaddingSlots(int argument_count) {
- // No argument padding required.
- return 0;
-}
-
-constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap;
-constexpr bool kSimdMaskRegisters = false;
-
-enum DoubleRegisterCode {
-#define REGISTER_CODE(R) kDoubleCode_##R,
- DOUBLE_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kDoubleAfterLast
-};
-
-// Coprocessor register.
-class FPURegister : public RegisterBase<FPURegister, kDoubleAfterLast> {
- public:
- FPURegister low() const {
- // Find low reg of a Double-reg pair, which is the reg itself.
- DCHECK_EQ(code() % 2, 0); // Specified Double reg must be even.
- return FPURegister::from_code(code());
- }
- FPURegister high() const {
- // Find high reg of a Double-reg pair, which is reg + 1.
- DCHECK_EQ(code() % 2, 0); // Specified Double reg must be even.
- return FPURegister::from_code(code() + 1);
- }
-
- private:
- friend class RegisterBase<FPURegister, kDoubleAfterLast>;
- explicit constexpr FPURegister(int code) : RegisterBase(code) {}
-};
-
-enum MSARegisterCode {
-#define REGISTER_CODE(R) kMsaCode_##R,
- SIMD128_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kMsaAfterLast
-};
-
-// MIPS SIMD (MSA) register
-class MSARegister : public RegisterBase<MSARegister, kMsaAfterLast> {
- friend class RegisterBase<MSARegister, kMsaAfterLast>;
- explicit constexpr MSARegister(int code) : RegisterBase(code) {}
-};
-
-// A few double registers are reserved: one as a scratch register and one to
-// hold 0.0.
-// f28: 0.0
-// f30: scratch register.
-
-// V8 now supports the O32 ABI, and the FPU Registers are organized as 32
-// 32-bit registers, f0 through f31. When used as 'double' they are used
-// in pairs, starting with the even numbered register. So a double operation
-// on f0 really uses f0 and f1.
-// (Modern mips hardware also supports 32 64-bit registers, via setting
-// (privileged) Status Register FR bit to 1. This is used by the N32 ABI,
-// but it is not in common use. Someday we will want to support this in v8.)
-
-// For O32 ABI, Floats and Doubles refer to the same set of 32 32-bit registers.
-using FloatRegister = FPURegister;
-
-using DoubleRegister = FPURegister;
-
-#define DECLARE_DOUBLE_REGISTER(R) \
- constexpr DoubleRegister R = DoubleRegister::from_code(kDoubleCode_##R);
-DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER)
-#undef DECLARE_DOUBLE_REGISTER
-
-constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
-
-// SIMD registers.
-using Simd128Register = MSARegister;
-
-#define DECLARE_SIMD128_REGISTER(R) \
-  constexpr Simd128Register R = Simd128Register::from_code(kMsaCode_##R);
-SIMD128_REGISTERS(DECLARE_SIMD128_REGISTER)
-#undef DECLARE_SIMD128_REGISTER
-
-const Simd128Register no_msareg = Simd128Register::no_reg();
-
-// Register aliases.
-// cp is assumed to be a callee saved register.
-constexpr Register kRootRegister = s6;
-constexpr Register cp = s7;
-constexpr Register kScratchReg = s3;
-constexpr Register kScratchReg2 = s4;
-constexpr DoubleRegister kScratchDoubleReg = f30;
-constexpr DoubleRegister kDoubleRegZero = f28;
-// Used on mips32r6 for compare operations.
-constexpr DoubleRegister kDoubleCompareReg = f26;
-// MSA zero and scratch regs must have the same numbers as FPU zero and scratch
-constexpr Simd128Register kSimd128RegZero = w28;
-constexpr Simd128Register kSimd128ScratchReg = w30;
-
-// FPU (coprocessor 1) control registers.
-// Currently only FCSR (#31) is implemented.
-struct FPUControlRegister {
-  bool is_valid() const { return reg_code == kFCSRRegister; }
-  bool is(FPUControlRegister creg) const { return reg_code == creg.reg_code; }
-  int code() const {
-    DCHECK(is_valid());
-    return reg_code;
-  }
-  int bit() const {
-    DCHECK(is_valid());
-    return 1 << reg_code;
-  }
-  void setcode(int f) {
-    reg_code = f;
-    DCHECK(is_valid());
-  }
-  // Unfortunately we can't make this private in a struct.
-  int reg_code;
-};
-
-constexpr FPUControlRegister no_fpucreg = {kInvalidFPUControlRegister};
-constexpr FPUControlRegister FCSR = {kFCSRRegister};
-
-// MSA control registers
-struct MSAControlRegister {
-  bool is_valid() const {
-    return (reg_code == kMSAIRRegister) || (reg_code == kMSACSRRegister);
-  }
-  bool is(MSAControlRegister creg) const { return reg_code == creg.reg_code; }
-  int code() const {
-    DCHECK(is_valid());
-    return reg_code;
-  }
-  int bit() const {
-    DCHECK(is_valid());
-    return 1 << reg_code;
-  }
-  void setcode(int f) {
-    reg_code = f;
-    DCHECK(is_valid());
-  }
-  // Unfortunately we can't make this private in a struct.
-  int reg_code;
-};
-
-constexpr MSAControlRegister no_msacreg = {kInvalidMSAControlRegister};
-constexpr MSAControlRegister MSAIR = {kMSAIRRegister};
-constexpr MSAControlRegister MSACSR = {kMSACSRRegister};
-
-// Define {RegisterName} methods for the register types.
-DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS)
-DEFINE_REGISTER_NAMES(FPURegister, DOUBLE_REGISTERS)
-DEFINE_REGISTER_NAMES(MSARegister, SIMD128_REGISTERS)
-
-// Give alias names to registers for calling conventions.
-constexpr Register kReturnRegister0 = v0;
-constexpr Register kReturnRegister1 = v1;
-constexpr Register kReturnRegister2 = a0;
-constexpr Register kJSFunctionRegister = a1;
-constexpr Register kContextRegister = s7;
-constexpr Register kAllocateSizeRegister = a0;
-constexpr Register kInterpreterAccumulatorRegister = v0;
-constexpr Register kInterpreterBytecodeOffsetRegister = t4;
-constexpr Register kInterpreterBytecodeArrayRegister = t5;
-constexpr Register kInterpreterDispatchTableRegister = t6;
-
-constexpr Register kJavaScriptCallArgCountRegister = a0;
-constexpr Register kJavaScriptCallCodeStartRegister = a2;
-constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
-constexpr Register kJavaScriptCallNewTargetRegister = a3;
-constexpr Register kJavaScriptCallExtraArg1Register = a2;
-
-constexpr Register kOffHeapTrampolineRegister = at;
-constexpr Register kRuntimeCallFunctionRegister = a1;
-constexpr Register kRuntimeCallArgCountRegister = a0;
-constexpr Register kRuntimeCallArgvRegister = a2;
-constexpr Register kWasmInstanceRegister = a0;
-constexpr Register kWasmCompileLazyFuncIndexRegister = t0;
-
-constexpr DoubleRegister kFPReturnRegister0 = f0;
-
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_CODEGEN_MIPS_REGISTER_MIPS_H_
diff --git a/src/codegen/mips/reglist-mips.h b/src/codegen/mips/reglist-mips.h
deleted file mode 100644
index 5c458858f6..0000000000
--- a/src/codegen/mips/reglist-mips.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2022 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_CODEGEN_MIPS_REGLIST_MIPS_H_
-#define V8_CODEGEN_MIPS_REGLIST_MIPS_H_
-
-#include "src/codegen/mips/constants-mips.h"
-#include "src/codegen/register-arch.h"
-#include "src/codegen/reglist-base.h"
-
-namespace v8 {
-namespace internal {
-
-using RegList = RegListBase<Register>;
-using DoubleRegList = RegListBase<DoubleRegister>;
-ASSERT_TRIVIALLY_COPYABLE(RegList);
-ASSERT_TRIVIALLY_COPYABLE(DoubleRegList);
-
-const RegList kJSCallerSaved = {v0, v1, a0, a1, a2, a3, t0,
-                                t1, t2, t3, t4, t5, t6, t7};
-
-const int kNumJSCallerSaved = 14;
-
-// Callee-saved registers preserved when switching from C to JavaScript.
-const RegList kCalleeSaved = {s0, // s0 - s1, // s1 - s2, // s2 - s3, // s3 - s4, // s4 - s5, // s5 - s6, // s6 (roots in Javascript code) - s7, // s7 (cp in Javascript code) - fp}; // fp/s8 - -const int kNumCalleeSaved = 9; - -const DoubleRegList kCalleeSavedFPU = {f20, f22, f24, f26, f28, f30}; - -const int kNumCalleeSavedFPU = 6; - -const DoubleRegList kCallerSavedFPU = {f0, f2, f4, f6, f8, - f10, f12, f14, f16, f18}; - -} // namespace internal -} // namespace v8 - -#endif // V8_CODEGEN_MIPS_REGLIST_MIPS_H_ diff --git a/src/codegen/register-arch.h b/src/codegen/register-arch.h index 8ec621ce17..a97c2cc2b2 100644 --- a/src/codegen/register-arch.h +++ b/src/codegen/register-arch.h @@ -17,8 +17,6 @@ #include "src/codegen/arm/register-arm.h" #elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 #include "src/codegen/ppc/register-ppc.h" -#elif V8_TARGET_ARCH_MIPS -#include "src/codegen/mips/register-mips.h" #elif V8_TARGET_ARCH_MIPS64 #include "src/codegen/mips64/register-mips64.h" #elif V8_TARGET_ARCH_LOONG64 diff --git a/src/codegen/reglist.h b/src/codegen/reglist.h index ea9358b0c4..2aa379407a 100644 --- a/src/codegen/reglist.h +++ b/src/codegen/reglist.h @@ -15,8 +15,6 @@ #include "src/codegen/arm/reglist-arm.h" #elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 #include "src/codegen/ppc/reglist-ppc.h" -#elif V8_TARGET_ARCH_MIPS -#include "src/codegen/mips/reglist-mips.h" #elif V8_TARGET_ARCH_MIPS64 #include "src/codegen/mips64/reglist-mips64.h" #elif V8_TARGET_ARCH_LOONG64 diff --git a/src/codegen/reloc-info.cc b/src/codegen/reloc-info.cc index 70b85ef077..12c2f9641a 100644 --- a/src/codegen/reloc-info.cc +++ b/src/codegen/reloc-info.cc @@ -309,11 +309,10 @@ bool RelocInfo::OffHeapTargetIsCodedSpecially() { #if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_ARM64) || \ defined(V8_TARGET_ARCH_X64) return false; -#elif defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_MIPS) || \ - defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_PPC) || \ - defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_S390) || \ - defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_LOONG64) || \ - defined(V8_TARGET_ARCH_RISCV32) +#elif defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_MIPS64) || \ + defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64) || \ + defined(V8_TARGET_ARCH_S390) || defined(V8_TARGET_ARCH_RISCV64) || \ + defined(V8_TARGET_ARCH_LOONG64) || defined(V8_TARGET_ARCH_RISCV32) return true; #endif } diff --git a/src/codegen/reloc-info.h b/src/codegen/reloc-info.h index 8053d6a18e..12358d7b28 100644 --- a/src/codegen/reloc-info.h +++ b/src/codegen/reloc-info.h @@ -71,7 +71,7 @@ class RelocInfo { EXTERNAL_REFERENCE, // The address of an external C++ function. INTERNAL_REFERENCE, // An address inside the same function. - // Encoded internal reference, used only on RISCV64, RISCV32, MIPS, MIPS64 + // Encoded internal reference, used only on RISCV64, RISCV32, MIPS64 // and PPC. 
INTERNAL_REFERENCE_ENCODED, diff --git a/src/common/globals.h b/src/common/globals.h index 467a5197ec..68634d7d1c 100644 --- a/src/common/globals.h +++ b/src/common/globals.h @@ -46,9 +46,6 @@ namespace internal { #if (V8_TARGET_ARCH_PPC64 && !V8_HOST_ARCH_PPC64) #define USE_SIMULATOR 1 #endif -#if (V8_TARGET_ARCH_MIPS && !V8_HOST_ARCH_MIPS) -#define USE_SIMULATOR 1 -#endif #if (V8_TARGET_ARCH_MIPS64 && !V8_HOST_ARCH_MIPS64) #define USE_SIMULATOR 1 #endif @@ -428,7 +425,7 @@ constexpr bool kPlatformRequiresCodeRange = false; constexpr size_t kMaximalCodeRangeSize = 0 * MB; constexpr size_t kMinimumCodeRangeSize = 0 * MB; constexpr size_t kMinExpectedOSPageSize = 64 * KB; // OS page on PPC Linux -#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_RISCV32 +#elif V8_TARGET_ARCH_RISCV32 constexpr bool kPlatformRequiresCodeRange = false; constexpr size_t kMaximalCodeRangeSize = 2048LL * MB; constexpr size_t kMinimumCodeRangeSize = 0 * MB; @@ -1359,9 +1356,7 @@ enum AllocationSiteMode { enum class AllocationSiteUpdateMode { kUpdate, kCheckOnly }; // The mips architecture prior to revision 5 has inverted encoding for sNaN. -#if (V8_TARGET_ARCH_MIPS && !defined(_MIPS_ARCH_MIPS32R6) && \ - (!defined(USE_SIMULATOR) || !defined(_MIPS_TARGET_SIMULATOR))) || \ - (V8_TARGET_ARCH_MIPS64 && !defined(_MIPS_ARCH_MIPS64R6) && \ +#if (V8_TARGET_ARCH_MIPS64 && !defined(_MIPS_ARCH_MIPS64R6) && \ (!defined(USE_SIMULATOR) || !defined(_MIPS_TARGET_SIMULATOR))) constexpr uint32_t kHoleNanUpper32 = 0xFFFF7FFF; constexpr uint32_t kHoleNanLower32 = 0xFFFF7FFF; diff --git a/src/compiler/backend/instruction-codes.h b/src/compiler/backend/instruction-codes.h index 2fe2cd1a74..31dfc864b2 100644 --- a/src/compiler/backend/instruction-codes.h +++ b/src/compiler/backend/instruction-codes.h @@ -13,8 +13,6 @@ #include "src/compiler/backend/arm64/instruction-codes-arm64.h" #elif V8_TARGET_ARCH_IA32 #include "src/compiler/backend/ia32/instruction-codes-ia32.h" -#elif V8_TARGET_ARCH_MIPS -#include "src/compiler/backend/mips/instruction-codes-mips.h" #elif V8_TARGET_ARCH_MIPS64 #include "src/compiler/backend/mips64/instruction-codes-mips64.h" #elif V8_TARGET_ARCH_LOONG64 diff --git a/src/compiler/backend/instruction-selector.cc b/src/compiler/backend/instruction-selector.cc index 27cace8e42..2ef46c02eb 100644 --- a/src/compiler/backend/instruction-selector.cc +++ b/src/compiler/backend/instruction-selector.cc @@ -2698,8 +2698,7 @@ void InstructionSelector::VisitWord32PairShr(Node* node) { UNIMPLEMENTED(); } void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); } #endif // V8_TARGET_ARCH_64_BIT -#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS && \ - !V8_TARGET_ARCH_RISCV32 +#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_RISCV32 void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) { UNIMPLEMENTED(); } @@ -2735,7 +2734,7 @@ void InstructionSelector::VisitWord32AtomicPairExchange(Node* node) { void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) { UNIMPLEMENTED(); } -#endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS +#endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM // && !V8_TARGET_ARCH_RISCV32 #if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && \ diff --git a/src/compiler/backend/mips/code-generator-mips.cc b/src/compiler/backend/mips/code-generator-mips.cc deleted file mode 100644 index 09c5070896..0000000000 --- a/src/compiler/backend/mips/code-generator-mips.cc +++ /dev/null @@ 
-1,4443 +0,0 @@ -// Copyright 2014 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/codegen/assembler-inl.h" -#include "src/codegen/callable.h" -#include "src/codegen/macro-assembler.h" -#include "src/codegen/optimized-compilation-info.h" -#include "src/compiler/backend/code-generator-impl.h" -#include "src/compiler/backend/code-generator.h" -#include "src/compiler/backend/gap-resolver.h" -#include "src/compiler/node-matchers.h" -#include "src/compiler/osr.h" -#include "src/heap/memory-chunk.h" - -#if V8_ENABLE_WEBASSEMBLY -#include "src/wasm/wasm-code-manager.h" -#endif // V8_ENABLE_WEBASSEMBLY - -namespace v8 { -namespace internal { -namespace compiler { - -#define __ tasm()-> - -// TODO(plind): consider renaming these macros. -#define TRACE_MSG(msg) \ - PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \ - __LINE__) - -#define TRACE_UNIMPL() \ - PrintF("UNIMPLEMENTED code_generator_mips: %s at line %d\n", __FUNCTION__, \ - __LINE__) - -// Adds Mips-specific methods to convert InstructionOperands. -class MipsOperandConverter final : public InstructionOperandConverter { - public: - MipsOperandConverter(CodeGenerator* gen, Instruction* instr) - : InstructionOperandConverter(gen, instr) {} - - FloatRegister OutputSingleRegister(size_t index = 0) { - return ToSingleRegister(instr_->OutputAt(index)); - } - - FloatRegister InputSingleRegister(size_t index) { - return ToSingleRegister(instr_->InputAt(index)); - } - - FloatRegister ToSingleRegister(InstructionOperand* op) { - // Single (Float) and Double register namespace is same on MIPS, - // both are typedefs of FPURegister. - return ToDoubleRegister(op); - } - - Register InputOrZeroRegister(size_t index) { - if (instr_->InputAt(index)->IsImmediate()) { - DCHECK_EQ(0, InputInt32(index)); - return zero_reg; - } - return InputRegister(index); - } - - DoubleRegister InputOrZeroDoubleRegister(size_t index) { - if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero; - - return InputDoubleRegister(index); - } - - DoubleRegister InputOrZeroSingleRegister(size_t index) { - if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero; - - return InputSingleRegister(index); - } - - Operand InputImmediate(size_t index) { - Constant constant = ToConstant(instr_->InputAt(index)); - switch (constant.type()) { - case Constant::kInt32: - return Operand(constant.ToInt32()); - case Constant::kFloat32: - return Operand::EmbeddedNumber(constant.ToFloat32()); - case Constant::kFloat64: - return Operand::EmbeddedNumber(constant.ToFloat64().value()); - case Constant::kInt64: - case Constant::kExternalReference: - case Constant::kCompressedHeapObject: - case Constant::kHeapObject: - // TODO(plind): Maybe we should handle ExtRef & HeapObj here? - // maybe not done on arm due to const pool ?? - break; - case Constant::kDelayedStringConstant: - return Operand::EmbeddedStringConstant( - constant.ToDelayedStringConstant()); - case Constant::kRpoNumber: - UNREACHABLE(); // TODO(titzer): RPO immediates on mips? 
- } - UNREACHABLE(); - } - - Operand InputOperand(size_t index) { - InstructionOperand* op = instr_->InputAt(index); - if (op->IsRegister()) { - return Operand(ToRegister(op)); - } - return InputImmediate(index); - } - - MemOperand MemoryOperand(size_t* first_index) { - const size_t index = *first_index; - switch (AddressingModeField::decode(instr_->opcode())) { - case kMode_None: - break; - case kMode_MRI: - *first_index += 2; - return MemOperand(InputRegister(index + 0), InputInt32(index + 1)); - case kMode_MRR: - // TODO(plind): r6 address mode, to be implemented ... - UNREACHABLE(); - } - UNREACHABLE(); - } - - MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); } - - MemOperand ToMemOperand(InstructionOperand* op) const { - DCHECK_NOT_NULL(op); - DCHECK(op->IsStackSlot() || op->IsFPStackSlot()); - return SlotToMemOperand(AllocatedOperand::cast(op)->index()); - } - - MemOperand SlotToMemOperand(int slot) const { - FrameOffset offset = frame_access_state()->GetFrameOffset(slot); - return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset()); - } -}; - -static inline bool HasRegisterInput(Instruction* instr, size_t index) { - return instr->InputAt(index)->IsRegister(); -} - -namespace { - -class OutOfLineRecordWrite final : public OutOfLineCode { - public: - OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register index, - Register value, Register scratch0, Register scratch1, - RecordWriteMode mode, StubCallMode stub_mode) - : OutOfLineCode(gen), - object_(object), - index_(index), - value_(value), - scratch0_(scratch0), - scratch1_(scratch1), - mode_(mode), -#if V8_ENABLE_WEBASSEMBLY - stub_mode_(stub_mode), -#endif // V8_ENABLE_WEBASSEMBLY - must_save_lr_(!gen->frame_access_state()->has_frame()), - zone_(gen->zone()) { - DCHECK(!AreAliased(object, index, scratch0, scratch1)); - DCHECK(!AreAliased(value, index, scratch0, scratch1)); - } - - void Generate() final { - __ CheckPageFlag(value_, scratch0_, - MemoryChunk::kPointersToHereAreInterestingMask, eq, - exit()); - __ Addu(scratch1_, object_, index_); - RememberedSetAction const remembered_set_action = - mode_ > RecordWriteMode::kValueIsMap || - FLAG_use_full_record_write_builtin - ? RememberedSetAction::kEmit - : RememberedSetAction::kOmit; - SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters() - ? SaveFPRegsMode::kSave - : SaveFPRegsMode::kIgnore; - if (must_save_lr_) { - // We need to save and restore ra if the frame was elided. - __ Push(ra); - } - - if (mode_ == RecordWriteMode::kValueIsEphemeronKey) { - __ CallEphemeronKeyBarrier(object_, scratch1_, save_fp_mode); -#if V8_ENABLE_WEBASSEMBLY - } else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) { - // A direct call to a wasm runtime stub defined in this module. - // Just encode the stub index. This will be patched when the code - // is added to the native module and copied into wasm code space. 
- __ CallRecordWriteStubSaveRegisters(object_, scratch1_, - remembered_set_action, save_fp_mode, - StubCallMode::kCallWasmRuntimeStub); -#endif // V8_ENABLE_WEBASSEMBLY - } else { - __ CallRecordWriteStubSaveRegisters(object_, scratch1_, - remembered_set_action, save_fp_mode); - } - if (must_save_lr_) { - __ Pop(ra); - } - } - - private: - Register const object_; - Register const index_; - Register const value_; - Register const scratch0_; - Register const scratch1_; - RecordWriteMode const mode_; -#if V8_ENABLE_WEBASSEMBLY - StubCallMode const stub_mode_; -#endif // V8_ENABLE_WEBASSEMBLY - bool must_save_lr_; - Zone* zone_; -}; - -#define CREATE_OOL_CLASS(ool_name, tasm_ool_name, T) \ - class ool_name final : public OutOfLineCode { \ - public: \ - ool_name(CodeGenerator* gen, T dst, T src1, T src2) \ - : OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \ - \ - void Generate() final { __ tasm_ool_name(dst_, src1_, src2_); } \ - \ - private: \ - T const dst_; \ - T const src1_; \ - T const src2_; \ - } - -CREATE_OOL_CLASS(OutOfLineFloat32Max, Float32MaxOutOfLine, FPURegister); -CREATE_OOL_CLASS(OutOfLineFloat32Min, Float32MinOutOfLine, FPURegister); -CREATE_OOL_CLASS(OutOfLineFloat64Max, Float64MaxOutOfLine, DoubleRegister); -CREATE_OOL_CLASS(OutOfLineFloat64Min, Float64MinOutOfLine, DoubleRegister); - -#undef CREATE_OOL_CLASS - -Condition FlagsConditionToConditionCmp(FlagsCondition condition) { - switch (condition) { - case kEqual: - return eq; - case kNotEqual: - return ne; - case kSignedLessThan: - return lt; - case kSignedGreaterThanOrEqual: - return ge; - case kSignedLessThanOrEqual: - return le; - case kSignedGreaterThan: - return gt; - case kUnsignedLessThan: - return lo; - case kUnsignedGreaterThanOrEqual: - return hs; - case kUnsignedLessThanOrEqual: - return ls; - case kUnsignedGreaterThan: - return hi; - case kUnorderedEqual: - case kUnorderedNotEqual: - break; - default: - break; - } - UNREACHABLE(); -} - -Condition FlagsConditionToConditionTst(FlagsCondition condition) { - switch (condition) { - case kNotEqual: - return ne; - case kEqual: - return eq; - default: - break; - } - UNREACHABLE(); -} - -FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, - FlagsCondition condition) { - switch (condition) { - case kEqual: - *predicate = true; - return EQ; - case kNotEqual: - *predicate = false; - return EQ; - case kUnsignedLessThan: - *predicate = true; - return OLT; - case kUnsignedGreaterThanOrEqual: - *predicate = false; - return OLT; - case kUnsignedLessThanOrEqual: - *predicate = true; - return OLE; - case kUnsignedGreaterThan: - *predicate = false; - return OLE; - case kUnorderedEqual: - case kUnorderedNotEqual: - *predicate = true; - break; - default: - *predicate = true; - break; - } - UNREACHABLE(); -} - -#define UNSUPPORTED_COND(opcode, condition) \ - StdoutStream{} << "Unsupported " << #opcode << " condition: \"" << condition \ - << "\""; \ - UNIMPLEMENTED(); - -} // namespace - -#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \ - do { \ - __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \ - __ sync(); \ - } while (0) - -#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \ - do { \ - __ sync(); \ - __ asm_instr(i.InputOrZeroRegister(2), i.MemoryOperand()); \ - __ sync(); \ - } while (0) - -#define ASSEMBLE_ATOMIC_BINOP(bin_instr) \ - do { \ - Label binop; \ - __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ - __ sync(); \ - __ bind(&binop); \ - __ Ll(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \ - __ 
bin_instr(i.TempRegister(1), i.OutputRegister(0), \ - Operand(i.InputRegister(2))); \ - __ Sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \ - __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \ - __ sync(); \ - } while (0) - -#define ASSEMBLE_ATOMIC64_LOGIC_BINOP(bin_instr, external) \ - do { \ - if (IsMipsArchVariant(kMips32r6)) { \ - Label binop; \ - Register oldval_low = \ - instr->OutputCount() >= 1 ? i.OutputRegister(0) : i.TempRegister(1); \ - Register oldval_high = \ - instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(2); \ - __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ - __ sync(); \ - __ bind(&binop); \ - __ llx(oldval_high, MemOperand(i.TempRegister(0), 4)); \ - __ ll(oldval_low, MemOperand(i.TempRegister(0), 0)); \ - __ bin_instr(i.TempRegister(1), i.TempRegister(2), oldval_low, \ - oldval_high, i.InputRegister(2), i.InputRegister(3)); \ - __ scx(i.TempRegister(2), MemOperand(i.TempRegister(0), 4)); \ - __ sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \ - __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \ - __ sync(); \ - } else { \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ - __ Addu(a0, i.InputRegister(0), i.InputRegister(1)); \ - __ PushCallerSaved(SaveFPRegsMode::kIgnore, v0, v1); \ - __ PrepareCallCFunction(3, 0, kScratchReg); \ - __ CallCFunction(ExternalReference::external(), 3, 0); \ - __ PopCallerSaved(SaveFPRegsMode::kIgnore, v0, v1); \ - } \ - } while (0) - -#define ASSEMBLE_ATOMIC64_ARITH_BINOP(bin_instr, external) \ - do { \ - if (IsMipsArchVariant(kMips32r6)) { \ - Label binop; \ - Register oldval_low = \ - instr->OutputCount() >= 1 ? i.OutputRegister(0) : i.TempRegister(1); \ - Register oldval_high = \ - instr->OutputCount() >= 2 ? 
i.OutputRegister(1) : i.TempRegister(2); \ - __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ - __ sync(); \ - __ bind(&binop); \ - __ llx(oldval_high, MemOperand(i.TempRegister(0), 4)); \ - __ ll(oldval_low, MemOperand(i.TempRegister(0), 0)); \ - __ bin_instr(i.TempRegister(1), i.TempRegister(2), oldval_low, \ - oldval_high, i.InputRegister(2), i.InputRegister(3), \ - kScratchReg, kScratchReg2); \ - __ scx(i.TempRegister(2), MemOperand(i.TempRegister(0), 4)); \ - __ sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \ - __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \ - __ sync(); \ - } else { \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ - __ Addu(a0, i.InputRegister(0), i.InputRegister(1)); \ - __ PushCallerSaved(SaveFPRegsMode::kIgnore, v0, v1); \ - __ PrepareCallCFunction(3, 0, kScratchReg); \ - __ CallCFunction(ExternalReference::external(), 3, 0); \ - __ PopCallerSaved(SaveFPRegsMode::kIgnore, v0, v1); \ - } \ - } while (0) - -#define ASSEMBLE_ATOMIC_BINOP_EXT(sign_extend, size, bin_instr) \ - do { \ - Label binop; \ - __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ - __ andi(i.TempRegister(3), i.TempRegister(0), 0x3); \ - __ Subu(i.TempRegister(0), i.TempRegister(0), Operand(i.TempRegister(3))); \ - __ sll(i.TempRegister(3), i.TempRegister(3), 3); \ - __ sync(); \ - __ bind(&binop); \ - __ Ll(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \ - __ ExtractBits(i.OutputRegister(0), i.TempRegister(1), i.TempRegister(3), \ - size, sign_extend); \ - __ bin_instr(i.TempRegister(2), i.OutputRegister(0), \ - Operand(i.InputRegister(2))); \ - __ InsertBits(i.TempRegister(1), i.TempRegister(2), i.TempRegister(3), \ - size); \ - __ Sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \ - __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \ - __ sync(); \ - } while (0) - -#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER() \ - do { \ - Label exchange; \ - __ sync(); \ - __ bind(&exchange); \ - __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ - __ Ll(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \ - __ mov(i.TempRegister(1), i.InputRegister(2)); \ - __ Sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \ - __ BranchShort(&exchange, eq, i.TempRegister(1), Operand(zero_reg)); \ - __ sync(); \ - } while (0) - -#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(sign_extend, size) \ - do { \ - Label exchange; \ - __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ - __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \ - __ Subu(i.TempRegister(0), i.TempRegister(0), Operand(i.TempRegister(1))); \ - __ sll(i.TempRegister(1), i.TempRegister(1), 3); \ - __ sync(); \ - __ bind(&exchange); \ - __ Ll(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ - __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \ - size, sign_extend); \ - __ InsertBits(i.TempRegister(2), i.InputRegister(2), i.TempRegister(1), \ - size); \ - __ Sc(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ - __ BranchShort(&exchange, eq, i.TempRegister(2), Operand(zero_reg)); \ - __ sync(); \ - } while (0) - -#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER() \ - do { \ - Label compareExchange; \ - Label exit; \ - __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ - __ sync(); \ - __ bind(&compareExchange); \ - __ Ll(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \ - __ BranchShort(&exit, ne, i.InputRegister(2), \ - 
Operand(i.OutputRegister(0))); \ - __ mov(i.TempRegister(2), i.InputRegister(3)); \ - __ Sc(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ - __ BranchShort(&compareExchange, eq, i.TempRegister(2), \ - Operand(zero_reg)); \ - __ bind(&exit); \ - __ sync(); \ - } while (0) - -#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(sign_extend, size) \ - do { \ - Label compareExchange; \ - Label exit; \ - __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \ - __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \ - __ Subu(i.TempRegister(0), i.TempRegister(0), Operand(i.TempRegister(1))); \ - __ sll(i.TempRegister(1), i.TempRegister(1), 3); \ - __ sync(); \ - __ bind(&compareExchange); \ - __ Ll(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ - __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \ - size, sign_extend); \ - __ ExtractBits(i.InputRegister(2), i.InputRegister(2), zero_reg, size, \ - sign_extend); \ - __ BranchShort(&exit, ne, i.InputRegister(2), \ - Operand(i.OutputRegister(0))); \ - __ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1), \ - size); \ - __ Sc(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \ - __ BranchShort(&compareExchange, eq, i.TempRegister(2), \ - Operand(zero_reg)); \ - __ bind(&exit); \ - __ sync(); \ - } while (0) - -#define ASSEMBLE_IEEE754_BINOP(name) \ - do { \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ - __ PrepareCallCFunction(0, 2, kScratchReg); \ - __ MovToFloatParameters(i.InputDoubleRegister(0), \ - i.InputDoubleRegister(1)); \ - __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \ - /* Move the result in the double result register. */ \ - __ MovFromFloatResult(i.OutputDoubleRegister()); \ - } while (0) - -#define ASSEMBLE_IEEE754_UNOP(name) \ - do { \ - FrameScope scope(tasm(), StackFrame::MANUAL); \ - __ PrepareCallCFunction(0, 1, kScratchReg); \ - __ MovToFloatParameter(i.InputDoubleRegister(0)); \ - __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \ - /* Move the result in the double result register. 
*/ \ - __ MovFromFloatResult(i.OutputDoubleRegister()); \ - } while (0) - -#define ASSEMBLE_F64X2_ARITHMETIC_BINOP(op) \ - do { \ - __ op(i.OutputSimd128Register(), i.InputSimd128Register(0), \ - i.InputSimd128Register(1)); \ - } while (0) - -#define ASSEMBLE_SIMD_EXTENDED_MULTIPLY(op0, op1) \ - do { \ - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); \ - __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); \ - __ op0(kSimd128ScratchReg, kSimd128RegZero, i.InputSimd128Register(0)); \ - __ op0(kSimd128RegZero, kSimd128RegZero, i.InputSimd128Register(1)); \ - __ op1(i.OutputSimd128Register(), kSimd128ScratchReg, kSimd128RegZero); \ - } while (0) - -void CodeGenerator::AssembleDeconstructFrame() { - __ mov(sp, fp); - __ Pop(ra, fp); -} - -void CodeGenerator::AssemblePrepareTailCall() { - if (frame_access_state()->has_frame()) { - __ lw(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset)); - __ lw(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - } - frame_access_state()->SetFrameAccessToSP(); -} -namespace { - -void AdjustStackPointerForTailCall(TurboAssembler* tasm, - FrameAccessState* state, - int new_slot_above_sp, - bool allow_shrinkage = true) { - int current_sp_offset = state->GetSPToFPSlotCount() + - StandardFrameConstants::kFixedSlotCountAboveFp; - int stack_slot_delta = new_slot_above_sp - current_sp_offset; - if (stack_slot_delta > 0) { - tasm->Subu(sp, sp, stack_slot_delta * kSystemPointerSize); - state->IncreaseSPDelta(stack_slot_delta); - } else if (allow_shrinkage && stack_slot_delta < 0) { - tasm->Addu(sp, sp, -stack_slot_delta * kSystemPointerSize); - state->IncreaseSPDelta(stack_slot_delta); - } -} - -} // namespace - -void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, - int first_unused_slot_offset) { - AdjustStackPointerForTailCall(tasm(), frame_access_state(), - first_unused_slot_offset, false); -} - -void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr, - int first_unused_slot_offset) { - AdjustStackPointerForTailCall(tasm(), frame_access_state(), - first_unused_slot_offset); -} - -// Check that {kJavaScriptCallCodeStartRegister} is correct. -void CodeGenerator::AssembleCodeStartRegisterCheck() { - __ ComputeCodeStartAddress(kScratchReg); - __ Assert(eq, AbortReason::kWrongFunctionCodeStart, - kJavaScriptCallCodeStartRegister, Operand(kScratchReg)); -} - -// Check if the code object is marked for deoptimization. If it is, then it -// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need -// to: -// 1. read from memory the word that contains that bit, which can be found in -// the flags in the referenced {CodeDataContainer} object; -// 2. test kMarkedForDeoptimizationBit in those flags; and -// 3. if it is not zero then it jumps to the builtin. -void CodeGenerator::BailoutIfDeoptimized() { - int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize; - __ lw(kScratchReg, MemOperand(kJavaScriptCallCodeStartRegister, offset)); - __ lw(kScratchReg, - FieldMemOperand(kScratchReg, - CodeDataContainer::kKindSpecificFlagsOffset)); - __ And(kScratchReg, kScratchReg, - Operand(1 << Code::kMarkedForDeoptimizationBit)); - __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode), - RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg)); -} - -// Assembles an instruction after register allocation, producing machine code. 
-CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
-    Instruction* instr) {
-  MipsOperandConverter i(this, instr);
-  InstructionCode opcode = instr->opcode();
-  ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
-  switch (arch_opcode) {
-    case kArchCallCodeObject: {
-      if (instr->InputAt(0)->IsImmediate()) {
-        __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
-      } else {
-        Register reg = i.InputRegister(0);
-        DCHECK_IMPLIES(
-            instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
-            reg == kJavaScriptCallCodeStartRegister);
-        __ Call(reg, reg, Code::kHeaderSize - kHeapObjectTag);
-      }
-      RecordCallPosition(instr);
-      frame_access_state()->ClearSPDelta();
-      break;
-    }
-    case kArchCallBuiltinPointer: {
-      DCHECK(!instr->InputAt(0)->IsImmediate());
-      Register builtin_index = i.InputRegister(0);
-      __ CallBuiltinByIndex(builtin_index);
-      RecordCallPosition(instr);
-      frame_access_state()->ClearSPDelta();
-      break;
-    }
-#if V8_ENABLE_WEBASSEMBLY
-    case kArchCallWasmFunction: {
-      if (instr->InputAt(0)->IsImmediate()) {
-        Constant constant = i.ToConstant(instr->InputAt(0));
-        Address wasm_code = static_cast<Address>(constant.ToInt32());
-        __ Call(wasm_code, constant.rmode());
-      } else {
-        __ Call(i.InputRegister(0));
-      }
-      RecordCallPosition(instr);
-      frame_access_state()->ClearSPDelta();
-      break;
-    }
-    case kArchTailCallWasm: {
-      if (instr->InputAt(0)->IsImmediate()) {
-        Constant constant = i.ToConstant(instr->InputAt(0));
-        Address wasm_code = static_cast<Address>(constant.ToInt32());
-        __ Jump(wasm_code, constant.rmode());
-      } else {
-        __ Jump(i.InputRegister(0));
-      }
-      frame_access_state()->ClearSPDelta();
-      frame_access_state()->SetFrameAccessToDefault();
-      break;
-    }
-#endif  // V8_ENABLE_WEBASSEMBLY
-    case kArchTailCallCodeObject: {
-      if (instr->InputAt(0)->IsImmediate()) {
-        __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
-      } else {
-        Register reg = i.InputRegister(0);
-        DCHECK_IMPLIES(
-            instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
-            reg == kJavaScriptCallCodeStartRegister);
-        __ Addu(reg, reg, Code::kHeaderSize - kHeapObjectTag);
-        __ Jump(reg);
-      }
-      frame_access_state()->ClearSPDelta();
-      frame_access_state()->SetFrameAccessToDefault();
-      break;
-    }
-    case kArchTailCallAddress: {
-      CHECK(!instr->InputAt(0)->IsImmediate());
-      Register reg = i.InputRegister(0);
-      DCHECK_IMPLIES(
-          instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
-          reg == kJavaScriptCallCodeStartRegister);
-      __ Jump(reg);
-      frame_access_state()->ClearSPDelta();
-      frame_access_state()->SetFrameAccessToDefault();
-      break;
-    }
-    case kArchCallJSFunction: {
-      Register func = i.InputRegister(0);
-      if (FLAG_debug_code) {
-        // Check the function's context matches the context argument.
-        __ lw(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
-        __ Assert(eq, AbortReason::kWrongFunctionContext, cp,
-                  Operand(kScratchReg));
-      }
-      static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
-      __ lw(a2, FieldMemOperand(func, JSFunction::kCodeOffset));
-      __ Addu(a2, a2, Code::kHeaderSize - kHeapObjectTag);
-      __ Call(a2);
-      RecordCallPosition(instr);
-      frame_access_state()->ClearSPDelta();
-      frame_access_state()->SetFrameAccessToDefault();
-      break;
-    }
-    case kArchPrepareCallCFunction: {
-      int const num_parameters = MiscField::decode(instr->opcode());
-      __ PrepareCallCFunction(num_parameters, kScratchReg);
-      // Frame alignment requires using FP-relative frame addressing.
-      frame_access_state()->SetFrameAccessToFP();
-      break;
-    }
-    case kArchSaveCallerRegisters: {
-      fp_mode_ =
-          static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
-      DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
-             fp_mode_ == SaveFPRegsMode::kSave);
-      // kReturnRegister0 should have been saved before entering the stub.
-      int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
-      DCHECK(IsAligned(bytes, kSystemPointerSize));
-      DCHECK_EQ(0, frame_access_state()->sp_delta());
-      frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
-      DCHECK(!caller_registers_saved_);
-      caller_registers_saved_ = true;
-      break;
-    }
-    case kArchRestoreCallerRegisters: {
-      DCHECK(fp_mode_ ==
-             static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
-      DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
-             fp_mode_ == SaveFPRegsMode::kSave);
-      // Don't overwrite the returned value.
-      int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
-      frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize));
-      DCHECK_EQ(0, frame_access_state()->sp_delta());
-      DCHECK(caller_registers_saved_);
-      caller_registers_saved_ = false;
-      break;
-    }
-    case kArchPrepareTailCall:
-      AssemblePrepareTailCall();
-      break;
-    case kArchCallCFunction: {
-      int const num_parameters = MiscField::decode(instr->opcode());
-#if V8_ENABLE_WEBASSEMBLY
-      Label start_call;
-      bool isWasmCapiFunction =
-          linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
-      // from start_call to return address.
-      int offset = __ root_array_available() ? 64 : 88;
-#endif  // V8_ENABLE_WEBASSEMBLY
-#if V8_HOST_ARCH_MIPS
-      if (FLAG_debug_code) {
-        offset += 16;
-      }
-#endif
-
-#if V8_ENABLE_WEBASSEMBLY
-      if (isWasmCapiFunction) {
-        // Put the return address in a stack slot.
-        __ mov(kScratchReg, ra);
-        __ bind(&start_call);
-        __ nal();
-        __ nop();
-        __ Addu(ra, ra, offset - 8);  // 8 = nop + nal
-        __ sw(ra, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
-        __ mov(ra, kScratchReg);
-      }
-#endif  // V8_ENABLE_WEBASSEMBLY
-
-      if (instr->InputAt(0)->IsImmediate()) {
-        ExternalReference ref = i.InputExternalReference(0);
-        __ CallCFunction(ref, num_parameters);
-      } else {
-        Register func = i.InputRegister(0);
-        __ CallCFunction(func, num_parameters);
-      }
-
-#if V8_ENABLE_WEBASSEMBLY
-      if (isWasmCapiFunction) {
-        CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call));
-        RecordSafepoint(instr->reference_map());
-      }
-#endif  // V8_ENABLE_WEBASSEMBLY
-
-      frame_access_state()->SetFrameAccessToDefault();
-      // Ideally, we should decrement SP delta to match the change of stack
-      // pointer in CallCFunction. However, for certain architectures (e.g.
-      // ARM), there may be more strict alignment requirement, causing old SP
-      // to be saved on the stack. In those cases, we can not calculate the SP
-      // delta statically.
-      frame_access_state()->ClearSPDelta();
-      if (caller_registers_saved_) {
-        // Need to re-sync SP delta introduced in kArchSaveCallerRegisters.
-        // Here, we assume the sequence to be:
-        //   kArchSaveCallerRegisters;
-        //   kArchCallCFunction;
-        //   kArchRestoreCallerRegisters;
-        int bytes =
-            __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
-        frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
-      }
-      break;
-    }
-    case kArchJmp:
-      AssembleArchJump(i.InputRpo(0));
-      break;
-    case kArchBinarySearchSwitch:
-      AssembleArchBinarySearchSwitch(instr);
-      break;
-    case kArchTableSwitch:
-      AssembleArchTableSwitch(instr);
-      break;
-    case kArchAbortCSADcheck:
-      DCHECK(i.InputRegister(0) == a0);
-      {
-        // We don't actually want to generate a pile of code for this, so just
-        // claim there is a stack frame, without generating one.
-        FrameScope scope(tasm(), StackFrame::NO_FRAME_TYPE);
-        __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSADcheck),
-                RelocInfo::CODE_TARGET);
-      }
-      __ stop();
-      break;
-    case kArchDebugBreak:
-      __ DebugBreak();
-      break;
-    case kArchComment:
-      __ RecordComment(reinterpret_cast<const char*>(i.InputInt32(0)));
-      break;
-    case kArchNop:
-    case kArchThrowTerminator:
-      // don't emit code for nops.
-      break;
-    case kArchDeoptimize: {
-      DeoptimizationExit* exit =
-          BuildTranslation(instr, -1, 0, 0, OutputFrameStateCombine::Ignore());
-      __ Branch(exit->label());
-      break;
-    }
-    case kArchRet:
-      AssembleReturn(instr->InputAt(0));
-      break;
-    case kArchStackPointerGreaterThan: {
-      Register lhs_register = sp;
-      uint32_t offset;
-      if (ShouldApplyOffsetToStackCheck(instr, &offset)) {
-        lhs_register = i.TempRegister(1);
-        __ Subu(lhs_register, sp, offset);
-      }
-      __ Sltu(i.TempRegister(0), i.InputRegister(0), lhs_register);
-      break;
-    }
-    case kArchStackCheckOffset:
-      __ Move(i.OutputRegister(), Smi::FromInt(GetStackCheckOffset()));
-      break;
-    case kArchFramePointer:
-      __ mov(i.OutputRegister(), fp);
-      break;
-    case kArchParentFramePointer:
-      if (frame_access_state()->has_frame()) {
-        __ lw(i.OutputRegister(), MemOperand(fp, 0));
-      } else {
-        __ mov(i.OutputRegister(), fp);
-      }
-      break;
-    case kArchTruncateDoubleToI:
-      __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
-                           i.InputDoubleRegister(0), DetermineStubCallMode());
-      break;
-    case kArchStoreWithWriteBarrier:
-    case kArchAtomicStoreWithWriteBarrier: {
-      RecordWriteMode mode =
-          static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
-      Register object = i.InputRegister(0);
-      Register index = i.InputRegister(1);
-      Register value = i.InputRegister(2);
-      Register scratch0 = i.TempRegister(0);
-      Register scratch1 = i.TempRegister(1);
-      auto ool = zone()->New<OutOfLineRecordWrite>(this, object, index, value,
-                                                   scratch0, scratch1, mode,
-                                                   DetermineStubCallMode());
-      __ Addu(kScratchReg, object, index);
-      if (arch_opcode == kArchStoreWithWriteBarrier) {
-        __ sw(value, MemOperand(kScratchReg));
-      } else {
-        DCHECK_EQ(kArchAtomicStoreWithWriteBarrier, arch_opcode);
-        __ sync();
-        __ sw(value, MemOperand(kScratchReg));
-        __ sync();
-      }
-      if (mode > RecordWriteMode::kValueIsPointer) {
-        __ JumpIfSmi(value, ool->exit());
-      }
-      __ CheckPageFlag(object, scratch0,
-                       MemoryChunk::kPointersFromHereAreInterestingMask, ne,
-                       ool->entry());
-      __ bind(ool->exit());
-      break;
-    }
-    case kArchStackSlot: {
-      FrameOffset offset =
-          frame_access_state()->GetFrameOffset(i.InputInt32(0));
-      Register base_reg = offset.from_stack_pointer() ?
sp : fp; - __ Addu(i.OutputRegister(), base_reg, Operand(offset.offset())); - if (FLAG_debug_code > 0) { - // Verify that the output_register is properly aligned - __ And(kScratchReg, i.OutputRegister(), - Operand(kSystemPointerSize - 1)); - __ Assert(eq, AbortReason::kAllocationIsNotDoubleAligned, kScratchReg, - Operand(zero_reg)); - } - break; - } - case kIeee754Float64Acos: - ASSEMBLE_IEEE754_UNOP(acos); - break; - case kIeee754Float64Acosh: - ASSEMBLE_IEEE754_UNOP(acosh); - break; - case kIeee754Float64Asin: - ASSEMBLE_IEEE754_UNOP(asin); - break; - case kIeee754Float64Asinh: - ASSEMBLE_IEEE754_UNOP(asinh); - break; - case kIeee754Float64Atan: - ASSEMBLE_IEEE754_UNOP(atan); - break; - case kIeee754Float64Atanh: - ASSEMBLE_IEEE754_UNOP(atanh); - break; - case kIeee754Float64Atan2: - ASSEMBLE_IEEE754_BINOP(atan2); - break; - case kIeee754Float64Cos: - ASSEMBLE_IEEE754_UNOP(cos); - break; - case kIeee754Float64Cosh: - ASSEMBLE_IEEE754_UNOP(cosh); - break; - case kIeee754Float64Cbrt: - ASSEMBLE_IEEE754_UNOP(cbrt); - break; - case kIeee754Float64Exp: - ASSEMBLE_IEEE754_UNOP(exp); - break; - case kIeee754Float64Expm1: - ASSEMBLE_IEEE754_UNOP(expm1); - break; - case kIeee754Float64Log: - ASSEMBLE_IEEE754_UNOP(log); - break; - case kIeee754Float64Log1p: - ASSEMBLE_IEEE754_UNOP(log1p); - break; - case kIeee754Float64Log10: - ASSEMBLE_IEEE754_UNOP(log10); - break; - case kIeee754Float64Log2: - ASSEMBLE_IEEE754_UNOP(log2); - break; - case kIeee754Float64Pow: - ASSEMBLE_IEEE754_BINOP(pow); - break; - case kIeee754Float64Sin: - ASSEMBLE_IEEE754_UNOP(sin); - break; - case kIeee754Float64Sinh: - ASSEMBLE_IEEE754_UNOP(sinh); - break; - case kIeee754Float64Tan: - ASSEMBLE_IEEE754_UNOP(tan); - break; - case kIeee754Float64Tanh: - ASSEMBLE_IEEE754_UNOP(tanh); - break; - case kMipsAdd: - __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); - break; - case kMipsAddOvf: - __ AddOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1), - kScratchReg); - break; - case kMipsSub: - __ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); - break; - case kMipsSubOvf: - __ SubOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1), - kScratchReg); - break; - case kMipsMul: - __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); - break; - case kMipsMulOvf: - __ MulOverflow(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1), - kScratchReg); - break; - case kMipsMulHigh: - __ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); - break; - case kMipsMulHighU: - __ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); - break; - case kMipsDiv: - __ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); - if (IsMipsArchVariant(kMips32r6)) { - __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); - } else { - __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1)); - } - break; - case kMipsDivU: - __ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); - if (IsMipsArchVariant(kMips32r6)) { - __ selnez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); - } else { - __ Movz(i.OutputRegister(), i.InputRegister(1), i.InputRegister(1)); - } - break; - case kMipsMod: - __ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); - break; - case kMipsModU: - __ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); - break; - case kMipsAnd: - __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); - break; - case kMipsOr: - __ Or(i.OutputRegister(), 
i.InputRegister(0), i.InputOperand(1)); - break; - case kMipsNor: - if (instr->InputAt(1)->IsRegister()) { - __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); - } else { - DCHECK_EQ(0, i.InputOperand(1).immediate()); - __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg); - } - break; - case kMipsXor: - __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); - break; - case kMipsClz: - __ Clz(i.OutputRegister(), i.InputRegister(0)); - break; - case kMipsCtz: { - Register src = i.InputRegister(0); - Register dst = i.OutputRegister(); - __ Ctz(dst, src); - } break; - case kMipsPopcnt: { - Register src = i.InputRegister(0); - Register dst = i.OutputRegister(); - __ Popcnt(dst, src); - } break; - case kMipsShl: - if (instr->InputAt(1)->IsRegister()) { - __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); - } else { - int32_t imm = i.InputOperand(1).immediate(); - __ sll(i.OutputRegister(), i.InputRegister(0), imm); - } - break; - case kMipsShr: - if (instr->InputAt(1)->IsRegister()) { - __ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); - } else { - int32_t imm = i.InputOperand(1).immediate(); - __ srl(i.OutputRegister(), i.InputRegister(0), imm); - } - break; - case kMipsSar: - if (instr->InputAt(1)->IsRegister()) { - __ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); - } else { - int32_t imm = i.InputOperand(1).immediate(); - __ sra(i.OutputRegister(), i.InputRegister(0), imm); - } - break; - case kMipsShlPair: { - Register second_output = - instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0); - if (instr->InputAt(2)->IsRegister()) { - __ ShlPair(i.OutputRegister(0), second_output, i.InputRegister(0), - i.InputRegister(1), i.InputRegister(2), kScratchReg, - kScratchReg2); - } else { - uint32_t imm = i.InputOperand(2).immediate(); - __ ShlPair(i.OutputRegister(0), second_output, i.InputRegister(0), - i.InputRegister(1), imm, kScratchReg); - } - } break; - case kMipsShrPair: { - Register second_output = - instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0); - if (instr->InputAt(2)->IsRegister()) { - __ ShrPair(i.OutputRegister(0), second_output, i.InputRegister(0), - i.InputRegister(1), i.InputRegister(2), kScratchReg, - kScratchReg2); - } else { - uint32_t imm = i.InputOperand(2).immediate(); - __ ShrPair(i.OutputRegister(0), second_output, i.InputRegister(0), - i.InputRegister(1), imm, kScratchReg); - } - } break; - case kMipsSarPair: { - Register second_output = - instr->OutputCount() >= 2 ? 
i.OutputRegister(1) : i.TempRegister(0); - if (instr->InputAt(2)->IsRegister()) { - __ SarPair(i.OutputRegister(0), second_output, i.InputRegister(0), - i.InputRegister(1), i.InputRegister(2), kScratchReg, - kScratchReg2); - } else { - uint32_t imm = i.InputOperand(2).immediate(); - __ SarPair(i.OutputRegister(0), second_output, i.InputRegister(0), - i.InputRegister(1), imm, kScratchReg); - } - } break; - case kMipsExt: - __ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1), - i.InputInt8(2)); - break; - case kMipsIns: - if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) { - __ Ins(i.OutputRegister(), zero_reg, i.InputInt8(1), i.InputInt8(2)); - } else { - __ Ins(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1), - i.InputInt8(2)); - } - break; - case kMipsRor: - __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1)); - break; - case kMipsTst: - __ And(kScratchReg, i.InputRegister(0), i.InputOperand(1)); - break; - case kMipsCmp: - // Pseudo-instruction used for cmp/branch. No opcode emitted here. - break; - case kMipsMov: - // TODO(plind): Should we combine mov/li like this, or use separate instr? - // - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType - if (HasRegisterInput(instr, 0)) { - __ mov(i.OutputRegister(), i.InputRegister(0)); - } else { - __ li(i.OutputRegister(), i.InputOperand(0)); - } - break; - case kMipsLsa: - DCHECK(instr->InputAt(2)->IsImmediate()); - __ Lsa(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1), - i.InputInt8(2)); - break; - case kMipsCmpS: { - FPURegister left = i.InputOrZeroSingleRegister(0); - FPURegister right = i.InputOrZeroSingleRegister(1); - bool predicate; - FPUCondition cc = - FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition()); - - if ((left == kDoubleRegZero || right == kDoubleRegZero) && - !__ IsDoubleZeroRegSet()) { - __ Move(kDoubleRegZero, 0.0); - } - - __ CompareF32(cc, left, right); - } break; - case kMipsAddS: - // TODO(plind): add special case: combine mult & add. - __ add_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), - i.InputDoubleRegister(1)); - break; - case kMipsSubS: - __ sub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), - i.InputDoubleRegister(1)); - break; - case kMipsMulS: - // TODO(plind): add special case: right op is -1.0, see arm port. 
- __ mul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), - i.InputDoubleRegister(1)); - break; - case kMipsDivS: - __ div_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), - i.InputDoubleRegister(1)); - break; - case kMipsAbsS: - if (IsMipsArchVariant(kMips32r6)) { - __ abs_s(i.OutputSingleRegister(), i.InputSingleRegister(0)); - } else { - __ mfc1(kScratchReg, i.InputSingleRegister(0)); - __ Ins(kScratchReg, zero_reg, 31, 1); - __ mtc1(kScratchReg, i.OutputSingleRegister()); - } - break; - case kMipsSqrtS: { - __ sqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); - break; - } - case kMipsMaxS: - __ max_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), - i.InputDoubleRegister(1)); - break; - case kMipsMinS: - __ min_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0), - i.InputDoubleRegister(1)); - break; - case kMipsCmpD: { - FPURegister left = i.InputOrZeroDoubleRegister(0); - FPURegister right = i.InputOrZeroDoubleRegister(1); - bool predicate; - FPUCondition cc = - FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition()); - if ((left == kDoubleRegZero || right == kDoubleRegZero) && - !__ IsDoubleZeroRegSet()) { - __ Move(kDoubleRegZero, 0.0); - } - __ CompareF64(cc, left, right); - } break; - case kMipsAddPair: - __ AddPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0), - i.InputRegister(1), i.InputRegister(2), i.InputRegister(3), - kScratchReg, kScratchReg2); - break; - case kMipsSubPair: - __ SubPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0), - i.InputRegister(1), i.InputRegister(2), i.InputRegister(3), - kScratchReg, kScratchReg2); - break; - case kMipsMulPair: { - __ MulPair(i.OutputRegister(0), i.OutputRegister(1), i.InputRegister(0), - i.InputRegister(1), i.InputRegister(2), i.InputRegister(3), - kScratchReg, kScratchReg2); - } break; - case kMipsAddD: - // TODO(plind): add special case: combine mult & add. - __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), - i.InputDoubleRegister(1)); - break; - case kMipsSubD: - __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), - i.InputDoubleRegister(1)); - break; - case kMipsMaddS: - __ Madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0), - i.InputFloatRegister(1), i.InputFloatRegister(2), - kScratchDoubleReg); - break; - case kMipsMaddD: - __ Madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), - i.InputDoubleRegister(1), i.InputDoubleRegister(2), - kScratchDoubleReg); - break; - case kMipsMsubS: - __ Msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0), - i.InputFloatRegister(1), i.InputFloatRegister(2), - kScratchDoubleReg); - break; - case kMipsMsubD: - __ Msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), - i.InputDoubleRegister(1), i.InputDoubleRegister(2), - kScratchDoubleReg); - break; - case kMipsMulD: - // TODO(plind): add special case: right op is -1.0, see arm port. - __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), - i.InputDoubleRegister(1)); - break; - case kMipsDivD: - __ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0), - i.InputDoubleRegister(1)); - break; - case kMipsModD: { - // TODO(bmeurer): We should really get rid of this special instruction, - // and generate a CallAddress instruction instead. 
-      FrameScope scope(tasm(), StackFrame::MANUAL);
-      __ PrepareCallCFunction(0, 2, kScratchReg);
-      __ MovToFloatParameters(i.InputDoubleRegister(0),
-                              i.InputDoubleRegister(1));
-      __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
-      // Move the result in the double result register.
-      __ MovFromFloatResult(i.OutputDoubleRegister());
-      break;
-    }
-    case kMipsAbsD: {
-      FPURegister src = i.InputDoubleRegister(0);
-      FPURegister dst = i.OutputDoubleRegister();
-      if (IsMipsArchVariant(kMips32r6)) {
-        __ abs_d(dst, src);
-      } else {
-        __ Move(dst, src);
-        __ mfhc1(kScratchReg, src);
-        __ Ins(kScratchReg, zero_reg, 31, 1);
-        __ mthc1(kScratchReg, dst);
-      }
-      break;
-    }
-    case kMipsNegS:
-      __ Neg_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
-      break;
-    case kMipsNegD:
-      __ Neg_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
-      break;
-    case kMipsSqrtD: {
-      __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
-      break;
-    }
-    case kMipsMaxD:
-      __ max_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-               i.InputDoubleRegister(1));
-      break;
-    case kMipsMinD:
-      __ min_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-               i.InputDoubleRegister(1));
-      break;
-    case kMipsFloat64RoundDown: {
-      __ Floor_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
-      break;
-    }
-    case kMipsFloat32RoundDown: {
-      __ Floor_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
-      break;
-    }
-    case kMipsFloat64RoundTruncate: {
-      __ Trunc_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
-      break;
-    }
-    case kMipsFloat32RoundTruncate: {
-      __ Trunc_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
-      break;
-    }
-    case kMipsFloat64RoundUp: {
-      __ Ceil_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
-      break;
-    }
-    case kMipsFloat32RoundUp: {
-      __ Ceil_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
-      break;
-    }
-    case kMipsFloat64RoundTiesEven: {
-      __ Round_d_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
-      break;
-    }
-    case kMipsFloat32RoundTiesEven: {
-      __ Round_s_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
-      break;
-    }
-    case kMipsFloat32Max: {
-      FPURegister dst = i.OutputSingleRegister();
-      FPURegister src1 = i.InputSingleRegister(0);
-      FPURegister src2 = i.InputSingleRegister(1);
-      auto ool = zone()->New<OutOfLineFloat32Max>(this, dst, src1, src2);
-      __ Float32Max(dst, src1, src2, ool->entry());
-      __ bind(ool->exit());
-      break;
-    }
-    case kMipsFloat64Max: {
-      DoubleRegister dst = i.OutputDoubleRegister();
-      DoubleRegister src1 = i.InputDoubleRegister(0);
-      DoubleRegister src2 = i.InputDoubleRegister(1);
-      auto ool = zone()->New<OutOfLineFloat64Max>(this, dst, src1, src2);
-      __ Float64Max(dst, src1, src2, ool->entry());
-      __ bind(ool->exit());
-      break;
-    }
-    case kMipsFloat32Min: {
-      FPURegister dst = i.OutputSingleRegister();
-      FPURegister src1 = i.InputSingleRegister(0);
-      FPURegister src2 = i.InputSingleRegister(1);
-      auto ool = zone()->New<OutOfLineFloat32Min>(this, dst, src1, src2);
-      __ Float32Min(dst, src1, src2, ool->entry());
-      __ bind(ool->exit());
-      break;
-    }
-    case kMipsFloat64Min: {
-      DoubleRegister dst = i.OutputDoubleRegister();
-      DoubleRegister src1 = i.InputDoubleRegister(0);
-      DoubleRegister src2 = i.InputDoubleRegister(1);
-      auto ool = zone()->New<OutOfLineFloat64Min>(this, dst, src1, src2);
-      __ Float64Min(dst, src1, src2, ool->entry());
-      __ bind(ool->exit());
-      break;
-    }
-    case kMipsCvtSD: {
-      __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
-      break;
-    }
-    case kMipsCvtDS: {
-      __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
-      break;
-    }
-    case
kMipsCvtDW: { - FPURegister scratch = kScratchDoubleReg; - __ mtc1(i.InputRegister(0), scratch); - __ cvt_d_w(i.OutputDoubleRegister(), scratch); - break; - } - case kMipsCvtSW: { - FPURegister scratch = kScratchDoubleReg; - __ mtc1(i.InputRegister(0), scratch); - __ cvt_s_w(i.OutputDoubleRegister(), scratch); - break; - } - case kMipsCvtSUw: { - FPURegister scratch = kScratchDoubleReg; - __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch); - __ cvt_s_d(i.OutputDoubleRegister(), i.OutputDoubleRegister()); - break; - } - case kMipsCvtDUw: { - FPURegister scratch = kScratchDoubleReg; - __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch); - break; - } - case kMipsFloorWD: { - FPURegister scratch = kScratchDoubleReg; - __ Floor_w_d(scratch, i.InputDoubleRegister(0)); - __ mfc1(i.OutputRegister(), scratch); - break; - } - case kMipsCeilWD: { - FPURegister scratch = kScratchDoubleReg; - __ Ceil_w_d(scratch, i.InputDoubleRegister(0)); - __ mfc1(i.OutputRegister(), scratch); - break; - } - case kMipsRoundWD: { - FPURegister scratch = kScratchDoubleReg; - __ Round_w_d(scratch, i.InputDoubleRegister(0)); - __ mfc1(i.OutputRegister(), scratch); - break; - } - case kMipsTruncWD: { - FPURegister scratch = kScratchDoubleReg; - // Other arches use round to zero here, so we follow. - __ Trunc_w_d(scratch, i.InputDoubleRegister(0)); - __ mfc1(i.OutputRegister(), scratch); - break; - } - case kMipsFloorWS: { - FPURegister scratch = kScratchDoubleReg; - __ floor_w_s(scratch, i.InputDoubleRegister(0)); - __ mfc1(i.OutputRegister(), scratch); - break; - } - case kMipsCeilWS: { - FPURegister scratch = kScratchDoubleReg; - __ ceil_w_s(scratch, i.InputDoubleRegister(0)); - __ mfc1(i.OutputRegister(), scratch); - break; - } - case kMipsRoundWS: { - FPURegister scratch = kScratchDoubleReg; - __ round_w_s(scratch, i.InputDoubleRegister(0)); - __ mfc1(i.OutputRegister(), scratch); - break; - } - case kMipsTruncWS: { - FPURegister scratch = kScratchDoubleReg; - __ trunc_w_s(scratch, i.InputDoubleRegister(0)); - __ mfc1(i.OutputRegister(), scratch); - // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead, - // because INT32_MIN allows easier out-of-bounds detection. - bool set_overflow_to_min_i32 = MiscField::decode(instr->opcode()); - if (set_overflow_to_min_i32) { - __ Addu(kScratchReg, i.OutputRegister(), 1); - __ Slt(kScratchReg2, kScratchReg, i.OutputRegister()); - __ Movn(i.OutputRegister(), kScratchReg, kScratchReg2); - } - break; - } - case kMipsTruncUwD: { - FPURegister scratch = kScratchDoubleReg; - __ Trunc_uw_d(i.OutputRegister(), i.InputDoubleRegister(0), scratch); - break; - } - case kMipsTruncUwS: { - FPURegister scratch = kScratchDoubleReg; - __ Trunc_uw_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch); - // Avoid UINT32_MAX as an overflow indicator and use 0 instead, - // because 0 allows easier out-of-bounds detection. 
- bool set_overflow_to_min_i32 = MiscField::decode(instr->opcode()); - if (set_overflow_to_min_i32) { - __ Addu(kScratchReg, i.OutputRegister(), 1); - __ Movz(i.OutputRegister(), zero_reg, kScratchReg); - } - break; - } - case kMipsFloat64ExtractLowWord32: - __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0)); - break; - case kMipsFloat64ExtractHighWord32: - __ FmoveHigh(i.OutputRegister(), i.InputDoubleRegister(0)); - break; - case kMipsFloat64InsertLowWord32: - __ FmoveLow(i.OutputDoubleRegister(), i.InputRegister(1)); - break; - case kMipsFloat64InsertHighWord32: - __ FmoveHigh(i.OutputDoubleRegister(), i.InputRegister(1)); - break; - case kMipsFloat64SilenceNaN: - __ FPUCanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); - break; - - // ... more basic instructions ... - case kMipsSeb: - __ Seb(i.OutputRegister(), i.InputRegister(0)); - break; - case kMipsSeh: - __ Seh(i.OutputRegister(), i.InputRegister(0)); - break; - case kMipsLbu: - __ lbu(i.OutputRegister(), i.MemoryOperand()); - break; - case kMipsLb: - __ lb(i.OutputRegister(), i.MemoryOperand()); - break; - case kMipsSb: - __ sb(i.InputOrZeroRegister(2), i.MemoryOperand()); - break; - case kMipsLhu: - __ lhu(i.OutputRegister(), i.MemoryOperand()); - break; - case kMipsUlhu: - __ Ulhu(i.OutputRegister(), i.MemoryOperand()); - break; - case kMipsLh: - __ lh(i.OutputRegister(), i.MemoryOperand()); - break; - case kMipsUlh: - __ Ulh(i.OutputRegister(), i.MemoryOperand()); - break; - case kMipsSh: - __ sh(i.InputOrZeroRegister(2), i.MemoryOperand()); - break; - case kMipsUsh: - __ Ush(i.InputOrZeroRegister(2), i.MemoryOperand(), kScratchReg); - break; - case kMipsLw: - __ lw(i.OutputRegister(), i.MemoryOperand()); - break; - case kMipsUlw: - __ Ulw(i.OutputRegister(), i.MemoryOperand()); - break; - case kMipsSw: - __ sw(i.InputOrZeroRegister(2), i.MemoryOperand()); - break; - case kMipsUsw: - __ Usw(i.InputOrZeroRegister(2), i.MemoryOperand()); - break; - case kMipsLwc1: { - __ lwc1(i.OutputSingleRegister(), i.MemoryOperand()); - break; - } - case kMipsUlwc1: { - __ Ulwc1(i.OutputSingleRegister(), i.MemoryOperand(), kScratchReg); - break; - } - case kMipsSwc1: { - size_t index = 0; - MemOperand operand = i.MemoryOperand(&index); - FPURegister ft = i.InputOrZeroSingleRegister(index); - if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { - __ Move(kDoubleRegZero, 0.0); - } - __ swc1(ft, operand); - break; - } - case kMipsUswc1: { - size_t index = 0; - MemOperand operand = i.MemoryOperand(&index); - FPURegister ft = i.InputOrZeroSingleRegister(index); - if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { - __ Move(kDoubleRegZero, 0.0); - } - __ Uswc1(ft, operand, kScratchReg); - break; - } - case kMipsLdc1: - __ Ldc1(i.OutputDoubleRegister(), i.MemoryOperand()); - break; - case kMipsUldc1: - __ Uldc1(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg); - break; - case kMipsSdc1: { - FPURegister ft = i.InputOrZeroDoubleRegister(2); - if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { - __ Move(kDoubleRegZero, 0.0); - } - __ Sdc1(ft, i.MemoryOperand()); - break; - } - case kMipsUsdc1: { - FPURegister ft = i.InputOrZeroDoubleRegister(2); - if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) { - __ Move(kDoubleRegZero, 0.0); - } - __ Usdc1(ft, i.MemoryOperand(), kScratchReg); - break; - } - case kMipsSync: { - __ sync(); - break; - } - case kMipsPush: - if (instr->InputAt(0)->IsFPRegister()) { - LocationOperand* op = LocationOperand::cast(instr->InputAt(0)); - switch (op->representation()) { - 
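// Each branch of this switch writes the value just below the current stack
// pointer, moves sp down by the slot size, and reports the growth to the
// frame state in system-pointer-sized slots so later frame offsets stay
// consistent. A rough C++ analogue of that bookkeeping, assuming the mips32
// sizes kFloatSize == 4, kDoubleSize == 8, kSystemPointerSize == 4 (type and
// names illustrative, not the V8 API):
struct SpDeltaTracker {
  int slots = 0;  // growth in system-pointer-sized slots
  void Push(int byte_size, int pointer_size) { slots += byte_size / pointer_size; }
};
// e.g. pushing a double adds two slots, a float one: Push(8, 4); Push(4, 4);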
case MachineRepresentation::kFloat32: - __ swc1(i.InputFloatRegister(0), MemOperand(sp, -kFloatSize)); - __ Subu(sp, sp, Operand(kFloatSize)); - frame_access_state()->IncreaseSPDelta(kFloatSize / - kSystemPointerSize); - break; - case MachineRepresentation::kFloat64: - __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize)); - __ Subu(sp, sp, Operand(kDoubleSize)); - frame_access_state()->IncreaseSPDelta(kDoubleSize / - kSystemPointerSize); - break; - default: { - UNREACHABLE(); - } - } - } else { - __ Push(i.InputRegister(0)); - frame_access_state()->IncreaseSPDelta(1); - } - break; - case kMipsPeek: { - int reverse_slot = i.InputInt32(0); - int offset = - FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot); - if (instr->OutputAt(0)->IsFPRegister()) { - LocationOperand* op = LocationOperand::cast(instr->OutputAt(0)); - if (op->representation() == MachineRepresentation::kFloat64) { - __ Ldc1(i.OutputDoubleRegister(), MemOperand(fp, offset)); - } else if (op->representation() == MachineRepresentation::kFloat32) { - __ lwc1(i.OutputSingleRegister(0), MemOperand(fp, offset)); - } else { - DCHECK_EQ(op->representation(), MachineRepresentation::kSimd128); - __ ld_b(i.OutputSimd128Register(), MemOperand(fp, offset)); - } - } else { - __ lw(i.OutputRegister(0), MemOperand(fp, offset)); - } - break; - } - case kMipsStackClaim: { - __ Subu(sp, sp, Operand(i.InputInt32(0))); - frame_access_state()->IncreaseSPDelta(i.InputInt32(0) / - kSystemPointerSize); - break; - } - case kMipsStoreToStackSlot: { - if (instr->InputAt(0)->IsFPRegister()) { - LocationOperand* op = LocationOperand::cast(instr->InputAt(0)); - if (op->representation() == MachineRepresentation::kFloat64) { - __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1))); - } else if (op->representation() == MachineRepresentation::kFloat32) { - __ swc1(i.InputSingleRegister(0), MemOperand(sp, i.InputInt32(1))); - } else { - DCHECK_EQ(MachineRepresentation::kSimd128, op->representation()); - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ st_b(i.InputSimd128Register(0), MemOperand(sp, i.InputInt32(1))); - } - } else { - __ sw(i.InputRegister(0), MemOperand(sp, i.InputInt32(1))); - } - break; - } - case kMipsByteSwap32: { - __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4); - break; - } - case kMipsS128Load8Splat: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ lb(kScratchReg, i.MemoryOperand()); - __ fill_b(i.OutputSimd128Register(), kScratchReg); - break; - } - case kMipsS128Load16Splat: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ lh(kScratchReg, i.MemoryOperand()); - __ fill_h(i.OutputSimd128Register(), kScratchReg); - break; - } - case kMipsS128Load32Splat: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ Lw(kScratchReg, i.MemoryOperand()); - __ fill_w(i.OutputSimd128Register(), kScratchReg); - break; - } - case kMipsS128Load64Splat: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(); - MemOperand memLow = i.MemoryOperand(); - MemOperand memHigh = MemOperand(memLow.rm(), memLow.offset() + 4); - __ Lw(kScratchReg, memLow); - __ fill_w(dst, kScratchReg); - __ Lw(kScratchReg, memHigh); - __ fill_w(kSimd128ScratchReg, kScratchReg); - __ ilvr_w(dst, kSimd128ScratchReg, dst); - break; - } - case kMipsS128Load8x8S: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(); - MemOperand memLow = i.MemoryOperand(); - MemOperand memHigh = MemOperand(memLow.rm(), memLow.offset() + 4); - __ 
Lw(kScratchReg, memLow); - __ fill_w(dst, kScratchReg); - __ Lw(kScratchReg, memHigh); - __ fill_w(kSimd128ScratchReg, kScratchReg); - __ ilvr_w(dst, kSimd128ScratchReg, dst); - __ clti_s_b(kSimd128ScratchReg, dst, 0); - __ ilvr_b(dst, kSimd128ScratchReg, dst); - break; - } - case kMipsS128Load8x8U: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(); - MemOperand memLow = i.MemoryOperand(); - MemOperand memHigh = MemOperand(memLow.rm(), memLow.offset() + 4); - __ Lw(kScratchReg, memLow); - __ fill_w(dst, kScratchReg); - __ Lw(kScratchReg, memHigh); - __ fill_w(kSimd128ScratchReg, kScratchReg); - __ ilvr_w(dst, kSimd128ScratchReg, dst); - __ ilvr_b(dst, kSimd128RegZero, dst); - break; - } - case kMipsS128Load16x4S: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(); - MemOperand memLow = i.MemoryOperand(); - MemOperand memHigh = MemOperand(memLow.rm(), memLow.offset() + 4); - __ Lw(kScratchReg, memLow); - __ fill_w(dst, kScratchReg); - __ Lw(kScratchReg, memHigh); - __ fill_w(kSimd128ScratchReg, kScratchReg); - __ ilvr_w(dst, kSimd128ScratchReg, dst); - __ clti_s_h(kSimd128ScratchReg, dst, 0); - __ ilvr_h(dst, kSimd128ScratchReg, dst); - break; - } - case kMipsS128Load16x4U: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(); - MemOperand memLow = i.MemoryOperand(); - MemOperand memHigh = MemOperand(memLow.rm(), memLow.offset() + 4); - __ Lw(kScratchReg, memLow); - __ fill_w(dst, kScratchReg); - __ Lw(kScratchReg, memHigh); - __ fill_w(kSimd128ScratchReg, kScratchReg); - __ ilvr_w(dst, kSimd128ScratchReg, dst); - __ ilvr_h(dst, kSimd128RegZero, dst); - break; - } - case kMipsS128Load32x2S: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(); - MemOperand memLow = i.MemoryOperand(); - MemOperand memHigh = MemOperand(memLow.rm(), memLow.offset() + 4); - __ Lw(kScratchReg, memLow); - __ fill_w(dst, kScratchReg); - __ Lw(kScratchReg, memHigh); - __ fill_w(kSimd128ScratchReg, kScratchReg); - __ ilvr_w(dst, kSimd128ScratchReg, dst); - __ clti_s_w(kSimd128ScratchReg, dst, 0); - __ ilvr_w(dst, kSimd128ScratchReg, dst); - break; - } - case kMipsS128Load32x2U: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(); - MemOperand memLow = i.MemoryOperand(); - MemOperand memHigh = MemOperand(memLow.rm(), memLow.offset() + 4); - __ Lw(kScratchReg, memLow); - __ fill_w(dst, kScratchReg); - __ Lw(kScratchReg, memHigh); - __ fill_w(kSimd128ScratchReg, kScratchReg); - __ ilvr_w(dst, kSimd128ScratchReg, dst); - __ ilvr_w(dst, kSimd128RegZero, dst); - break; - } - case kAtomicLoadInt8: - ASSEMBLE_ATOMIC_LOAD_INTEGER(lb); - break; - case kAtomicLoadUint8: - ASSEMBLE_ATOMIC_LOAD_INTEGER(lbu); - break; - case kAtomicLoadInt16: - ASSEMBLE_ATOMIC_LOAD_INTEGER(lh); - break; - case kAtomicLoadUint16: - ASSEMBLE_ATOMIC_LOAD_INTEGER(lhu); - break; - case kAtomicLoadWord32: - ASSEMBLE_ATOMIC_LOAD_INTEGER(lw); - break; - case kAtomicStoreWord8: - ASSEMBLE_ATOMIC_STORE_INTEGER(sb); - break; - case kAtomicStoreWord16: - ASSEMBLE_ATOMIC_STORE_INTEGER(sh); - break; - case kAtomicStoreWord32: - ASSEMBLE_ATOMIC_STORE_INTEGER(sw); - break; - case kAtomicExchangeInt8: - ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(true, 8); - break; - case kAtomicExchangeUint8: - ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(false, 8); - break; - case kAtomicExchangeInt16: - ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(true, 16); - 
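// In these EXT macro invocations the first argument appears to select sign
// extension of the old value and the second the field width in bits, with
// the macro wrapping a word-wide ll/sc retry loop around the narrow access.
// Observably, the (true, 16) exchange should behave like this portable
// sketch (std::atomic is used only to illustrate the contract, not how the
// generated code works; the helper name is illustrative):
#include <atomic>
#include <cstdint>
inline int32_t ExchangeInt16(std::atomic<int16_t>* slot, int16_t value) {
  // the sign-extended old value is what lands in the output register
  return static_cast<int32_t>(slot->exchange(value));
}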
break; - case kAtomicExchangeUint16: - ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(false, 16); - break; - case kAtomicExchangeWord32: - ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(); - break; - case kAtomicCompareExchangeInt8: - ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(true, 8); - break; - case kAtomicCompareExchangeUint8: - ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(false, 8); - break; - case kAtomicCompareExchangeInt16: - ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(true, 16); - break; - case kAtomicCompareExchangeUint16: - ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(false, 16); - break; - case kAtomicCompareExchangeWord32: - ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(); - break; -#define ATOMIC_BINOP_CASE(op, inst) \ - case kAtomic##op##Int8: \ - ASSEMBLE_ATOMIC_BINOP_EXT(true, 8, inst); \ - break; \ - case kAtomic##op##Uint8: \ - ASSEMBLE_ATOMIC_BINOP_EXT(false, 8, inst); \ - break; \ - case kAtomic##op##Int16: \ - ASSEMBLE_ATOMIC_BINOP_EXT(true, 16, inst); \ - break; \ - case kAtomic##op##Uint16: \ - ASSEMBLE_ATOMIC_BINOP_EXT(false, 16, inst); \ - break; \ - case kAtomic##op##Word32: \ - ASSEMBLE_ATOMIC_BINOP(inst); \ - break; - ATOMIC_BINOP_CASE(Add, Addu) - ATOMIC_BINOP_CASE(Sub, Subu) - ATOMIC_BINOP_CASE(And, And) - ATOMIC_BINOP_CASE(Or, Or) - ATOMIC_BINOP_CASE(Xor, Xor) -#undef ATOMIC_BINOP_CASE - case kMipsWord32AtomicPairLoad: { - if (IsMipsArchVariant(kMips32r6)) { - if (instr->OutputCount() > 0) { - Register second_output = instr->OutputCount() == 2 - ? i.OutputRegister(1) - : i.TempRegister(1); - __ Addu(a0, i.InputRegister(0), i.InputRegister(1)); - __ llx(second_output, MemOperand(a0, 4)); - __ ll(i.OutputRegister(0), MemOperand(a0, 0)); - __ sync(); - } - } else { - FrameScope scope(tasm(), StackFrame::MANUAL); - __ Addu(a0, i.InputRegister(0), i.InputRegister(1)); - __ PushCallerSaved(SaveFPRegsMode::kIgnore, v0, v1); - __ PrepareCallCFunction(1, 0, kScratchReg); - __ CallCFunction(ExternalReference::atomic_pair_load_function(), 1, 0); - __ PopCallerSaved(SaveFPRegsMode::kIgnore, v0, v1); - } - break; - } - case kMipsWord32AtomicPairStore: { - if (IsMipsArchVariant(kMips32r6)) { - Label store; - __ Addu(a0, i.InputRegister(0), i.InputRegister(1)); - __ sync(); - __ bind(&store); - __ llx(i.TempRegister(2), MemOperand(a0, 4)); - __ ll(i.TempRegister(1), MemOperand(a0, 0)); - __ Move(i.TempRegister(1), i.InputRegister(2)); - __ scx(i.InputRegister(3), MemOperand(a0, 4)); - __ sc(i.TempRegister(1), MemOperand(a0, 0)); - __ BranchShort(&store, eq, i.TempRegister(1), Operand(zero_reg)); - __ sync(); - } else { - FrameScope scope(tasm(), StackFrame::MANUAL); - __ Addu(a0, i.InputRegister(0), i.InputRegister(1)); - __ PushCallerSaved(SaveFPRegsMode::kIgnore); - __ PrepareCallCFunction(3, 0, kScratchReg); - __ CallCFunction(ExternalReference::atomic_pair_store_function(), 3, 0); - __ PopCallerSaved(SaveFPRegsMode::kIgnore); - } - break; - } -#define ATOMIC64_BINOP_ARITH_CASE(op, instr, external) \ - case kMipsWord32AtomicPair##op: \ - ASSEMBLE_ATOMIC64_ARITH_BINOP(instr, external); \ - break; - ATOMIC64_BINOP_ARITH_CASE(Add, AddPair, atomic_pair_add_function) - ATOMIC64_BINOP_ARITH_CASE(Sub, SubPair, atomic_pair_sub_function) -#undef ATOMIC64_BINOP_ARITH_CASE -#define ATOMIC64_BINOP_LOGIC_CASE(op, instr, external) \ - case kMipsWord32AtomicPair##op: \ - ASSEMBLE_ATOMIC64_LOGIC_BINOP(instr, external); \ - break; - ATOMIC64_BINOP_LOGIC_CASE(And, AndPair, atomic_pair_and_function) - ATOMIC64_BINOP_LOGIC_CASE(Or, OrPair, atomic_pair_or_function) - ATOMIC64_BINOP_LOGIC_CASE(Xor, XorPair, 
atomic_pair_xor_function) -#undef ATOMIC64_BINOP_LOGIC_CASE - case kMipsWord32AtomicPairExchange: - if (IsMipsArchVariant(kMips32r6)) { - Label binop; - Register oldval_low = - instr->OutputCount() >= 1 ? i.OutputRegister(0) : i.TempRegister(1); - Register oldval_high = - instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(2); - __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); - __ sync(); - __ bind(&binop); - __ llx(oldval_high, MemOperand(i.TempRegister(0), 4)); - __ ll(oldval_low, MemOperand(i.TempRegister(0), 0)); - __ Move(i.TempRegister(1), i.InputRegister(2)); - __ scx(i.InputRegister(3), MemOperand(i.TempRegister(0), 4)); - __ sc(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); - __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); - __ sync(); - } else { - FrameScope scope(tasm(), StackFrame::MANUAL); - __ PushCallerSaved(SaveFPRegsMode::kIgnore, v0, v1); - __ PrepareCallCFunction(3, 0, kScratchReg); - __ Addu(a0, i.InputRegister(0), i.InputRegister(1)); - __ CallCFunction(ExternalReference::atomic_pair_exchange_function(), 3, - 0); - __ PopCallerSaved(SaveFPRegsMode::kIgnore, v0, v1); - } - break; - case kMipsWord32AtomicPairCompareExchange: { - if (IsMipsArchVariant(kMips32r6)) { - Label compareExchange, exit; - Register oldval_low = - instr->OutputCount() >= 1 ? i.OutputRegister(0) : kScratchReg; - Register oldval_high = - instr->OutputCount() >= 2 ? i.OutputRegister(1) : kScratchReg2; - __ Addu(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); - __ sync(); - __ bind(&compareExchange); - __ llx(oldval_high, MemOperand(i.TempRegister(0), 4)); - __ ll(oldval_low, MemOperand(i.TempRegister(0), 0)); - __ BranchShort(&exit, ne, i.InputRegister(2), Operand(oldval_low)); - __ BranchShort(&exit, ne, i.InputRegister(3), Operand(oldval_high)); - __ mov(kScratchReg, i.InputRegister(4)); - __ scx(i.InputRegister(5), MemOperand(i.TempRegister(0), 4)); - __ sc(kScratchReg, MemOperand(i.TempRegister(0), 0)); - __ BranchShort(&compareExchange, eq, kScratchReg, Operand(zero_reg)); - __ bind(&exit); - __ sync(); - } else { - FrameScope scope(tasm(), StackFrame::MANUAL); - __ PushCallerSaved(SaveFPRegsMode::kIgnore, v0, v1); - __ PrepareCallCFunction(5, 0, kScratchReg); - __ addu(a0, i.InputRegister(0), i.InputRegister(1)); - __ sw(i.InputRegister(5), MemOperand(sp, 16)); - __ CallCFunction( - ExternalReference::atomic_pair_compare_exchange_function(), 5, 0); - __ PopCallerSaved(SaveFPRegsMode::kIgnore, v0, v1); - } - break; - } - case kMipsS128Zero: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ xor_v(i.OutputSimd128Register(), i.OutputSimd128Register(), - i.OutputSimd128Register()); - break; - } - case kMipsI32x4Splat: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ fill_w(i.OutputSimd128Register(), i.InputRegister(0)); - break; - } - case kMipsI32x4ExtractLane: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ copy_s_w(i.OutputRegister(), i.InputSimd128Register(0), - i.InputInt8(1)); - break; - } - case kMipsI32x4ReplaceLane: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register src = i.InputSimd128Register(0); - Simd128Register dst = i.OutputSimd128Register(); - if (src != dst) { - __ move_v(dst, src); - } - __ insert_w(dst, i.InputInt8(1), i.InputRegister(2)); - break; - } - case kMipsI32x4Add: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ addv_w(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI32x4Sub: { - CpuFeatureScope 
msa_scope(tasm(), MIPS_SIMD); - __ subv_w(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI32x4ExtAddPairwiseI16x8S: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ hadd_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(0)); - break; - } - case kMipsI32x4ExtAddPairwiseI16x8U: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ hadd_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(0)); - break; - } - case kMipsF64x2Abs: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ bclri_d(i.OutputSimd128Register(), i.InputSimd128Register(0), 63); - break; - } - case kMipsF64x2Neg: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ bnegi_d(i.OutputSimd128Register(), i.InputSimd128Register(0), 63); - break; - } - case kMipsF64x2Sqrt: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ fsqrt_d(i.OutputSimd128Register(), i.InputSimd128Register(0)); - break; - } - case kMipsF64x2Add: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - ASSEMBLE_F64X2_ARITHMETIC_BINOP(fadd_d); - break; - } - case kMipsF64x2Sub: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - ASSEMBLE_F64X2_ARITHMETIC_BINOP(fsub_d); - break; - } - case kMipsF64x2Mul: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - ASSEMBLE_F64X2_ARITHMETIC_BINOP(fmul_d); - break; - } - case kMipsF64x2Div: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - ASSEMBLE_F64X2_ARITHMETIC_BINOP(fdiv_d); - break; - } - case kMipsF64x2Min: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - ASSEMBLE_F64X2_ARITHMETIC_BINOP(fmin_d); - break; - } - case kMipsF64x2Max: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - ASSEMBLE_F64X2_ARITHMETIC_BINOP(fmax_d); - break; - } - case kMipsF64x2Eq: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ fceq_d(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsF64x2Ne: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ fcne_d(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsF64x2Lt: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ fclt_d(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsF64x2Le: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ fcle_d(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsF64x2Splat: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(); - __ FmoveLow(kScratchReg, i.InputDoubleRegister(0)); - __ insert_w(dst, 0, kScratchReg); - __ insert_w(dst, 2, kScratchReg); - __ FmoveHigh(kScratchReg, i.InputDoubleRegister(0)); - __ insert_w(dst, 1, kScratchReg); - __ insert_w(dst, 3, kScratchReg); - break; - } - case kMipsF64x2ExtractLane: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ copy_u_w(kScratchReg, i.InputSimd128Register(0), i.InputInt8(1) * 2); - __ FmoveLow(i.OutputDoubleRegister(), kScratchReg); - __ copy_u_w(kScratchReg, i.InputSimd128Register(0), - i.InputInt8(1) * 2 + 1); - __ FmoveHigh(i.OutputDoubleRegister(), kScratchReg); - break; - } - case kMipsF64x2ReplaceLane: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register src = i.InputSimd128Register(0); - Simd128Register dst = i.OutputSimd128Register(); - if (src != dst) { - __ move_v(dst, src); - } - __ FmoveLow(kScratchReg, i.InputDoubleRegister(2)); - __ insert_w(dst, 
i.InputInt8(1) * 2, kScratchReg); - __ FmoveHigh(kScratchReg, i.InputDoubleRegister(2)); - __ insert_w(dst, i.InputInt8(1) * 2 + 1, kScratchReg); - break; - } - case kMipsF64x2Pmin: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(); - Simd128Register lhs = i.InputSimd128Register(0); - Simd128Register rhs = i.InputSimd128Register(1); - // dst = rhs < lhs ? rhs : lhs - __ fclt_d(dst, rhs, lhs); - __ bsel_v(dst, lhs, rhs); - break; - } - case kMipsF64x2Pmax: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(); - Simd128Register lhs = i.InputSimd128Register(0); - Simd128Register rhs = i.InputSimd128Register(1); - // dst = lhs < rhs ? rhs : lhs - __ fclt_d(dst, lhs, rhs); - __ bsel_v(dst, lhs, rhs); - break; - } - case kMipsF64x2Ceil: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ cfcmsa(kScratchReg, MSACSR); - __ li(kScratchReg2, kRoundToPlusInf); - __ ctcmsa(MSACSR, kScratchReg2); - __ frint_d(i.OutputSimd128Register(), i.InputSimd128Register(0)); - __ ctcmsa(MSACSR, kScratchReg); - break; - } - case kMipsF64x2Floor: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ cfcmsa(kScratchReg, MSACSR); - __ li(kScratchReg2, kRoundToMinusInf); - __ ctcmsa(MSACSR, kScratchReg2); - __ frint_d(i.OutputSimd128Register(), i.InputSimd128Register(0)); - __ ctcmsa(MSACSR, kScratchReg); - break; - } - case kMipsF64x2Trunc: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ cfcmsa(kScratchReg, MSACSR); - __ li(kScratchReg2, kRoundToZero); - __ ctcmsa(MSACSR, kScratchReg2); - __ frint_d(i.OutputSimd128Register(), i.InputSimd128Register(0)); - __ ctcmsa(MSACSR, kScratchReg); - break; - } - case kMipsF64x2NearestInt: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ cfcmsa(kScratchReg, MSACSR); - // kRoundToNearest == 0 - __ ctcmsa(MSACSR, zero_reg); - __ frint_d(i.OutputSimd128Register(), i.InputSimd128Register(0)); - __ ctcmsa(MSACSR, kScratchReg); - break; - } - case kMipsF64x2ConvertLowI32x4S: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); - __ ilvr_w(kSimd128RegZero, kSimd128RegZero, i.InputSimd128Register(0)); - __ slli_d(kSimd128RegZero, kSimd128RegZero, 32); - __ srai_d(kSimd128RegZero, kSimd128RegZero, 32); - __ ffint_s_d(i.OutputSimd128Register(), kSimd128RegZero); - break; - } - case kMipsF64x2ConvertLowI32x4U: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); - __ ilvr_w(kSimd128RegZero, kSimd128RegZero, i.InputSimd128Register(0)); - __ ffint_u_d(i.OutputSimd128Register(), kSimd128RegZero); - break; - } - case kMipsF64x2PromoteLowF32x4: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ fexupr_d(i.OutputSimd128Register(), i.InputSimd128Register(0)); - break; - } - case kMipsI64x2Add: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ addv_d(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI64x2Sub: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ subv_d(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI64x2Mul: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ mulv_d(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI64x2Neg: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); - __ 
subv_d(i.OutputSimd128Register(), kSimd128RegZero, - i.InputSimd128Register(0)); - break; - } - case kMipsI64x2Shl: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ slli_d(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputInt6(1)); - break; - } - case kMipsI64x2ShrS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ srai_d(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputInt6(1)); - break; - } - case kMipsI64x2ShrU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ srli_d(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputInt6(1)); - break; - } - case kMipsI64x2BitMask: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Register dst = i.OutputRegister(); - Simd128Register src = i.InputSimd128Register(0); - Simd128Register scratch0 = kSimd128RegZero; - Simd128Register scratch1 = kSimd128ScratchReg; - __ srli_d(scratch0, src, 63); - __ shf_w(scratch1, scratch0, 0x02); - __ slli_d(scratch1, scratch1, 1); - __ or_v(scratch0, scratch0, scratch1); - __ copy_u_b(dst, scratch0, 0); - break; - } - case kMipsI64x2Eq: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ ceq_d(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI64x2Ne: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ ceq_d(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - __ nor_v(i.OutputSimd128Register(), i.OutputSimd128Register(), - i.OutputSimd128Register()); - break; - } - case kMipsI64x2GtS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ clt_s_d(i.OutputSimd128Register(), i.InputSimd128Register(1), - i.InputSimd128Register(0)); - break; - } - case kMipsI64x2GeS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ cle_s_d(i.OutputSimd128Register(), i.InputSimd128Register(1), - i.InputSimd128Register(0)); - break; - } - case kMipsI64x2Abs: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); - __ adds_a_d(i.OutputSimd128Register(), i.InputSimd128Register(0), - kSimd128RegZero); - break; - } - case kMipsI64x2SConvertI32x4Low: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(); - Simd128Register src = i.InputSimd128Register(0); - __ ilvr_w(kSimd128ScratchReg, src, src); - __ slli_d(dst, kSimd128ScratchReg, 32); - __ srai_d(dst, dst, 32); - break; - } - case kMipsI64x2SConvertI32x4High: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(); - Simd128Register src = i.InputSimd128Register(0); - __ ilvl_w(kSimd128ScratchReg, src, src); - __ slli_d(dst, kSimd128ScratchReg, 32); - __ srai_d(dst, dst, 32); - break; - } - case kMipsI64x2UConvertI32x4Low: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); - __ ilvr_w(i.OutputSimd128Register(), kSimd128RegZero, - i.InputSimd128Register(0)); - break; - } - case kMipsI64x2UConvertI32x4High: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); - __ ilvl_w(i.OutputSimd128Register(), kSimd128RegZero, - i.InputSimd128Register(0)); - break; - } - case kMipsI64x2ExtMulLowI32x4S: - ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvr_w, dotp_s_d); - break; - case kMipsI64x2ExtMulHighI32x4S: - ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvl_w, dotp_s_d); - break; - case kMipsI64x2ExtMulLowI32x4U: - ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvr_w, dotp_u_d); - break; - case kMipsI64x2ExtMulHighI32x4U: 
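// All ASSEMBLE_SIMD_EXTENDED_MULTIPLY expansions pair an interleave (ilvr_*
// for the low half, ilvl_* for the high half) with a widening dot product,
// together implementing the Wasm extmul semantics: widen each selected lane
// of both operands, then multiply lane-wise into lanes twice as wide. A
// reference sketch of the signed low i64x2 case (names illustrative, not
// part of this file):
#include <array>
#include <cstdint>
inline std::array<int64_t, 2> I64x2ExtMulLowI32x4S(
    const std::array<int32_t, 4>& a, const std::array<int32_t, 4>& b) {
  // the "High" variant uses lanes 2 and 3 instead of 0 and 1
  return {static_cast<int64_t>(a[0]) * b[0],
          static_cast<int64_t>(a[1]) * b[1]};
}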
- ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvl_w, dotp_u_d); - break; - case kMipsI32x4ExtMulLowI16x8S: - ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvr_h, dotp_s_w); - break; - case kMipsI32x4ExtMulHighI16x8S: - ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvl_h, dotp_s_w); - break; - case kMipsI32x4ExtMulLowI16x8U: - ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvr_h, dotp_u_w); - break; - case kMipsI32x4ExtMulHighI16x8U: - ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvl_h, dotp_u_w); - break; - case kMipsI16x8ExtMulLowI8x16S: - ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvr_b, dotp_s_h); - break; - case kMipsI16x8ExtMulHighI8x16S: - ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvl_b, dotp_s_h); - break; - case kMipsI16x8ExtMulLowI8x16U: - ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvr_b, dotp_u_h); - break; - case kMipsI16x8ExtMulHighI8x16U: - ASSEMBLE_SIMD_EXTENDED_MULTIPLY(ilvl_b, dotp_u_h); - break; - case kMipsF32x4Splat: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ FmoveLow(kScratchReg, i.InputSingleRegister(0)); - __ fill_w(i.OutputSimd128Register(), kScratchReg); - break; - } - case kMipsF32x4ExtractLane: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ copy_u_w(kScratchReg, i.InputSimd128Register(0), i.InputInt8(1)); - __ FmoveLow(i.OutputSingleRegister(), kScratchReg); - break; - } - case kMipsF32x4ReplaceLane: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register src = i.InputSimd128Register(0); - Simd128Register dst = i.OutputSimd128Register(); - if (src != dst) { - __ move_v(dst, src); - } - __ FmoveLow(kScratchReg, i.InputSingleRegister(2)); - __ insert_w(dst, i.InputInt8(1), kScratchReg); - break; - } - case kMipsF32x4SConvertI32x4: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ ffint_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); - break; - } - case kMipsF32x4UConvertI32x4: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ ffint_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); - break; - } - case kMipsF32x4DemoteF64x2Zero: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); - __ fexdo_w(i.OutputSimd128Register(), kSimd128RegZero, - i.InputSimd128Register(0)); - break; - } - case kMipsI32x4Mul: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ mulv_w(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI32x4MaxS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ max_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI32x4MinS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ min_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI32x4Eq: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ ceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI32x4Ne: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(); - __ ceq_w(dst, i.InputSimd128Register(0), i.InputSimd128Register(1)); - __ nor_v(dst, dst, dst); - break; - } - case kMipsI32x4Shl: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ slli_w(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputInt5(1)); - break; - } - case kMipsI32x4ShrS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ srai_w(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputInt5(1)); - break; - } - case kMipsI32x4ShrU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ 
srli_w(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputInt5(1)); - break; - } - case kMipsI32x4MaxU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ max_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI32x4MinU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ min_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsS128Select: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - DCHECK(i.OutputSimd128Register() == i.InputSimd128Register(0)); - __ bsel_v(i.OutputSimd128Register(), i.InputSimd128Register(2), - i.InputSimd128Register(1)); - break; - } - case kMipsS128AndNot: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(); - __ nor_v(dst, i.InputSimd128Register(1), i.InputSimd128Register(1)); - __ and_v(dst, dst, i.InputSimd128Register(0)); - break; - } - case kMipsF32x4Abs: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ bclri_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31); - break; - } - case kMipsF32x4Neg: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ bnegi_w(i.OutputSimd128Register(), i.InputSimd128Register(0), 31); - break; - } - case kMipsF32x4Sqrt: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ fsqrt_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); - break; - } - case kMipsF32x4Add: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ fadd_w(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsF32x4Sub: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ fsub_w(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsF32x4Mul: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ fmul_w(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsF32x4Div: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ fdiv_w(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsF32x4Max: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ fmax_w(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsF32x4Min: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ fmin_w(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsF32x4Eq: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ fceq_w(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsF32x4Ne: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ fcne_w(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsF32x4Lt: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ fclt_w(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsF32x4Le: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ fcle_w(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsF32x4Pmin: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(); - Simd128Register lhs = i.InputSimd128Register(0); - Simd128Register rhs = i.InputSimd128Register(1); - // dst = rhs < lhs ? 
rhs : lhs - __ fclt_w(dst, rhs, lhs); - __ bsel_v(dst, lhs, rhs); - break; - } - case kMipsF32x4Pmax: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(); - Simd128Register lhs = i.InputSimd128Register(0); - Simd128Register rhs = i.InputSimd128Register(1); - // dst = lhs < rhs ? rhs : lhs - __ fclt_w(dst, lhs, rhs); - __ bsel_v(dst, lhs, rhs); - break; - } - case kMipsF32x4Ceil: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ cfcmsa(kScratchReg, MSACSR); - __ li(kScratchReg2, kRoundToPlusInf); - __ ctcmsa(MSACSR, kScratchReg2); - __ frint_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); - __ ctcmsa(MSACSR, kScratchReg); - break; - } - case kMipsF32x4Floor: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ cfcmsa(kScratchReg, MSACSR); - __ li(kScratchReg2, kRoundToMinusInf); - __ ctcmsa(MSACSR, kScratchReg2); - __ frint_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); - __ ctcmsa(MSACSR, kScratchReg); - break; - } - case kMipsF32x4Trunc: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ cfcmsa(kScratchReg, MSACSR); - __ li(kScratchReg2, kRoundToZero); - __ ctcmsa(MSACSR, kScratchReg2); - __ frint_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); - __ ctcmsa(MSACSR, kScratchReg); - break; - } - case kMipsF32x4NearestInt: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ cfcmsa(kScratchReg, MSACSR); - // kRoundToNearest == 0 - __ ctcmsa(MSACSR, zero_reg); - __ frint_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); - __ ctcmsa(MSACSR, kScratchReg); - break; - } - case kMipsI32x4SConvertF32x4: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ ftrunc_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); - break; - } - case kMipsI32x4UConvertF32x4: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ ftrunc_u_w(i.OutputSimd128Register(), i.InputSimd128Register(0)); - break; - } - case kMipsI32x4Neg: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); - __ subv_w(i.OutputSimd128Register(), kSimd128RegZero, - i.InputSimd128Register(0)); - break; - } - case kMipsI32x4GtS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ clt_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1), - i.InputSimd128Register(0)); - break; - } - case kMipsI32x4GeS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ cle_s_w(i.OutputSimd128Register(), i.InputSimd128Register(1), - i.InputSimd128Register(0)); - break; - } - case kMipsI32x4GtU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ clt_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1), - i.InputSimd128Register(0)); - break; - } - case kMipsI32x4GeU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ cle_u_w(i.OutputSimd128Register(), i.InputSimd128Register(1), - i.InputSimd128Register(0)); - break; - } - case kMipsI32x4Abs: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ asub_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0), - kSimd128RegZero); - break; - } - case kMipsI32x4BitMask: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Register dst = i.OutputRegister(); - Simd128Register src = i.InputSimd128Register(0); - Simd128Register scratch0 = kSimd128RegZero; - Simd128Register scratch1 = kSimd128ScratchReg; - __ srli_w(scratch0, src, 31); - __ srli_d(scratch1, scratch0, 31); - __ or_v(scratch0, scratch0, scratch1); - __ shf_w(scratch1, scratch0, 0x0E); - __ slli_d(scratch1, scratch1, 2); - __ or_v(scratch0, scratch0, scratch1); - __ copy_u_b(dst, 
scratch0, 0); - break; - } - case kMipsI32x4DotI16x8S: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ dotp_s_w(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI32x4TruncSatF64x2SZero: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); - __ ftrunc_s_d(kSimd128ScratchReg, i.InputSimd128Register(0)); - __ sat_s_d(kSimd128ScratchReg, kSimd128ScratchReg, 31); - __ pckev_w(i.OutputSimd128Register(), kSimd128RegZero, - kSimd128ScratchReg); - break; - } - case kMipsI32x4TruncSatF64x2UZero: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); - __ ftrunc_u_d(kSimd128ScratchReg, i.InputSimd128Register(0)); - __ sat_u_d(kSimd128ScratchReg, kSimd128ScratchReg, 31); - __ pckev_w(i.OutputSimd128Register(), kSimd128RegZero, - kSimd128ScratchReg); - break; - } - case kMipsI16x8Splat: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ fill_h(i.OutputSimd128Register(), i.InputRegister(0)); - break; - } - case kMipsI16x8ExtractLaneU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ copy_u_h(i.OutputRegister(), i.InputSimd128Register(0), - i.InputInt8(1)); - break; - } - case kMipsI16x8ExtractLaneS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ copy_s_h(i.OutputRegister(), i.InputSimd128Register(0), - i.InputInt8(1)); - break; - } - case kMipsI16x8ReplaceLane: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register src = i.InputSimd128Register(0); - Simd128Register dst = i.OutputSimd128Register(); - if (src != dst) { - __ move_v(dst, src); - } - __ insert_h(dst, i.InputInt8(1), i.InputRegister(2)); - break; - } - case kMipsI16x8Neg: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); - __ subv_h(i.OutputSimd128Register(), kSimd128RegZero, - i.InputSimd128Register(0)); - break; - } - case kMipsI16x8Shl: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ slli_h(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputInt4(1)); - break; - } - case kMipsI16x8ShrS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ srai_h(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputInt4(1)); - break; - } - case kMipsI16x8ShrU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ srli_h(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputInt4(1)); - break; - } - case kMipsI16x8Add: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ addv_h(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI16x8AddSatS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ adds_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI16x8Sub: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ subv_h(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI16x8SubSatS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ subs_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI16x8Mul: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ mulv_h(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI16x8MaxS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ max_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0), - 
i.InputSimd128Register(1)); - break; - } - case kMipsI16x8MinS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ min_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI16x8Eq: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ ceq_h(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI16x8Ne: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(); - __ ceq_h(dst, i.InputSimd128Register(0), i.InputSimd128Register(1)); - __ nor_v(dst, dst, dst); - break; - } - case kMipsI16x8GtS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ clt_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1), - i.InputSimd128Register(0)); - break; - } - case kMipsI16x8GeS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ cle_s_h(i.OutputSimd128Register(), i.InputSimd128Register(1), - i.InputSimd128Register(0)); - break; - } - case kMipsI16x8AddSatU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ adds_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI16x8SubSatU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ subs_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI16x8MaxU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ max_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI16x8MinU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ min_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI16x8GtU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ clt_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1), - i.InputSimd128Register(0)); - break; - } - case kMipsI16x8GeU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ cle_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1), - i.InputSimd128Register(0)); - break; - } - case kMipsI16x8RoundingAverageU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ aver_u_h(i.OutputSimd128Register(), i.InputSimd128Register(1), - i.InputSimd128Register(0)); - break; - } - case kMipsI16x8Abs: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ asub_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0), - kSimd128RegZero); - break; - } - case kMipsI16x8BitMask: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Register dst = i.OutputRegister(); - Simd128Register src = i.InputSimd128Register(0); - Simd128Register scratch0 = kSimd128RegZero; - Simd128Register scratch1 = kSimd128ScratchReg; - __ srli_h(scratch0, src, 15); - __ srli_w(scratch1, scratch0, 15); - __ or_v(scratch0, scratch0, scratch1); - __ srli_d(scratch1, scratch0, 30); - __ or_v(scratch0, scratch0, scratch1); - __ shf_w(scratch1, scratch0, 0x0E); - __ slli_d(scratch1, scratch1, 4); - __ or_v(scratch0, scratch0, scratch1); - __ copy_u_b(dst, scratch0, 0); - break; - } - case kMipsI16x8Q15MulRSatS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ mulr_q_h(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI16x8ExtAddPairwiseI8x16S: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ hadd_s_h(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(0)); - break; - } - case kMipsI16x8ExtAddPairwiseI8x16U: { - CpuFeatureScope msa_scope(tasm(), 
MIPS_SIMD); - __ hadd_u_h(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(0)); - break; - } - case kMipsI8x16Splat: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ fill_b(i.OutputSimd128Register(), i.InputRegister(0)); - break; - } - case kMipsI8x16ExtractLaneU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ copy_u_b(i.OutputRegister(), i.InputSimd128Register(0), - i.InputInt8(1)); - break; - } - case kMipsI8x16ExtractLaneS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ copy_s_b(i.OutputRegister(), i.InputSimd128Register(0), - i.InputInt8(1)); - break; - } - case kMipsI8x16ReplaceLane: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register src = i.InputSimd128Register(0); - Simd128Register dst = i.OutputSimd128Register(); - if (src != dst) { - __ move_v(dst, src); - } - __ insert_b(dst, i.InputInt8(1), i.InputRegister(2)); - break; - } - case kMipsI8x16Neg: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); - __ subv_b(i.OutputSimd128Register(), kSimd128RegZero, - i.InputSimd128Register(0)); - break; - } - case kMipsI8x16Shl: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ slli_b(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputInt3(1)); - break; - } - case kMipsI8x16ShrS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ srai_b(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputInt3(1)); - break; - } - case kMipsI8x16Add: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ addv_b(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI8x16AddSatS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ adds_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI8x16Sub: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ subv_b(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI8x16SubSatS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ subs_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI8x16MaxS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ max_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI8x16MinS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ min_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI8x16Eq: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ ceq_b(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI8x16Ne: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(); - __ ceq_b(dst, i.InputSimd128Register(0), i.InputSimd128Register(1)); - __ nor_v(dst, dst, dst); - break; - } - case kMipsI8x16GtS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ clt_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1), - i.InputSimd128Register(0)); - break; - } - case kMipsI8x16GeS: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ cle_s_b(i.OutputSimd128Register(), i.InputSimd128Register(1), - i.InputSimd128Register(0)); - break; - } - case kMipsI8x16ShrU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ srli_b(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputInt3(1)); - break; - } - case 
kMipsI8x16AddSatU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ adds_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI8x16SubSatU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ subs_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI8x16MaxU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ max_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI8x16MinU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ min_u_b(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsI8x16GtU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ clt_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1), - i.InputSimd128Register(0)); - break; - } - case kMipsI8x16GeU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ cle_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1), - i.InputSimd128Register(0)); - break; - } - case kMipsI8x16RoundingAverageU: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ aver_u_b(i.OutputSimd128Register(), i.InputSimd128Register(1), - i.InputSimd128Register(0)); - break; - } - case kMipsI8x16Abs: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ asub_s_b(i.OutputSimd128Register(), i.InputSimd128Register(0), - kSimd128RegZero); - break; - } - case kMipsI8x16Popcnt: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ pcnt_b(i.OutputSimd128Register(), i.InputSimd128Register(0)); - break; - } - case kMipsI8x16BitMask: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Register dst = i.OutputRegister(); - Simd128Register src = i.InputSimd128Register(0); - Simd128Register scratch0 = kSimd128RegZero; - Simd128Register scratch1 = kSimd128ScratchReg; - __ srli_b(scratch0, src, 7); - __ srli_h(scratch1, scratch0, 7); - __ or_v(scratch0, scratch0, scratch1); - __ srli_w(scratch1, scratch0, 14); - __ or_v(scratch0, scratch0, scratch1); - __ srli_d(scratch1, scratch0, 28); - __ or_v(scratch0, scratch0, scratch1); - __ shf_w(scratch1, scratch0, 0x0E); - __ ilvev_b(scratch0, scratch1, scratch0); - __ copy_u_h(dst, scratch0, 0); - break; - } - case kMipsS128And: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ and_v(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsS128Or: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ or_v(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsS128Xor: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ xor_v(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(1)); - break; - } - case kMipsS128Not: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ nor_v(i.OutputSimd128Register(), i.InputSimd128Register(0), - i.InputSimd128Register(0)); - break; - } - case kMipsV128AnyTrue: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Register dst = i.OutputRegister(); - Label all_false; - - __ BranchMSA(&all_false, MSA_BRANCH_V, all_zero, - i.InputSimd128Register(0), USE_DELAY_SLOT); - __ li(dst, 0); // branch delay slot - __ li(dst, -1); - __ bind(&all_false); - break; - } - case kMipsI64x2AllTrue: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Register dst = i.OutputRegister(); - Label all_true; - __ BranchMSA(&all_true, MSA_BRANCH_D, all_not_zero, - i.InputSimd128Register(0), USE_DELAY_SLOT); - __ li(dst, 
-1); // branch delay slot - __ li(dst, 0); - __ bind(&all_true); - break; - } - case kMipsI32x4AllTrue: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Register dst = i.OutputRegister(); - Label all_true; - __ BranchMSA(&all_true, MSA_BRANCH_W, all_not_zero, - i.InputSimd128Register(0), USE_DELAY_SLOT); - __ li(dst, -1); // branch delay slot - __ li(dst, 0); - __ bind(&all_true); - break; - } - case kMipsI16x8AllTrue: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Register dst = i.OutputRegister(); - Label all_true; - __ BranchMSA(&all_true, MSA_BRANCH_H, all_not_zero, - i.InputSimd128Register(0), USE_DELAY_SLOT); - __ li(dst, -1); // branch delay slot - __ li(dst, 0); - __ bind(&all_true); - break; - } - case kMipsI8x16AllTrue: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Register dst = i.OutputRegister(); - Label all_true; - __ BranchMSA(&all_true, MSA_BRANCH_B, all_not_zero, - i.InputSimd128Register(0), USE_DELAY_SLOT); - __ li(dst, -1); // branch delay slot - __ li(dst, 0); - __ bind(&all_true); - break; - } - case kMipsMsaLd: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ ld_b(i.OutputSimd128Register(), i.MemoryOperand()); - break; - } - case kMipsMsaSt: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ st_b(i.InputSimd128Register(2), i.MemoryOperand()); - break; - } - case kMipsS32x4InterleaveRight: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(), - src0 = i.InputSimd128Register(0), - src1 = i.InputSimd128Register(1); - // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0] - // dst = [5, 1, 4, 0] - __ ilvr_w(dst, src1, src0); - break; - } - case kMipsS32x4InterleaveLeft: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(), - src0 = i.InputSimd128Register(0), - src1 = i.InputSimd128Register(1); - // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0] - // dst = [7, 3, 6, 2] - __ ilvl_w(dst, src1, src0); - break; - } - case kMipsS32x4PackEven: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(), - src0 = i.InputSimd128Register(0), - src1 = i.InputSimd128Register(1); - // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0] - // dst = [6, 4, 2, 0] - __ pckev_w(dst, src1, src0); - break; - } - case kMipsS32x4PackOdd: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(), - src0 = i.InputSimd128Register(0), - src1 = i.InputSimd128Register(1); - // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0] - // dst = [7, 5, 3, 1] - __ pckod_w(dst, src1, src0); - break; - } - case kMipsS32x4InterleaveEven: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(), - src0 = i.InputSimd128Register(0), - src1 = i.InputSimd128Register(1); - // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0] - // dst = [6, 2, 4, 0] - __ ilvev_w(dst, src1, src0); - break; - } - case kMipsS32x4InterleaveOdd: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(), - src0 = i.InputSimd128Register(0), - src1 = i.InputSimd128Register(1); - // src1 = [7, 6, 5, 4], src0 = [3, 2, 1, 0] - // dst = [7, 3, 5, 1] - __ ilvod_w(dst, src1, src0); - break; - } - case kMipsS32x4Shuffle: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(), - src0 = i.InputSimd128Register(0), - src1 = i.InputSimd128Register(1); - - int32_t shuffle = i.InputInt32(2); - - if (src0 == src1) { - // Unary S32x4 shuffles are handled with shf.w instruction - 
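// shf.w takes an 8-bit immediate holding four 2-bit source-lane indices, one
// per destination lane, while the generic shuffle constant below stores one
// source lane per byte; the loop that follows repacks the latter into the
// former (lanes >= 4 refer to the second operand and are reduced mod 4,
// which is valid only because src0 == src1 on this path). The same repacking
// as a standalone helper, for clarity (name illustrative):
#include <cstdint>
inline uint8_t PackShfWImmediate(uint32_t shuffle) {
  uint8_t imm = 0;
  for (int k = 0; k < 4; ++k) {
    uint32_t lane = (shuffle >> (8 * k)) & 0xFF;            // byte k: source lane
    imm |= static_cast<uint8_t>((lane & 0x3) << (2 * k));   // 2 bits per lane
  }
  return imm;
}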
unsigned lane = shuffle & 0xFF; - if (FLAG_debug_code) { - // range of all four lanes, for unary instruction, - // should belong to the same range, which can be one of these: - // [0, 3] or [4, 7] - if (lane >= 4) { - int32_t shuffle_helper = shuffle; - for (int i = 0; i < 4; ++i) { - lane = shuffle_helper & 0xFF; - CHECK_GE(lane, 4); - shuffle_helper >>= 8; - } - } - } - uint32_t i8 = 0; - for (int i = 0; i < 4; i++) { - lane = shuffle & 0xFF; - if (lane >= 4) { - lane -= 4; - } - DCHECK_GT(4, lane); - i8 |= lane << (2 * i); - shuffle >>= 8; - } - __ shf_w(dst, src0, i8); - } else { - // For binary shuffles use vshf.w instruction - if (dst == src0) { - __ move_v(kSimd128ScratchReg, src0); - src0 = kSimd128ScratchReg; - } else if (dst == src1) { - __ move_v(kSimd128ScratchReg, src1); - src1 = kSimd128ScratchReg; - } - - __ li(kScratchReg, i.InputInt32(2)); - __ insert_w(dst, 0, kScratchReg); - __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); - __ ilvr_b(dst, kSimd128RegZero, dst); - __ ilvr_h(dst, kSimd128RegZero, dst); - __ vshf_w(dst, src1, src0); - } - break; - } - case kMipsS16x8InterleaveRight: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(), - src0 = i.InputSimd128Register(0), - src1 = i.InputSimd128Register(1); - // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0] - // dst = [11, 3, 10, 2, 9, 1, 8, 0] - __ ilvr_h(dst, src1, src0); - break; - } - case kMipsS16x8InterleaveLeft: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(), - src0 = i.InputSimd128Register(0), - src1 = i.InputSimd128Register(1); - // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0] - // dst = [15, 7, 14, 6, 13, 5, 12, 4] - __ ilvl_h(dst, src1, src0); - break; - } - case kMipsS16x8PackEven: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(), - src0 = i.InputSimd128Register(0), - src1 = i.InputSimd128Register(1); - // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0] - // dst = [14, 12, 10, 8, 6, 4, 2, 0] - __ pckev_h(dst, src1, src0); - break; - } - case kMipsS16x8PackOdd: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(), - src0 = i.InputSimd128Register(0), - src1 = i.InputSimd128Register(1); - // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0] - // dst = [15, 13, 11, 9, 7, 5, 3, 1] - __ pckod_h(dst, src1, src0); - break; - } - case kMipsS16x8InterleaveEven: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(), - src0 = i.InputSimd128Register(0), - src1 = i.InputSimd128Register(1); - // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0] - // dst = [14, 6, 12, 4, 10, 2, 8, 0] - __ ilvev_h(dst, src1, src0); - break; - } - case kMipsS16x8InterleaveOdd: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(), - src0 = i.InputSimd128Register(0), - src1 = i.InputSimd128Register(1); - // src1 = [15, ... 11, 10, 9, 8], src0 = [7, ... 3, 2, 1, 0] - // dst = [15, 7, ... 
11, 3, 9, 1] - __ ilvod_h(dst, src1, src0); - break; - } - case kMipsS16x4Reverse: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - // src = [7, 6, 5, 4, 3, 2, 1, 0], dst = [4, 5, 6, 7, 0, 1, 2, 3] - // shf.df imm field: 0 1 2 3 = 00011011 = 0x1B - __ shf_h(i.OutputSimd128Register(), i.InputSimd128Register(0), 0x1B); - break; - } - case kMipsS16x2Reverse: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - // src = [7, 6, 5, 4, 3, 2, 1, 0], dst = [6, 7, 4, 5, 3, 2, 0, 1] - // shf.df imm field: 2 3 0 1 = 10110001 = 0xB1 - __ shf_h(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1); - break; - } - case kMipsS8x16InterleaveRight: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(), - src0 = i.InputSimd128Register(0), - src1 = i.InputSimd128Register(1); - // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0] - // dst = [23, 7, ... 17, 1, 16, 0] - __ ilvr_b(dst, src1, src0); - break; - } - case kMipsS8x16InterleaveLeft: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(), - src0 = i.InputSimd128Register(0), - src1 = i.InputSimd128Register(1); - // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0] - // dst = [31, 15, ... 25, 9, 24, 8] - __ ilvl_b(dst, src1, src0); - break; - } - case kMipsS8x16PackEven: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(), - src0 = i.InputSimd128Register(0), - src1 = i.InputSimd128Register(1); - // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0] - // dst = [30, 28, ... 6, 4, 2, 0] - __ pckev_b(dst, src1, src0); - break; - } - case kMipsS8x16PackOdd: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(), - src0 = i.InputSimd128Register(0), - src1 = i.InputSimd128Register(1); - // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0] - // dst = [31, 29, ... 7, 5, 3, 1] - __ pckod_b(dst, src1, src0); - break; - } - case kMipsS8x16InterleaveEven: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(), - src0 = i.InputSimd128Register(0), - src1 = i.InputSimd128Register(1); - // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0] - // dst = [30, 14, ... 18, 2, 16, 0] - __ ilvev_b(dst, src1, src0); - break; - } - case kMipsS8x16InterleaveOdd: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(), - src0 = i.InputSimd128Register(0), - src1 = i.InputSimd128Register(1); - // src1 = [31, ... 19, 18, 17, 16], src0 = [15, ... 3, 2, 1, 0] - // dst = [31, 15, ... 
19, 3, 17, 1] - __ ilvod_b(dst, src1, src0); - break; - } - case kMipsS8x16Concat: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(); - DCHECK(dst == i.InputSimd128Register(0)); - __ sldi_b(dst, i.InputSimd128Register(1), i.InputInt4(2)); - break; - } - case kMipsI8x16Shuffle: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(), - src0 = i.InputSimd128Register(0), - src1 = i.InputSimd128Register(1); - - if (dst == src0) { - __ move_v(kSimd128ScratchReg, src0); - src0 = kSimd128ScratchReg; - } else if (dst == src1) { - __ move_v(kSimd128ScratchReg, src1); - src1 = kSimd128ScratchReg; - } - - __ li(kScratchReg, i.InputInt32(2)); - __ insert_w(dst, 0, kScratchReg); - __ li(kScratchReg, i.InputInt32(3)); - __ insert_w(dst, 1, kScratchReg); - __ li(kScratchReg, i.InputInt32(4)); - __ insert_w(dst, 2, kScratchReg); - __ li(kScratchReg, i.InputInt32(5)); - __ insert_w(dst, 3, kScratchReg); - __ vshf_b(dst, src1, src0); - break; - } - case kMipsI8x16Swizzle: { - Simd128Register dst = i.OutputSimd128Register(), - tbl = i.InputSimd128Register(0), - ctl = i.InputSimd128Register(1); - DCHECK(dst != ctl && dst != tbl); - Simd128Register zeroReg = i.TempSimd128Register(0); - __ fill_w(zeroReg, zero_reg); - __ move_v(dst, ctl); - __ vshf_b(dst, tbl, zeroReg); - break; - } - case kMipsS8x8Reverse: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - // src = [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - // dst = [8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7] - // [A B C D] => [B A D C]: shf.w imm: 2 3 0 1 = 10110001 = 0xB1 - // C: [7, 6, 5, 4] => A': [4, 5, 6, 7]: shf.b imm: 00011011 = 0x1B - __ shf_w(kSimd128ScratchReg, i.InputSimd128Register(0), 0xB1); - __ shf_b(i.OutputSimd128Register(), kSimd128ScratchReg, 0x1B); - break; - } - case kMipsS8x4Reverse: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - // src = [15, 14, ... 3, 2, 1, 0], dst = [12, 13, 14, 15, ... 0, 1, 2, 3] - // shf.df imm field: 0 1 2 3 = 00011011 = 0x1B - __ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0x1B); - break; - } - case kMipsS8x2Reverse: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - // src = [15, 14, ... 3, 2, 1, 0], dst = [14, 15, 12, 13, ... 
2, 3, 0, 1] - // shf.df imm field: 2 3 0 1 = 10110001 = 0xB1 - __ shf_b(i.OutputSimd128Register(), i.InputSimd128Register(0), 0xB1); - break; - } - case kMipsI32x4SConvertI16x8Low: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(); - Simd128Register src = i.InputSimd128Register(0); - __ ilvr_h(kSimd128ScratchReg, src, src); - __ slli_w(dst, kSimd128ScratchReg, 16); - __ srai_w(dst, dst, 16); - break; - } - case kMipsI32x4SConvertI16x8High: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(); - Simd128Register src = i.InputSimd128Register(0); - __ ilvl_h(kSimd128ScratchReg, src, src); - __ slli_w(dst, kSimd128ScratchReg, 16); - __ srai_w(dst, dst, 16); - break; - } - case kMipsI32x4UConvertI16x8Low: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); - __ ilvr_h(i.OutputSimd128Register(), kSimd128RegZero, - i.InputSimd128Register(0)); - break; - } - case kMipsI32x4UConvertI16x8High: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); - __ ilvl_h(i.OutputSimd128Register(), kSimd128RegZero, - i.InputSimd128Register(0)); - break; - } - case kMipsI16x8SConvertI8x16Low: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(); - Simd128Register src = i.InputSimd128Register(0); - __ ilvr_b(kSimd128ScratchReg, src, src); - __ slli_h(dst, kSimd128ScratchReg, 8); - __ srai_h(dst, dst, 8); - break; - } - case kMipsI16x8SConvertI8x16High: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(); - Simd128Register src = i.InputSimd128Register(0); - __ ilvl_b(kSimd128ScratchReg, src, src); - __ slli_h(dst, kSimd128ScratchReg, 8); - __ srai_h(dst, dst, 8); - break; - } - case kMipsI16x8SConvertI32x4: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(); - Simd128Register src0 = i.InputSimd128Register(0); - Simd128Register src1 = i.InputSimd128Register(1); - __ sat_s_w(kSimd128ScratchReg, src0, 15); - __ sat_s_w(kSimd128RegZero, src1, 15); // kSimd128RegZero as scratch - __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg); - break; - } - case kMipsI16x8UConvertI32x4: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(); - Simd128Register src0 = i.InputSimd128Register(0); - Simd128Register src1 = i.InputSimd128Register(1); - __ sat_u_w(kSimd128ScratchReg, src0, 15); - __ sat_u_w(kSimd128RegZero, src1, 15); // kSimd128RegZero as scratch - __ pckev_h(dst, kSimd128RegZero, kSimd128ScratchReg); - break; - } - case kMipsI16x8UConvertI8x16Low: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); - __ ilvr_b(i.OutputSimd128Register(), kSimd128RegZero, - i.InputSimd128Register(0)); - break; - } - case kMipsI16x8UConvertI8x16High: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); - __ ilvl_b(i.OutputSimd128Register(), kSimd128RegZero, - i.InputSimd128Register(0)); - break; - } - case kMipsI8x16SConvertI16x8: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(); - Simd128Register src0 = i.InputSimd128Register(0); - Simd128Register src1 = i.InputSimd128Register(1); - __ sat_s_h(kSimd128ScratchReg, src0, 7); - __ sat_s_h(kSimd128RegZero, src1, 7); // 
kSimd128RegZero as scratch - __ pckev_b(dst, kSimd128RegZero, kSimd128ScratchReg); - break; - } - case kMipsI8x16UConvertI16x8: { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - Simd128Register dst = i.OutputSimd128Register(); - Simd128Register src0 = i.InputSimd128Register(0); - Simd128Register src1 = i.InputSimd128Register(1); - __ sat_u_h(kSimd128ScratchReg, src0, 7); - __ sat_u_h(kSimd128RegZero, src1, 7); // kSimd128RegZero as scratch - __ pckev_b(dst, kSimd128RegZero, kSimd128ScratchReg); - break; - } - } - return kSuccess; -} - -void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm, - Instruction* instr, FlagsCondition condition, - Label* tlabel, Label* flabel, bool fallthru) { -#undef __ -#define __ tasm-> - - // MIPS does not have condition code flags, so compare and branch are - // implemented differently than on the other arch's. The compare operations - // emit mips pseudo-instructions, which are handled here by branch - // instructions that do the actual comparison. Essential that the input - // registers to compare pseudo-op are not modified before this branch op, as - // they are tested here. - - MipsOperandConverter i(gen, instr); - if (instr->arch_opcode() == kMipsTst) { - Condition cc = FlagsConditionToConditionTst(condition); - __ Branch(tlabel, cc, kScratchReg, Operand(zero_reg)); - } else if (instr->arch_opcode() == kMipsAddOvf || - instr->arch_opcode() == kMipsSubOvf) { - // Overflow occurs if overflow register is negative - switch (condition) { - case kOverflow: - __ Branch(tlabel, lt, kScratchReg, Operand(zero_reg)); - break; - case kNotOverflow: - __ Branch(tlabel, ge, kScratchReg, Operand(zero_reg)); - break; - default: - UNSUPPORTED_COND(instr->arch_opcode(), condition); - } - } else if (instr->arch_opcode() == kMipsMulOvf) { - // Overflow occurs if overflow register is not zero - switch (condition) { - case kOverflow: - __ Branch(tlabel, ne, kScratchReg, Operand(zero_reg)); - break; - case kNotOverflow: - __ Branch(tlabel, eq, kScratchReg, Operand(zero_reg)); - break; - default: - UNSUPPORTED_COND(kMipsMulOvf, condition); - } - } else if (instr->arch_opcode() == kMipsCmp) { - Condition cc = FlagsConditionToConditionCmp(condition); - __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1)); - } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) { - Condition cc = FlagsConditionToConditionCmp(condition); - DCHECK((cc == ls) || (cc == hi)); - if (cc == ls) { - __ xori(i.TempRegister(0), i.TempRegister(0), 1); - } - __ Branch(tlabel, ne, i.TempRegister(0), Operand(zero_reg)); - } else if (instr->arch_opcode() == kMipsCmpS || - instr->arch_opcode() == kMipsCmpD) { - bool predicate; - FlagsConditionToConditionCmpFPU(&predicate, condition); - if (predicate) { - __ BranchTrueF(tlabel); - } else { - __ BranchFalseF(tlabel); - } - } else { - PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n", - instr->arch_opcode()); - UNIMPLEMENTED(); - } - if (!fallthru) __ Branch(flabel); // no fallthru to flabel. -#undef __ -#define __ tasm()-> -} - -// Assembles branches after an instruction. 
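-// The lowering is shared with AssembleBranchToLabels above: the compare
-// pseudo-instruction leaves its operands (or kScratchReg for overflow checks)
-// in known registers, and the branch tests them directly.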
-void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
-  Label* tlabel = branch->true_label;
-  Label* flabel = branch->false_label;
-  AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel,
-                         branch->fallthru);
-}
-
-void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
-                                            BranchInfo* branch) {
-  AssembleArchBranch(instr, branch);
-}
-
-void CodeGenerator::AssembleArchJumpRegardlessOfAssemblyOrder(
-    RpoNumber target) {
-  __ Branch(GetLabel(target));
-}
-
-#if V8_ENABLE_WEBASSEMBLY
-void CodeGenerator::AssembleArchTrap(Instruction* instr,
-                                     FlagsCondition condition) {
-  class OutOfLineTrap final : public OutOfLineCode {
-   public:
-    OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
-        : OutOfLineCode(gen), instr_(instr), gen_(gen) {}
-
-    void Generate() final {
-      MipsOperandConverter i(gen_, instr_);
-      TrapId trap_id =
-          static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
-      GenerateCallToTrap(trap_id);
-    }
-
-   private:
-    void GenerateCallToTrap(TrapId trap_id) {
-      if (trap_id == TrapId::kInvalid) {
-        // We cannot test calls to the runtime in cctest/test-run-wasm.
-        // Therefore we emit a call to C here instead of a call to the runtime.
-        // We use the context register as the scratch register, because we do
-        // not have a context here.
-        __ PrepareCallCFunction(0, 0, cp);
-        __ CallCFunction(
-            ExternalReference::wasm_call_trap_callback_for_testing(), 0);
-        __ LeaveFrame(StackFrame::WASM);
-        auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
-        int pop_count =
-            static_cast<int>(call_descriptor->ParameterSlotCount());
-        __ Drop(pop_count);
-        __ Ret();
-      } else {
-        gen_->AssembleSourcePosition(instr_);
-        // A direct call to a wasm runtime stub defined in this module.
-        // Just encode the stub index. This will be patched when the code
-        // is added to the native module and copied into wasm code space.
-        __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
-        ReferenceMap* reference_map =
-            gen_->zone()->New<ReferenceMap>(gen_->zone());
-        gen_->RecordSafepoint(reference_map);
-        if (FLAG_debug_code) {
-          __ stop();
-        }
-      }
-    }
-
-    Instruction* instr_;
-    CodeGenerator* gen_;
-  };
-  auto ool = zone()->New<OutOfLineTrap>(this, instr);
-  Label* tlabel = ool->entry();
-  AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true);
-}
-#endif  // V8_ENABLE_WEBASSEMBLY
-
-// Assembles boolean materializations after an instruction.
-void CodeGenerator::AssembleArchBoolean(Instruction* instr,
-                                        FlagsCondition condition) {
-  MipsOperandConverter i(this, instr);
-
-  // Materialize a full 32-bit 1 or 0 value. The result register is always the
-  // last output of the instruction.
-  DCHECK_NE(0u, instr->OutputCount());
-  Register result = i.OutputRegister(instr->OutputCount() - 1);
-  // MIPS does not have condition code flags, so compare and branch are
-  // implemented differently than on other architectures. The compare
-  // operations emit mips pseudo-instructions, which are checked and handled
-  // here.
-
-  if (instr->arch_opcode() == kMipsTst) {
-    Condition cc = FlagsConditionToConditionTst(condition);
-    if (cc == eq) {
-      __ Sltu(result, kScratchReg, 1);
-    } else {
-      __ Sltu(result, zero_reg, kScratchReg);
-    }
-    return;
-  } else if (instr->arch_opcode() == kMipsAddOvf ||
-             instr->arch_opcode() == kMipsSubOvf) {
-    // Overflow occurs if overflow register is negative.
-    __ slt(result, kScratchReg, zero_reg);
-  } else if (instr->arch_opcode() == kMipsMulOvf) {
-    // Overflow occurs if overflow register is not zero.
-    __ Sgtu(result, kScratchReg, zero_reg);
-  } else if (instr->arch_opcode() == kMipsCmp) {
-    Condition cc = FlagsConditionToConditionCmp(condition);
-    switch (cc) {
-      case eq:
-      case ne: {
-        Register left = i.InputRegister(0);
-        Operand right = i.InputOperand(1);
-        if (instr->InputAt(1)->IsImmediate()) {
-          if (is_int16(-right.immediate())) {
-            if (right.immediate() == 0) {
-              if (cc == eq) {
-                __ Sltu(result, left, 1);
-              } else {
-                __ Sltu(result, zero_reg, left);
-              }
-            } else {
-              __ Addu(result, left, -right.immediate());
-              if (cc == eq) {
-                __ Sltu(result, result, 1);
-              } else {
-                __ Sltu(result, zero_reg, result);
-              }
-            }
-          } else {
-            if (is_uint16(right.immediate())) {
-              __ Xor(result, left, right);
-            } else {
-              __ li(kScratchReg, right);
-              __ Xor(result, left, kScratchReg);
-            }
-            if (cc == eq) {
-              __ Sltu(result, result, 1);
-            } else {
-              __ Sltu(result, zero_reg, result);
-            }
-          }
-        } else {
-          __ Xor(result, left, right);
-          if (cc == eq) {
-            __ Sltu(result, result, 1);
-          } else {
-            __ Sltu(result, zero_reg, result);
-          }
-        }
-      } break;
-      case lt:
-      case ge: {
-        Register left = i.InputRegister(0);
-        Operand right = i.InputOperand(1);
-        __ Slt(result, left, right);
-        if (cc == ge) {
-          __ xori(result, result, 1);
-        }
-      } break;
-      case gt:
-      case le: {
-        Register left = i.InputRegister(1);
-        Operand right = i.InputOperand(0);
-        __ Slt(result, left, right);
-        if (cc == le) {
-          __ xori(result, result, 1);
-        }
-      } break;
-      case lo:
-      case hs: {
-        Register left = i.InputRegister(0);
-        Operand right = i.InputOperand(1);
-        __ Sltu(result, left, right);
-        if (cc == hs) {
-          __ xori(result, result, 1);
-        }
-      } break;
-      case hi:
-      case ls: {
-        Register left = i.InputRegister(1);
-        Operand right = i.InputOperand(0);
-        __ Sltu(result, left, right);
-        if (cc == ls) {
-          __ xori(result, result, 1);
-        }
-      } break;
-      default:
-        UNREACHABLE();
-    }
-    return;
-  } else if (instr->arch_opcode() == kMipsCmpD ||
-             instr->arch_opcode() == kMipsCmpS) {
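-    // Pre-r6 FPU compares set an FCSR flag tested via movf/movt; r6 compares
-    // write all ones/zeros into an FPR, so read it back and keep only the LSB.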
-    FPURegister left = i.InputOrZeroDoubleRegister(0);
-    FPURegister right = i.InputOrZeroDoubleRegister(1);
-    if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
-        !__ IsDoubleZeroRegSet()) {
-      __ Move(kDoubleRegZero, 0.0);
-    }
-    bool predicate;
-    FlagsConditionToConditionCmpFPU(&predicate, condition);
-    if (!IsMipsArchVariant(kMips32r6)) {
-      __ li(result, Operand(1));
-      if (predicate) {
-        __ Movf(result, zero_reg);
-      } else {
-        __ Movt(result, zero_reg);
-      }
-    } else {
-      __ mfc1(result, kDoubleCompareReg);
-      if (predicate) {
-        __ And(result, result, 1);  // cmp returns all 1's/0's, use only LSB.
-      } else {
-        __ Addu(result, result, 1);  // Toggle result for not equal.
-      }
-    }
-    return;
-  } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
-    Condition cc = FlagsConditionToConditionCmp(condition);
-    DCHECK((cc == ls) || (cc == hi));
-    if (cc == ls) {
-      __ xori(i.OutputRegister(), i.TempRegister(0), 1);
-    }
-    return;
-  } else {
-    PrintF("AssembleArchBoolean Unimplemented arch_opcode is : %d\n",
-           instr->arch_opcode());
-    TRACE_UNIMPL();
-    UNIMPLEMENTED();
-  }
-}
-
-void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
-  MipsOperandConverter i(this, instr);
-  Register input = i.InputRegister(0);
-  std::vector<std::pair<int32_t, Label*>> cases;
-  for (size_t index = 2; index < instr->InputCount(); index += 2) {
-    cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
-  }
-  AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(),
-                                      cases.data() + cases.size());
-}
-
-void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
-  MipsOperandConverter i(this, instr);
-  Register input = i.InputRegister(0);
-  size_t const case_count = instr->InputCount() - 2;
-  __ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count));
-  __ GenerateSwitchTable(input, case_count, [&i, this](size_t index) {
-    return GetLabel(i.InputRpo(index + 2));
-  });
-}
-
-void CodeGenerator::AssembleArchSelect(Instruction* instr,
-                                       FlagsCondition condition) {
-  UNIMPLEMENTED();
-}
-
-void CodeGenerator::FinishFrame(Frame* frame) {
-  auto call_descriptor = linkage()->GetIncomingDescriptor();
-
-  const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
-  if (!saves_fpu.is_empty()) {
-    frame->AlignSavedCalleeRegisterSlots();
-  }
-
-  if (!saves_fpu.is_empty()) {
-    int count = saves_fpu.Count();
-    DCHECK_EQ(kNumCalleeSavedFPU, count);
-    frame->AllocateSavedCalleeRegisterSlots(count *
-                                            (kDoubleSize / kSystemPointerSize));
-  }
-
-  const RegList saves = call_descriptor->CalleeSavedRegisters();
-  if (!saves.is_empty()) {
-    int count = saves.Count();
-    frame->AllocateSavedCalleeRegisterSlots(count);
-  }
-}
-
-void CodeGenerator::AssembleConstructFrame() {
-  auto call_descriptor = linkage()->GetIncomingDescriptor();
-  if (frame_access_state()->has_frame()) {
-    if (call_descriptor->IsCFunctionCall()) {
-#if V8_ENABLE_WEBASSEMBLY
-      if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
-        __ StubPrologue(StackFrame::C_WASM_ENTRY);
-        // Reserve stack space for saving the c_entry_fp later.
-        __ Subu(sp, sp, Operand(kSystemPointerSize));
-#else
-      // For balance.
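-      // The dead branch keeps the braces balanced with the #if arm above so
-      // that the following `} else {` parses the same in both configurations.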
-      if (false) {
-#endif  // V8_ENABLE_WEBASSEMBLY
-      } else {
-        __ Push(ra, fp);
-        __ mov(fp, sp);
-      }
-    } else if (call_descriptor->IsJSFunctionCall()) {
-      __ Prologue();
-    } else {
-      __ StubPrologue(info()->GetOutputStackFrameType());
-#if V8_ENABLE_WEBASSEMBLY
-      if (call_descriptor->IsWasmFunctionCall() ||
-          call_descriptor->IsWasmImportWrapper() ||
-          call_descriptor->IsWasmCapiFunction()) {
-        __ Push(kWasmInstanceRegister);
-      }
-      if (call_descriptor->IsWasmCapiFunction()) {
-        // Reserve space for saving the PC later.
-        __ Subu(sp, sp, Operand(kSystemPointerSize));
-      }
-#endif  // V8_ENABLE_WEBASSEMBLY
-    }
-  }
-
-  int required_slots =
-      frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
-
-  if (info()->is_osr()) {
-    // TurboFan OSR-compiled functions cannot be entered directly.
-    __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
-
-    // Unoptimized code jumps directly to this entrypoint while the unoptimized
-    // frame is still on the stack. Optimized code uses OSR values directly from
-    // the unoptimized frame. Thus, all that needs to be done is to allocate the
-    // remaining stack slots.
-    __ RecordComment("-- OSR entrypoint --");
-    osr_pc_offset_ = __ pc_offset();
-    required_slots -= osr_helper()->UnoptimizedFrameSlots();
-  }
-
-  const RegList saves = call_descriptor->CalleeSavedRegisters();
-  const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
-
-  if (required_slots > 0) {
-    DCHECK(frame_access_state()->has_frame());
-#if V8_ENABLE_WEBASSEMBLY
-    if (info()->IsWasm() && required_slots * kSystemPointerSize > 4 * KB) {
-      // For WebAssembly functions with big frames we have to do the stack
-      // overflow check before we construct the frame. Otherwise we may not
-      // have enough space on the stack to call the runtime for the stack
-      // overflow.
-      Label done;
-
-      // If the frame is bigger than the stack, we throw the stack overflow
-      // exception unconditionally. Thereby we can avoid the integer overflow
-      // check in the condition code.
-      if (required_slots * kSystemPointerSize < FLAG_stack_size * KB) {
-        __ Lw(
-            kScratchReg,
-            FieldMemOperand(kWasmInstanceRegister,
-                            WasmInstanceObject::kRealStackLimitAddressOffset));
-        __ Lw(kScratchReg, MemOperand(kScratchReg));
-        __ Addu(kScratchReg, kScratchReg,
-                Operand(required_slots * kSystemPointerSize));
-        __ Branch(&done, uge, sp, Operand(kScratchReg));
-      }
-
-      __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
-      // The call does not return, hence we can ignore any references and just
-      // define an empty safepoint.
-      ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
-      RecordSafepoint(reference_map);
-      if (FLAG_debug_code) __ stop();
-
-      __ bind(&done);
-    }
-#endif  // V8_ENABLE_WEBASSEMBLY
-  }
-
-  const int returns = frame()->GetReturnSlotCount();
-
-  // Skip callee-saved and return slots, which are pushed below.
-  required_slots -= saves.Count();
-  required_slots -= 2 * saves_fpu.Count();
-  required_slots -= returns;
-  if (required_slots > 0) {
-    __ Subu(sp, sp, Operand(required_slots * kSystemPointerSize));
-  }
-
-  // Save callee-saved FPU registers.
-  if (!saves_fpu.is_empty()) {
-    __ MultiPushFPU(saves_fpu);
-  }
-
-  if (!saves.is_empty()) {
-    // Save callee-saved registers.
-    __ MultiPush(saves);
-  }
-
-  if (returns != 0) {
-    // Create space for returns.
-    __ Subu(sp, sp, Operand(returns * kSystemPointerSize));
-  }
-}
-
-void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
-  auto call_descriptor = linkage()->GetIncomingDescriptor();
-
-  const int returns = frame()->GetReturnSlotCount();
-  if (returns != 0) {
-    __ Addu(sp, sp, Operand(returns * kSystemPointerSize));
-  }
-
-  // Restore GP registers.
-  const RegList saves = call_descriptor->CalleeSavedRegisters();
-  if (!saves.is_empty()) {
-    __ MultiPop(saves);
-  }
-
-  // Restore FPU registers.
-  const DoubleRegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
-  if (!saves_fpu.is_empty()) {
-    __ MultiPopFPU(saves_fpu);
-  }
-
-  MipsOperandConverter g(this, nullptr);
-  const int parameter_slots =
-      static_cast<int>(call_descriptor->ParameterSlotCount());
-
-  // {additional_pop_count} is only greater than zero if {parameter_slots} == 0.
-  // Check RawMachineAssembler::PopAndReturn.
-  if (parameter_slots != 0) {
-    if (additional_pop_count->IsImmediate()) {
-      DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
-    } else if (FLAG_debug_code) {
-      __ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue,
-                g.ToRegister(additional_pop_count),
-                Operand(static_cast<int32_t>(0)));
-    }
-  }
-  // Functions with JS linkage have at least one parameter (the receiver).
-  // If {parameter_slots} == 0, it means it is a builtin with
-  // kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
-  // itself.
-  const bool drop_jsargs = frame_access_state()->has_frame() &&
-                           call_descriptor->IsJSFunctionCall() &&
-                           parameter_slots != 0;
-
-  if (call_descriptor->IsCFunctionCall()) {
-    AssembleDeconstructFrame();
-  } else if (frame_access_state()->has_frame()) {
-    // Canonicalize JSFunction return sites for now unless they have a variable
-    // number of stack slot pops.
-    if (additional_pop_count->IsImmediate() &&
-        g.ToConstant(additional_pop_count).ToInt32() == 0) {
-      if (return_label_.is_bound()) {
-        __ Branch(&return_label_);
-        return;
-      } else {
-        __ bind(&return_label_);
-      }
-    }
-    if (drop_jsargs) {
-      // Get the actual argument count.
-      __ Lw(t0, MemOperand(fp, StandardFrameConstants::kArgCOffset));
-    }
-    AssembleDeconstructFrame();
-  }
-
-  if (drop_jsargs) {
-    // We must pop all arguments from the stack (including the receiver). This
-    // number of arguments is given by max(1 + argc_reg, parameter_slots).
-    if (parameter_slots > 1) {
-      __ li(kScratchReg, parameter_slots);
-      __ slt(kScratchReg2, t0, kScratchReg);
-      __ movn(t0, kScratchReg, kScratchReg2);
-    }
-    __ Lsa(sp, sp, t0, kSystemPointerSizeLog2, t0);
-  } else if (additional_pop_count->IsImmediate()) {
-    DCHECK_EQ(Constant::kInt32, g.ToConstant(additional_pop_count).type());
-    int additional_count = g.ToConstant(additional_pop_count).ToInt32();
-    __ Drop(parameter_slots + additional_count);
-  } else {
-    Register pop_reg = g.ToRegister(additional_pop_count);
-    __ Drop(parameter_slots);
-    __ Lsa(sp, sp, pop_reg, kSystemPointerSizeLog2, pop_reg);
-  }
-  __ Ret();
-}
-
-void CodeGenerator::FinishCode() {}
-
-void CodeGenerator::PrepareForDeoptimizationExits(
-    ZoneDeque<DeoptimizationExit*>* exits) {}
-
-void CodeGenerator::AssembleMove(InstructionOperand* source,
-                                 InstructionOperand* destination) {
-  MipsOperandConverter g(this, nullptr);
-  // Dispatch on the source and destination operand kinds. Not all
-  // combinations are possible.
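-  // Memory-to-memory moves go through kScratchReg or the FPU/MSA scratch
-  // registers; register and constant sources are emitted directly.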
-  if (source->IsRegister()) {
-    DCHECK(destination->IsRegister() || destination->IsStackSlot());
-    Register src = g.ToRegister(source);
-    if (destination->IsRegister()) {
-      __ mov(g.ToRegister(destination), src);
-    } else {
-      __ sw(src, g.ToMemOperand(destination));
-    }
-  } else if (source->IsStackSlot()) {
-    DCHECK(destination->IsRegister() || destination->IsStackSlot());
-    MemOperand src = g.ToMemOperand(source);
-    if (destination->IsRegister()) {
-      __ lw(g.ToRegister(destination), src);
-    } else {
-      Register temp = kScratchReg;
-      __ lw(temp, src);
-      __ sw(temp, g.ToMemOperand(destination));
-    }
-  } else if (source->IsConstant()) {
-    Constant src = g.ToConstant(source);
-    if (destination->IsRegister() || destination->IsStackSlot()) {
-      Register dst =
-          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
-      switch (src.type()) {
-        case Constant::kInt32:
-#if V8_ENABLE_WEBASSEMBLY
-          if (RelocInfo::IsWasmReference(src.rmode()))
-            __ li(dst, Operand(src.ToInt32(), src.rmode()));
-          else
-#endif  // V8_ENABLE_WEBASSEMBLY
-            __ li(dst, Operand(src.ToInt32()));
-          break;
-        case Constant::kFloat32:
-          __ li(dst, Operand::EmbeddedNumber(src.ToFloat32()));
-          break;
-        case Constant::kInt64:
-          UNREACHABLE();
-        case Constant::kFloat64:
-          __ li(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
-          break;
-        case Constant::kExternalReference:
-          __ li(dst, src.ToExternalReference());
-          break;
-        case Constant::kDelayedStringConstant:
-          __ li(dst, src.ToDelayedStringConstant());
-          break;
-        case Constant::kHeapObject: {
-          Handle<HeapObject> src_object = src.ToHeapObject();
-          RootIndex index;
-          if (IsMaterializableFromRoot(src_object, &index)) {
-            __ LoadRoot(dst, index);
-          } else {
-            __ li(dst, src_object);
-          }
-          break;
-        }
-        case Constant::kCompressedHeapObject:
-          UNREACHABLE();
-        case Constant::kRpoNumber:
-          UNREACHABLE();  // TODO(titzer): loading RPO numbers on mips.
-      }
-      if (destination->IsStackSlot()) __ sw(dst, g.ToMemOperand(destination));
-    } else if (src.type() == Constant::kFloat32) {
-      if (destination->IsFPStackSlot()) {
-        MemOperand dst = g.ToMemOperand(destination);
-        if (base::bit_cast<int32_t>(src.ToFloat32()) == 0) {
-          __ sw(zero_reg, dst);
-        } else {
-          __ li(kScratchReg, Operand(base::bit_cast<int32_t>(src.ToFloat32())));
-          __ sw(kScratchReg, dst);
-        }
-      } else {
-        DCHECK(destination->IsFPRegister());
-        FloatRegister dst = g.ToSingleRegister(destination);
-        __ Move(dst, src.ToFloat32());
-      }
-    } else {
-      DCHECK_EQ(Constant::kFloat64, src.type());
-      DoubleRegister dst = destination->IsFPRegister() ?
g.ToDoubleRegister(destination) - : kScratchDoubleReg; - __ Move(dst, src.ToFloat64().value()); - if (destination->IsFPStackSlot()) { - __ Sdc1(dst, g.ToMemOperand(destination)); - } - } - } else if (source->IsFPRegister()) { - MachineRepresentation rep = LocationOperand::cast(source)->representation(); - if (rep == MachineRepresentation::kSimd128) { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - MSARegister src = g.ToSimd128Register(source); - if (destination->IsSimd128Register()) { - MSARegister dst = g.ToSimd128Register(destination); - __ move_v(dst, src); - } else { - DCHECK(destination->IsSimd128StackSlot()); - __ st_b(src, g.ToMemOperand(destination)); - } - } else { - FPURegister src = g.ToDoubleRegister(source); - if (destination->IsFPRegister()) { - FPURegister dst = g.ToDoubleRegister(destination); - __ Move(dst, src); - } else { - DCHECK(destination->IsFPStackSlot()); - MachineRepresentation rep = - LocationOperand::cast(source)->representation(); - if (rep == MachineRepresentation::kFloat64) { - __ Sdc1(src, g.ToMemOperand(destination)); - } else if (rep == MachineRepresentation::kFloat32) { - __ swc1(src, g.ToMemOperand(destination)); - } else { - UNREACHABLE(); - } - } - } - } else if (source->IsFPStackSlot()) { - DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot()); - MemOperand src = g.ToMemOperand(source); - MachineRepresentation rep = LocationOperand::cast(source)->representation(); - if (destination->IsFPRegister()) { - if (rep == MachineRepresentation::kFloat64) { - __ Ldc1(g.ToDoubleRegister(destination), src); - } else if (rep == MachineRepresentation::kFloat32) { - __ lwc1(g.ToDoubleRegister(destination), src); - } else { - DCHECK_EQ(MachineRepresentation::kSimd128, rep); - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - __ ld_b(g.ToSimd128Register(destination), src); - } - } else { - FPURegister temp = kScratchDoubleReg; - if (rep == MachineRepresentation::kFloat64) { - __ Ldc1(temp, src); - __ Sdc1(temp, g.ToMemOperand(destination)); - } else if (rep == MachineRepresentation::kFloat32) { - __ lwc1(temp, src); - __ swc1(temp, g.ToMemOperand(destination)); - } else { - DCHECK_EQ(MachineRepresentation::kSimd128, rep); - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - MSARegister temp = kSimd128ScratchReg; - __ ld_b(temp, src); - __ st_b(temp, g.ToMemOperand(destination)); - } - } - } else { - UNREACHABLE(); - } -} - -void CodeGenerator::AssembleSwap(InstructionOperand* source, - InstructionOperand* destination) { - MipsOperandConverter g(this, nullptr); - // Dispatch on the source and destination operand kinds. Not all - // combinations are possible. - if (source->IsRegister()) { - // Register-register. 
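-    // Rotate through kScratchReg: temp <- src, then src <- dst, then dst <-
-    // temp (or a load/store pair when the destination is a stack slot).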
- Register temp = kScratchReg; - Register src = g.ToRegister(source); - if (destination->IsRegister()) { - Register dst = g.ToRegister(destination); - __ Move(temp, src); - __ Move(src, dst); - __ Move(dst, temp); - } else { - DCHECK(destination->IsStackSlot()); - MemOperand dst = g.ToMemOperand(destination); - __ mov(temp, src); - __ lw(src, dst); - __ sw(temp, dst); - } - } else if (source->IsStackSlot()) { - DCHECK(destination->IsStackSlot()); - Register temp_0 = kScratchReg; - Register temp_1 = kScratchReg2; - MemOperand src = g.ToMemOperand(source); - MemOperand dst = g.ToMemOperand(destination); - __ lw(temp_0, src); - __ lw(temp_1, dst); - __ sw(temp_0, dst); - __ sw(temp_1, src); - } else if (source->IsFPRegister()) { - if (destination->IsFPRegister()) { - MachineRepresentation rep = - LocationOperand::cast(source)->representation(); - if (rep == MachineRepresentation::kSimd128) { - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - MSARegister temp = kSimd128ScratchReg; - MSARegister src = g.ToSimd128Register(source); - MSARegister dst = g.ToSimd128Register(destination); - __ move_v(temp, src); - __ move_v(src, dst); - __ move_v(dst, temp); - } else { - FPURegister temp = kScratchDoubleReg; - FPURegister src = g.ToDoubleRegister(source); - FPURegister dst = g.ToDoubleRegister(destination); - __ Move(temp, src); - __ Move(src, dst); - __ Move(dst, temp); - } - } else { - DCHECK(destination->IsFPStackSlot()); - MemOperand dst = g.ToMemOperand(destination); - MachineRepresentation rep = - LocationOperand::cast(source)->representation(); - if (rep == MachineRepresentation::kFloat64) { - FPURegister temp = kScratchDoubleReg; - FPURegister src = g.ToDoubleRegister(source); - __ Move(temp, src); - __ Ldc1(src, dst); - __ Sdc1(temp, dst); - } else if (rep == MachineRepresentation::kFloat32) { - FPURegister temp = kScratchDoubleReg; - FPURegister src = g.ToFloatRegister(source); - __ Move(temp, src); - __ lwc1(src, dst); - __ swc1(temp, dst); - } else { - DCHECK_EQ(MachineRepresentation::kSimd128, rep); - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - MSARegister temp = kSimd128ScratchReg; - MSARegister src = g.ToSimd128Register(source); - __ move_v(temp, src); - __ ld_b(src, dst); - __ st_b(temp, dst); - } - } - } else if (source->IsFPStackSlot()) { - DCHECK(destination->IsFPStackSlot()); - Register temp_0 = kScratchReg; - FPURegister temp_1 = kScratchDoubleReg; - MemOperand src0 = g.ToMemOperand(source); - MemOperand dst0 = g.ToMemOperand(destination); - MachineRepresentation rep = LocationOperand::cast(source)->representation(); - if (rep == MachineRepresentation::kFloat64) { - MemOperand src1(src0.rm(), src0.offset() + kIntSize); - MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize); - __ Ldc1(temp_1, dst0); // Save destination in temp_1. - __ lw(temp_0, src0); // Then use temp_0 to copy source to destination. - __ sw(temp_0, dst0); - __ lw(temp_0, src1); - __ sw(temp_0, dst1); - __ Sdc1(temp_1, src0); - } else if (rep == MachineRepresentation::kFloat32) { - __ lwc1(temp_1, dst0); // Save destination in temp_1. - __ lw(temp_0, src0); // Then use temp_0 to copy source to destination. 
- __ sw(temp_0, dst0); - __ swc1(temp_1, src0); - } else { - DCHECK_EQ(MachineRepresentation::kSimd128, rep); - MemOperand src1(src0.rm(), src0.offset() + kIntSize); - MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize); - MemOperand src2(src0.rm(), src0.offset() + 2 * kIntSize); - MemOperand dst2(dst0.rm(), dst0.offset() + 2 * kIntSize); - MemOperand src3(src0.rm(), src0.offset() + 3 * kIntSize); - MemOperand dst3(dst0.rm(), dst0.offset() + 3 * kIntSize); - CpuFeatureScope msa_scope(tasm(), MIPS_SIMD); - MSARegister temp_1 = kSimd128ScratchReg; - __ ld_b(temp_1, dst0); // Save destination in temp_1. - __ lw(temp_0, src0); // Then use temp_0 to copy source to destination. - __ sw(temp_0, dst0); - __ lw(temp_0, src1); - __ sw(temp_0, dst1); - __ lw(temp_0, src2); - __ sw(temp_0, dst2); - __ lw(temp_0, src3); - __ sw(temp_0, dst3); - __ st_b(temp_1, src0); - } - } else { - // No other combinations are possible. - UNREACHABLE(); - } -} - -void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) { - // On 32-bit MIPS we emit the jump tables inline. - UNREACHABLE(); -} - -#undef __ -#undef ASSEMBLE_F64X2_ARITHMETIC_BINOP -#undef ASSEMBLE_SIMD_EXTENDED_MULTIPLY - -} // namespace compiler -} // namespace internal -} // namespace v8 diff --git a/src/compiler/backend/mips/instruction-codes-mips.h b/src/compiler/backend/mips/instruction-codes-mips.h deleted file mode 100644 index 66bccbe607..0000000000 --- a/src/compiler/backend/mips/instruction-codes-mips.h +++ /dev/null @@ -1,400 +0,0 @@ -// Copyright 2014 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_COMPILER_BACKEND_MIPS_INSTRUCTION_CODES_MIPS_H_ -#define V8_COMPILER_BACKEND_MIPS_INSTRUCTION_CODES_MIPS_H_ - -namespace v8 { -namespace internal { -namespace compiler { - -// MIPS-specific opcodes that specify which assembly sequence to emit. -// Most opcodes specify a single instruction. 
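-//
-// The list below is an X-macro: clients define V(Name) before expanding it.
-// Roughly, a definition like
-//   #define DECLARE(Name) k##Name,
-// expanded inside an enum gives the kMips* opcode constants used throughout
-// the code generator and instruction scheduler.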
- -#define TARGET_ARCH_OPCODE_LIST(V) \ - V(MipsAdd) \ - V(MipsAddOvf) \ - V(MipsSub) \ - V(MipsSubOvf) \ - V(MipsMul) \ - V(MipsMulOvf) \ - V(MipsMulHigh) \ - V(MipsMulHighU) \ - V(MipsDiv) \ - V(MipsDivU) \ - V(MipsMod) \ - V(MipsModU) \ - V(MipsAnd) \ - V(MipsOr) \ - V(MipsNor) \ - V(MipsXor) \ - V(MipsClz) \ - V(MipsCtz) \ - V(MipsPopcnt) \ - V(MipsLsa) \ - V(MipsShl) \ - V(MipsShr) \ - V(MipsSar) \ - V(MipsShlPair) \ - V(MipsShrPair) \ - V(MipsSarPair) \ - V(MipsExt) \ - V(MipsIns) \ - V(MipsRor) \ - V(MipsMov) \ - V(MipsTst) \ - V(MipsCmp) \ - V(MipsCmpS) \ - V(MipsAddS) \ - V(MipsSubS) \ - V(MipsMulS) \ - V(MipsDivS) \ - V(MipsAbsS) \ - V(MipsSqrtS) \ - V(MipsMaxS) \ - V(MipsMinS) \ - V(MipsCmpD) \ - V(MipsAddD) \ - V(MipsSubD) \ - V(MipsMulD) \ - V(MipsDivD) \ - V(MipsModD) \ - V(MipsAbsD) \ - V(MipsSqrtD) \ - V(MipsMaxD) \ - V(MipsMinD) \ - V(MipsNegS) \ - V(MipsNegD) \ - V(MipsAddPair) \ - V(MipsSubPair) \ - V(MipsMulPair) \ - V(MipsMaddS) \ - V(MipsMaddD) \ - V(MipsMsubS) \ - V(MipsMsubD) \ - V(MipsFloat32RoundDown) \ - V(MipsFloat32RoundTruncate) \ - V(MipsFloat32RoundUp) \ - V(MipsFloat32RoundTiesEven) \ - V(MipsFloat64RoundDown) \ - V(MipsFloat64RoundTruncate) \ - V(MipsFloat64RoundUp) \ - V(MipsFloat64RoundTiesEven) \ - V(MipsCvtSD) \ - V(MipsCvtDS) \ - V(MipsTruncWD) \ - V(MipsRoundWD) \ - V(MipsFloorWD) \ - V(MipsCeilWD) \ - V(MipsTruncWS) \ - V(MipsRoundWS) \ - V(MipsFloorWS) \ - V(MipsCeilWS) \ - V(MipsTruncUwD) \ - V(MipsTruncUwS) \ - V(MipsCvtDW) \ - V(MipsCvtDUw) \ - V(MipsCvtSW) \ - V(MipsCvtSUw) \ - V(MipsLb) \ - V(MipsLbu) \ - V(MipsSb) \ - V(MipsLh) \ - V(MipsUlh) \ - V(MipsLhu) \ - V(MipsUlhu) \ - V(MipsSh) \ - V(MipsUsh) \ - V(MipsLw) \ - V(MipsUlw) \ - V(MipsSw) \ - V(MipsUsw) \ - V(MipsLwc1) \ - V(MipsUlwc1) \ - V(MipsSwc1) \ - V(MipsUswc1) \ - V(MipsLdc1) \ - V(MipsUldc1) \ - V(MipsSdc1) \ - V(MipsUsdc1) \ - V(MipsFloat64ExtractLowWord32) \ - V(MipsFloat64ExtractHighWord32) \ - V(MipsFloat64InsertLowWord32) \ - V(MipsFloat64InsertHighWord32) \ - V(MipsFloat64SilenceNaN) \ - V(MipsFloat32Max) \ - V(MipsFloat64Max) \ - V(MipsFloat32Min) \ - V(MipsFloat64Min) \ - V(MipsPush) \ - V(MipsPeek) \ - V(MipsStoreToStackSlot) \ - V(MipsByteSwap32) \ - V(MipsStackClaim) \ - V(MipsSeb) \ - V(MipsSeh) \ - V(MipsSync) \ - V(MipsS128Zero) \ - V(MipsI32x4Splat) \ - V(MipsI32x4ExtractLane) \ - V(MipsI32x4ReplaceLane) \ - V(MipsI32x4Add) \ - V(MipsI32x4Sub) \ - V(MipsF64x2Abs) \ - V(MipsF64x2Neg) \ - V(MipsF64x2Sqrt) \ - V(MipsF64x2Add) \ - V(MipsF64x2Sub) \ - V(MipsF64x2Mul) \ - V(MipsF64x2Div) \ - V(MipsF64x2Min) \ - V(MipsF64x2Max) \ - V(MipsF64x2Eq) \ - V(MipsF64x2Ne) \ - V(MipsF64x2Lt) \ - V(MipsF64x2Le) \ - V(MipsF64x2Pmin) \ - V(MipsF64x2Pmax) \ - V(MipsF64x2Ceil) \ - V(MipsF64x2Floor) \ - V(MipsF64x2Trunc) \ - V(MipsF64x2NearestInt) \ - V(MipsF64x2ConvertLowI32x4S) \ - V(MipsF64x2ConvertLowI32x4U) \ - V(MipsF64x2PromoteLowF32x4) \ - V(MipsI64x2Add) \ - V(MipsI64x2Sub) \ - V(MipsI64x2Mul) \ - V(MipsI64x2Neg) \ - V(MipsI64x2Shl) \ - V(MipsI64x2ShrS) \ - V(MipsI64x2ShrU) \ - V(MipsI64x2BitMask) \ - V(MipsI64x2Eq) \ - V(MipsI64x2Ne) \ - V(MipsI64x2GtS) \ - V(MipsI64x2GeS) \ - V(MipsI64x2Abs) \ - V(MipsI64x2SConvertI32x4Low) \ - V(MipsI64x2SConvertI32x4High) \ - V(MipsI64x2UConvertI32x4Low) \ - V(MipsI64x2UConvertI32x4High) \ - V(MipsI64x2ExtMulLowI32x4S) \ - V(MipsI64x2ExtMulHighI32x4S) \ - V(MipsI64x2ExtMulLowI32x4U) \ - V(MipsI64x2ExtMulHighI32x4U) \ - V(MipsF32x4Splat) \ - V(MipsF32x4ExtractLane) \ - V(MipsF32x4ReplaceLane) \ - V(MipsF32x4SConvertI32x4) \ - 
V(MipsF32x4UConvertI32x4) \ - V(MipsF32x4DemoteF64x2Zero) \ - V(MipsI32x4Mul) \ - V(MipsI32x4MaxS) \ - V(MipsI32x4MinS) \ - V(MipsI32x4Eq) \ - V(MipsI32x4Ne) \ - V(MipsI32x4Shl) \ - V(MipsI32x4ShrS) \ - V(MipsI32x4ShrU) \ - V(MipsI32x4MaxU) \ - V(MipsI32x4MinU) \ - V(MipsF64x2Splat) \ - V(MipsF64x2ExtractLane) \ - V(MipsF64x2ReplaceLane) \ - V(MipsF32x4Abs) \ - V(MipsF32x4Neg) \ - V(MipsF32x4Sqrt) \ - V(MipsF32x4Add) \ - V(MipsF32x4Sub) \ - V(MipsF32x4Mul) \ - V(MipsF32x4Div) \ - V(MipsF32x4Max) \ - V(MipsF32x4Min) \ - V(MipsF32x4Eq) \ - V(MipsF32x4Ne) \ - V(MipsF32x4Lt) \ - V(MipsF32x4Le) \ - V(MipsF32x4Pmin) \ - V(MipsF32x4Pmax) \ - V(MipsF32x4Ceil) \ - V(MipsF32x4Floor) \ - V(MipsF32x4Trunc) \ - V(MipsF32x4NearestInt) \ - V(MipsI32x4SConvertF32x4) \ - V(MipsI32x4UConvertF32x4) \ - V(MipsI32x4Neg) \ - V(MipsI32x4GtS) \ - V(MipsI32x4GeS) \ - V(MipsI32x4GtU) \ - V(MipsI32x4GeU) \ - V(MipsI32x4Abs) \ - V(MipsI32x4BitMask) \ - V(MipsI32x4DotI16x8S) \ - V(MipsI32x4ExtMulLowI16x8S) \ - V(MipsI32x4ExtMulHighI16x8S) \ - V(MipsI32x4ExtMulLowI16x8U) \ - V(MipsI32x4ExtMulHighI16x8U) \ - V(MipsI32x4TruncSatF64x2SZero) \ - V(MipsI32x4TruncSatF64x2UZero) \ - V(MipsI32x4ExtAddPairwiseI16x8S) \ - V(MipsI32x4ExtAddPairwiseI16x8U) \ - V(MipsI16x8Splat) \ - V(MipsI16x8ExtractLaneU) \ - V(MipsI16x8ExtractLaneS) \ - V(MipsI16x8ReplaceLane) \ - V(MipsI16x8Neg) \ - V(MipsI16x8Shl) \ - V(MipsI16x8ShrS) \ - V(MipsI16x8ShrU) \ - V(MipsI16x8Add) \ - V(MipsI16x8AddSatS) \ - V(MipsI16x8Sub) \ - V(MipsI16x8SubSatS) \ - V(MipsI16x8Mul) \ - V(MipsI16x8MaxS) \ - V(MipsI16x8MinS) \ - V(MipsI16x8Eq) \ - V(MipsI16x8Ne) \ - V(MipsI16x8GtS) \ - V(MipsI16x8GeS) \ - V(MipsI16x8AddSatU) \ - V(MipsI16x8SubSatU) \ - V(MipsI16x8MaxU) \ - V(MipsI16x8MinU) \ - V(MipsI16x8GtU) \ - V(MipsI16x8GeU) \ - V(MipsI16x8RoundingAverageU) \ - V(MipsI16x8Abs) \ - V(MipsI16x8BitMask) \ - V(MipsI16x8Q15MulRSatS) \ - V(MipsI16x8ExtMulLowI8x16S) \ - V(MipsI16x8ExtMulHighI8x16S) \ - V(MipsI16x8ExtMulLowI8x16U) \ - V(MipsI16x8ExtMulHighI8x16U) \ - V(MipsI16x8ExtAddPairwiseI8x16S) \ - V(MipsI16x8ExtAddPairwiseI8x16U) \ - V(MipsI8x16Splat) \ - V(MipsI8x16ExtractLaneU) \ - V(MipsI8x16ExtractLaneS) \ - V(MipsI8x16ReplaceLane) \ - V(MipsI8x16Neg) \ - V(MipsI8x16Shl) \ - V(MipsI8x16ShrS) \ - V(MipsI8x16Add) \ - V(MipsI8x16AddSatS) \ - V(MipsI8x16Sub) \ - V(MipsI8x16SubSatS) \ - V(MipsI8x16MaxS) \ - V(MipsI8x16MinS) \ - V(MipsI8x16Eq) \ - V(MipsI8x16Ne) \ - V(MipsI8x16GtS) \ - V(MipsI8x16GeS) \ - V(MipsI8x16ShrU) \ - V(MipsI8x16AddSatU) \ - V(MipsI8x16SubSatU) \ - V(MipsI8x16MaxU) \ - V(MipsI8x16MinU) \ - V(MipsI8x16GtU) \ - V(MipsI8x16GeU) \ - V(MipsI8x16RoundingAverageU) \ - V(MipsI8x16Abs) \ - V(MipsI8x16Popcnt) \ - V(MipsI8x16BitMask) \ - V(MipsS128And) \ - V(MipsS128Or) \ - V(MipsS128Xor) \ - V(MipsS128Not) \ - V(MipsS128Select) \ - V(MipsS128AndNot) \ - V(MipsI64x2AllTrue) \ - V(MipsI32x4AllTrue) \ - V(MipsI16x8AllTrue) \ - V(MipsI8x16AllTrue) \ - V(MipsV128AnyTrue) \ - V(MipsS32x4InterleaveRight) \ - V(MipsS32x4InterleaveLeft) \ - V(MipsS32x4PackEven) \ - V(MipsS32x4PackOdd) \ - V(MipsS32x4InterleaveEven) \ - V(MipsS32x4InterleaveOdd) \ - V(MipsS32x4Shuffle) \ - V(MipsS16x8InterleaveRight) \ - V(MipsS16x8InterleaveLeft) \ - V(MipsS16x8PackEven) \ - V(MipsS16x8PackOdd) \ - V(MipsS16x8InterleaveEven) \ - V(MipsS16x8InterleaveOdd) \ - V(MipsS16x4Reverse) \ - V(MipsS16x2Reverse) \ - V(MipsS8x16InterleaveRight) \ - V(MipsS8x16InterleaveLeft) \ - V(MipsS8x16PackEven) \ - V(MipsS8x16PackOdd) \ - V(MipsS8x16InterleaveEven) \ - V(MipsS8x16InterleaveOdd) \ - 
V(MipsI8x16Shuffle) \ - V(MipsI8x16Swizzle) \ - V(MipsS8x16Concat) \ - V(MipsS8x8Reverse) \ - V(MipsS8x4Reverse) \ - V(MipsS8x2Reverse) \ - V(MipsS128Load8Splat) \ - V(MipsS128Load16Splat) \ - V(MipsS128Load32Splat) \ - V(MipsS128Load64Splat) \ - V(MipsS128Load8x8S) \ - V(MipsS128Load8x8U) \ - V(MipsS128Load16x4S) \ - V(MipsS128Load16x4U) \ - V(MipsS128Load32x2S) \ - V(MipsS128Load32x2U) \ - V(MipsMsaLd) \ - V(MipsMsaSt) \ - V(MipsI32x4SConvertI16x8Low) \ - V(MipsI32x4SConvertI16x8High) \ - V(MipsI32x4UConvertI16x8Low) \ - V(MipsI32x4UConvertI16x8High) \ - V(MipsI16x8SConvertI8x16Low) \ - V(MipsI16x8SConvertI8x16High) \ - V(MipsI16x8SConvertI32x4) \ - V(MipsI16x8UConvertI32x4) \ - V(MipsI16x8UConvertI8x16Low) \ - V(MipsI16x8UConvertI8x16High) \ - V(MipsI8x16SConvertI16x8) \ - V(MipsI8x16UConvertI16x8) \ - V(MipsWord32AtomicPairLoad) \ - V(MipsWord32AtomicPairStore) \ - V(MipsWord32AtomicPairAdd) \ - V(MipsWord32AtomicPairSub) \ - V(MipsWord32AtomicPairAnd) \ - V(MipsWord32AtomicPairOr) \ - V(MipsWord32AtomicPairXor) \ - V(MipsWord32AtomicPairExchange) \ - V(MipsWord32AtomicPairCompareExchange) - -// Addressing modes represent the "shape" of inputs to an instruction. -// Many instructions support multiple addressing modes. Addressing modes -// are encoded into the InstructionCode of the instruction and tell the -// code generator after register allocation which assembler method to call. -// -// We use the following local notation for addressing modes: -// -// R = register -// O = register or stack slot -// D = double register -// I = immediate (handle, external, int32) -// MRI = [register + immediate] -// MRR = [register + register] -// TODO(plind): Add the new r6 address modes. -#define TARGET_ADDRESSING_MODE_LIST(V) \ - V(MRI) /* [%r0 + K] */ \ - V(MRR) /* [%r0 + %r1] */ - -} // namespace compiler -} // namespace internal -} // namespace v8 - -#endif // V8_COMPILER_BACKEND_MIPS_INSTRUCTION_CODES_MIPS_H_ diff --git a/src/compiler/backend/mips/instruction-scheduler-mips.cc b/src/compiler/backend/mips/instruction-scheduler-mips.cc deleted file mode 100644 index 83b80e9933..0000000000 --- a/src/compiler/backend/mips/instruction-scheduler-mips.cc +++ /dev/null @@ -1,1804 +0,0 @@ -// Copyright 2015 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
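-//
-// Classifies each MIPS opcode for the scheduler (pure / load / side effect)
-// and provides per-instruction latency estimates.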
- -#include "src/compiler/backend/code-generator.h" -#include "src/compiler/backend/instruction-scheduler.h" - -namespace v8 { -namespace internal { -namespace compiler { - -bool InstructionScheduler::SchedulerSupported() { return true; } - -int InstructionScheduler::GetTargetInstructionFlags( - const Instruction* instr) const { - switch (instr->arch_opcode()) { - case kMipsAbsD: - case kMipsAbsS: - case kMipsAdd: - case kMipsAddD: - case kMipsAddOvf: - case kMipsAddPair: - case kMipsAddS: - case kMipsAnd: - case kMipsByteSwap32: - case kMipsCeilWD: - case kMipsCeilWS: - case kMipsClz: - case kMipsCmp: - case kMipsCmpD: - case kMipsCmpS: - case kMipsCtz: - case kMipsCvtDS: - case kMipsCvtDUw: - case kMipsCvtDW: - case kMipsCvtSD: - case kMipsCvtSUw: - case kMipsCvtSW: - case kMipsDiv: - case kMipsDivD: - case kMipsDivS: - case kMipsDivU: - case kMipsExt: - case kMipsF64x2Abs: - case kMipsF64x2Neg: - case kMipsF64x2Sqrt: - case kMipsF64x2Add: - case kMipsF64x2Sub: - case kMipsF64x2Mul: - case kMipsF64x2Div: - case kMipsF64x2Min: - case kMipsF64x2Max: - case kMipsF64x2Eq: - case kMipsF64x2Ne: - case kMipsF64x2Lt: - case kMipsF64x2Le: - case kMipsF64x2Splat: - case kMipsF64x2ExtractLane: - case kMipsF64x2ReplaceLane: - case kMipsF64x2Pmin: - case kMipsF64x2Pmax: - case kMipsF64x2Ceil: - case kMipsF64x2Floor: - case kMipsF64x2Trunc: - case kMipsF64x2NearestInt: - case kMipsF64x2ConvertLowI32x4S: - case kMipsF64x2ConvertLowI32x4U: - case kMipsF64x2PromoteLowF32x4: - case kMipsI64x2Add: - case kMipsI64x2Sub: - case kMipsI64x2Mul: - case kMipsI64x2Neg: - case kMipsI64x2Shl: - case kMipsI64x2ShrS: - case kMipsI64x2ShrU: - case kMipsI64x2BitMask: - case kMipsI64x2Eq: - case kMipsI64x2Ne: - case kMipsI64x2GtS: - case kMipsI64x2GeS: - case kMipsI64x2Abs: - case kMipsI64x2SConvertI32x4Low: - case kMipsI64x2SConvertI32x4High: - case kMipsI64x2UConvertI32x4Low: - case kMipsI64x2UConvertI32x4High: - case kMipsI64x2ExtMulLowI32x4S: - case kMipsI64x2ExtMulHighI32x4S: - case kMipsI64x2ExtMulLowI32x4U: - case kMipsI64x2ExtMulHighI32x4U: - case kMipsF32x4Abs: - case kMipsF32x4Add: - case kMipsF32x4Eq: - case kMipsF32x4ExtractLane: - case kMipsF32x4Le: - case kMipsF32x4Lt: - case kMipsF32x4Max: - case kMipsF32x4Min: - case kMipsF32x4Mul: - case kMipsF32x4Div: - case kMipsF32x4Ne: - case kMipsF32x4Neg: - case kMipsF32x4Sqrt: - case kMipsF32x4ReplaceLane: - case kMipsF32x4SConvertI32x4: - case kMipsF32x4Splat: - case kMipsF32x4Sub: - case kMipsF32x4UConvertI32x4: - case kMipsF32x4Pmin: - case kMipsF32x4Pmax: - case kMipsF32x4Ceil: - case kMipsF32x4Floor: - case kMipsF32x4Trunc: - case kMipsF32x4NearestInt: - case kMipsF32x4DemoteF64x2Zero: - case kMipsFloat32Max: - case kMipsFloat32Min: - case kMipsFloat32RoundDown: - case kMipsFloat32RoundTiesEven: - case kMipsFloat32RoundTruncate: - case kMipsFloat32RoundUp: - case kMipsFloat64ExtractHighWord32: - case kMipsFloat64ExtractLowWord32: - case kMipsFloat64InsertHighWord32: - case kMipsFloat64InsertLowWord32: - case kMipsFloat64Max: - case kMipsFloat64Min: - case kMipsFloat64RoundDown: - case kMipsFloat64RoundTiesEven: - case kMipsFloat64RoundTruncate: - case kMipsFloat64RoundUp: - case kMipsFloat64SilenceNaN: - case kMipsFloorWD: - case kMipsFloorWS: - case kMipsI16x8Add: - case kMipsI16x8AddSatS: - case kMipsI16x8AddSatU: - case kMipsI16x8Eq: - case kMipsI16x8ExtractLaneU: - case kMipsI16x8ExtractLaneS: - case kMipsI16x8GeS: - case kMipsI16x8GeU: - case kMipsI16x8RoundingAverageU: - case kMipsI16x8GtS: - case kMipsI16x8GtU: - case kMipsI16x8MaxS: - case 
kMipsI16x8MaxU: - case kMipsI16x8MinS: - case kMipsI16x8MinU: - case kMipsI16x8Mul: - case kMipsI16x8Ne: - case kMipsI16x8Neg: - case kMipsI16x8ReplaceLane: - case kMipsI16x8SConvertI32x4: - case kMipsI16x8SConvertI8x16High: - case kMipsI16x8SConvertI8x16Low: - case kMipsI16x8Shl: - case kMipsI16x8ShrS: - case kMipsI16x8ShrU: - case kMipsI16x8Splat: - case kMipsI16x8Sub: - case kMipsI16x8SubSatS: - case kMipsI16x8SubSatU: - case kMipsI16x8UConvertI32x4: - case kMipsI16x8UConvertI8x16High: - case kMipsI16x8UConvertI8x16Low: - case kMipsI16x8Abs: - case kMipsI16x8BitMask: - case kMipsI16x8Q15MulRSatS: - case kMipsI16x8ExtMulLowI8x16S: - case kMipsI16x8ExtMulHighI8x16S: - case kMipsI16x8ExtMulLowI8x16U: - case kMipsI16x8ExtMulHighI8x16U: - case kMipsI16x8ExtAddPairwiseI8x16S: - case kMipsI16x8ExtAddPairwiseI8x16U: - case kMipsI32x4ExtAddPairwiseI16x8S: - case kMipsI32x4ExtAddPairwiseI16x8U: - case kMipsI32x4Add: - case kMipsI32x4Eq: - case kMipsI32x4ExtractLane: - case kMipsI32x4GeS: - case kMipsI32x4GeU: - case kMipsI32x4GtS: - case kMipsI32x4GtU: - case kMipsI32x4MaxS: - case kMipsI32x4MaxU: - case kMipsI32x4MinS: - case kMipsI32x4MinU: - case kMipsI32x4Mul: - case kMipsI32x4Ne: - case kMipsI32x4Neg: - case kMipsI32x4ReplaceLane: - case kMipsI32x4SConvertF32x4: - case kMipsI32x4SConvertI16x8High: - case kMipsI32x4SConvertI16x8Low: - case kMipsI32x4Shl: - case kMipsI32x4ShrS: - case kMipsI32x4ShrU: - case kMipsI32x4Splat: - case kMipsI32x4Sub: - case kMipsI32x4UConvertF32x4: - case kMipsI32x4UConvertI16x8High: - case kMipsI32x4UConvertI16x8Low: - case kMipsI32x4Abs: - case kMipsI32x4BitMask: - case kMipsI32x4DotI16x8S: - case kMipsI32x4ExtMulLowI16x8S: - case kMipsI32x4ExtMulHighI16x8S: - case kMipsI32x4ExtMulLowI16x8U: - case kMipsI32x4ExtMulHighI16x8U: - case kMipsI32x4TruncSatF64x2SZero: - case kMipsI32x4TruncSatF64x2UZero: - case kMipsI8x16Add: - case kMipsI8x16AddSatS: - case kMipsI8x16AddSatU: - case kMipsI8x16Eq: - case kMipsI8x16ExtractLaneU: - case kMipsI8x16ExtractLaneS: - case kMipsI8x16GeS: - case kMipsI8x16GeU: - case kMipsI8x16RoundingAverageU: - case kMipsI8x16GtS: - case kMipsI8x16GtU: - case kMipsI8x16MaxS: - case kMipsI8x16MaxU: - case kMipsI8x16MinS: - case kMipsI8x16MinU: - case kMipsI8x16Ne: - case kMipsI8x16Neg: - case kMipsI8x16ReplaceLane: - case kMipsI8x16SConvertI16x8: - case kMipsI8x16Shl: - case kMipsI8x16ShrS: - case kMipsI8x16ShrU: - case kMipsI8x16Splat: - case kMipsI8x16Sub: - case kMipsI8x16SubSatS: - case kMipsI8x16SubSatU: - case kMipsI8x16UConvertI16x8: - case kMipsI8x16Abs: - case kMipsI8x16Popcnt: - case kMipsI8x16BitMask: - case kMipsIns: - case kMipsLsa: - case kMipsMaddD: - case kMipsMaddS: - case kMipsMaxD: - case kMipsMaxS: - case kMipsMinD: - case kMipsMinS: - case kMipsMod: - case kMipsModU: - case kMipsMov: - case kMipsMsubD: - case kMipsMsubS: - case kMipsMul: - case kMipsMulD: - case kMipsMulHigh: - case kMipsMulHighU: - case kMipsMulOvf: - case kMipsMulPair: - case kMipsMulS: - case kMipsNegD: - case kMipsNegS: - case kMipsNor: - case kMipsOr: - case kMipsPopcnt: - case kMipsRor: - case kMipsRoundWD: - case kMipsRoundWS: - case kMipsS128And: - case kMipsS128Not: - case kMipsS128Or: - case kMipsS128Select: - case kMipsS128Xor: - case kMipsS128Zero: - case kMipsS128AndNot: - case kMipsS16x2Reverse: - case kMipsS16x4Reverse: - case kMipsS16x8InterleaveEven: - case kMipsS16x8InterleaveLeft: - case kMipsS16x8InterleaveOdd: - case kMipsS16x8InterleaveRight: - case kMipsS16x8PackEven: - case kMipsS16x8PackOdd: - case kMipsI64x2AllTrue: - case 
kMipsI32x4AllTrue: - case kMipsI16x8AllTrue: - case kMipsI8x16AllTrue: - case kMipsV128AnyTrue: - case kMipsS32x4InterleaveEven: - case kMipsS32x4InterleaveLeft: - case kMipsS32x4InterleaveOdd: - case kMipsS32x4InterleaveRight: - case kMipsS32x4PackEven: - case kMipsS32x4PackOdd: - case kMipsS32x4Shuffle: - case kMipsS8x16Concat: - case kMipsS8x16InterleaveEven: - case kMipsS8x16InterleaveLeft: - case kMipsS8x16InterleaveOdd: - case kMipsS8x16InterleaveRight: - case kMipsS8x16PackEven: - case kMipsS8x16PackOdd: - case kMipsI8x16Shuffle: - case kMipsI8x16Swizzle: - case kMipsS8x2Reverse: - case kMipsS8x4Reverse: - case kMipsS8x8Reverse: - case kMipsSar: - case kMipsSarPair: - case kMipsSeb: - case kMipsSeh: - case kMipsShl: - case kMipsShlPair: - case kMipsShr: - case kMipsShrPair: - case kMipsSqrtD: - case kMipsSqrtS: - case kMipsSub: - case kMipsSubD: - case kMipsSubOvf: - case kMipsSubPair: - case kMipsSubS: - case kMipsTruncUwD: - case kMipsTruncUwS: - case kMipsTruncWD: - case kMipsTruncWS: - case kMipsTst: - case kMipsXor: - return kNoOpcodeFlags; - - case kMipsLb: - case kMipsLbu: - case kMipsLdc1: - case kMipsLh: - case kMipsLhu: - case kMipsLw: - case kMipsLwc1: - case kMipsMsaLd: - case kMipsPeek: - case kMipsUldc1: - case kMipsUlh: - case kMipsUlhu: - case kMipsUlw: - case kMipsUlwc1: - case kMipsS128Load8Splat: - case kMipsS128Load16Splat: - case kMipsS128Load32Splat: - case kMipsS128Load64Splat: - case kMipsS128Load8x8S: - case kMipsS128Load8x8U: - case kMipsS128Load16x4S: - case kMipsS128Load16x4U: - case kMipsS128Load32x2S: - case kMipsS128Load32x2U: - case kMipsWord32AtomicPairLoad: - return kIsLoadOperation; - - case kMipsModD: - case kMipsMsaSt: - case kMipsPush: - case kMipsSb: - case kMipsSdc1: - case kMipsSh: - case kMipsStackClaim: - case kMipsStoreToStackSlot: - case kMipsSw: - case kMipsSwc1: - case kMipsUsdc1: - case kMipsUsh: - case kMipsUsw: - case kMipsUswc1: - case kMipsSync: - case kMipsWord32AtomicPairStore: - case kMipsWord32AtomicPairAdd: - case kMipsWord32AtomicPairSub: - case kMipsWord32AtomicPairAnd: - case kMipsWord32AtomicPairOr: - case kMipsWord32AtomicPairXor: - case kMipsWord32AtomicPairExchange: - case kMipsWord32AtomicPairCompareExchange: - return kHasSideEffect; - -#define CASE(Name) case k##Name: - COMMON_ARCH_OPCODE_LIST(CASE) -#undef CASE - // Already covered in architecture independent code. - UNREACHABLE(); - } - - UNREACHABLE(); -} - -enum Latency { - BRANCH = 4, // Estimated max. - RINT_S = 4, // Estimated. - RINT_D = 4, // Estimated. - - MULT = 4, - MULTU = 4, - MADD = 4, - MADDU = 4, - MSUB = 4, - MSUBU = 4, - - MUL = 7, - MULU = 7, - MUH = 7, - MUHU = 7, - - DIV = 50, // Min:11 Max:50 - DIVU = 50, - - ABS_S = 4, - ABS_D = 4, - NEG_S = 4, - NEG_D = 4, - ADD_S = 4, - ADD_D = 4, - SUB_S = 4, - SUB_D = 4, - MAX_S = 4, // Estimated. - MAX_D = 4, // Estimated. 
- C_cond_S = 4, - C_cond_D = 4, - MUL_S = 4, - - MADD_S = 4, - MSUB_S = 4, - NMADD_S = 4, - NMSUB_S = 4, - - CABS_cond_S = 4, - CABS_cond_D = 4, - - CVT_D_S = 4, - CVT_PS_PW = 4, - - CVT_S_W = 4, - CVT_S_L = 4, - CVT_D_W = 4, - CVT_D_L = 4, - - CVT_S_D = 4, - - CVT_W_S = 4, - CVT_W_D = 4, - CVT_L_S = 4, - CVT_L_D = 4, - - CEIL_W_S = 4, - CEIL_W_D = 4, - CEIL_L_S = 4, - CEIL_L_D = 4, - - FLOOR_W_S = 4, - FLOOR_W_D = 4, - FLOOR_L_S = 4, - FLOOR_L_D = 4, - - ROUND_W_S = 4, - ROUND_W_D = 4, - ROUND_L_S = 4, - ROUND_L_D = 4, - - TRUNC_W_S = 4, - TRUNC_W_D = 4, - TRUNC_L_S = 4, - TRUNC_L_D = 4, - - MOV_S = 4, - MOV_D = 4, - - MOVF_S = 4, - MOVF_D = 4, - - MOVN_S = 4, - MOVN_D = 4, - - MOVT_S = 4, - MOVT_D = 4, - - MOVZ_S = 4, - MOVZ_D = 4, - - MUL_D = 5, - MADD_D = 5, - MSUB_D = 5, - NMADD_D = 5, - NMSUB_D = 5, - - RECIP_S = 13, - RECIP_D = 26, - - RSQRT_S = 17, - RSQRT_D = 36, - - DIV_S = 17, - SQRT_S = 17, - - DIV_D = 32, - SQRT_D = 32, - - MTC1 = 4, - MTHC1 = 4, - DMTC1 = 4, - LWC1 = 4, - LDC1 = 4, - LDXC1 = 4, - LUXC1 = 4, - LWXC1 = 4, - - MFC1 = 1, - MFHC1 = 1, - MFHI = 1, - MFLO = 1, - DMFC1 = 1, - SWC1 = 1, - SDC1 = 1, - SDXC1 = 1, - SUXC1 = 1, - SWXC1 = 1, -}; - -int ClzLatency() { - if (IsMipsArchVariant(kLoongson)) { - return (6 + 2 * Latency::BRANCH); - } else { - return 1; - } -} - -int RorLatency(bool is_operand_register = true) { - if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { - return 1; - } else { - if (is_operand_register) { - return 4; - } else { - return 3; // Estimated max. - } - } -} - -int AdduLatency(bool is_operand_register = true) { - if (is_operand_register) { - return 1; - } else { - return 2; // Estimated max. - } -} - -int XorLatency(bool is_operand_register = true) { - return AdduLatency(is_operand_register); -} - -int AndLatency(bool is_operand_register = true) { - return AdduLatency(is_operand_register); -} - -int OrLatency(bool is_operand_register = true) { - return AdduLatency(is_operand_register); -} - -int SubuLatency(bool is_operand_register = true) { - return AdduLatency(is_operand_register); -} - -int MulLatency(bool is_operand_register = true) { - if (is_operand_register) { - if (IsMipsArchVariant(kLoongson)) { - return Latency::MULT + 1; - } else { - return Latency::MUL + 1; - } - } else { - if (IsMipsArchVariant(kLoongson)) { - return Latency::MULT + 2; - } else { - return Latency::MUL + 2; - } - } -} - -int NorLatency(bool is_operand_register = true) { - if (is_operand_register) { - return 1; - } else { - return 2; - } -} - -int InsLatency() { - if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { - return 1; - } else { - return SubuLatency(false) + 7; - } -} - -int ShlPairLatency(bool is_operand_register = true) { - if (is_operand_register) { - int latency = - AndLatency(false) + NorLatency() + OrLatency() + AndLatency(false) + 4; - if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) { - return latency + Latency::BRANCH + 2; - } else { - return latency + 2; - } - } else { - return 2; - } -} - -int ShrPairLatency(bool is_operand_register = true, uint32_t shift = 0) { - if (is_operand_register) { - int latency = - AndLatency(false) + NorLatency() + OrLatency() + AndLatency(false) + 4; - if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) { - return latency + Latency::BRANCH + 2; - } else { - return latency + 2; - } - } else { - // Estimated max. - return (InsLatency() + 2 > OrLatency() + 3) ? 
InsLatency() + 2 - : OrLatency() + 3; - } -} - -int SarPairLatency(bool is_operand_register = true, uint32_t shift = 0) { - if (is_operand_register) { - return AndLatency(false) + NorLatency() + OrLatency() + AndLatency(false) + - Latency::BRANCH + 6; - } else { - shift = shift & 0x3F; - if (shift == 0) { - return 2; - } else if (shift < 32) { - if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { - return InsLatency() + 2; - } else { - return OrLatency() + 3; - } - } else if (shift == 32) { - return 2; - } else { - return 2; - } - } -} - -int ExtLatency() { - if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { - return 1; - } else { - // Estimated max. - return 2; - } -} - -int LsaLatency() { - // Estimated max. - return AdduLatency() + 1; -} - -int SltLatency(bool is_operand_register = true) { - if (is_operand_register) { - return 1; - } else { - return 2; // Estimated max. - } -} - -int SltuLatency(bool is_operand_register = true) { - return SltLatency(is_operand_register); -} - -int AddPairLatency() { return 3 * AdduLatency() + SltLatency(); } - -int SubPairLatency() { return SltuLatency() + 3 * SubuLatency(); } - -int MuluLatency(bool is_operand_register = true) { - int latency = 0; - if (!is_operand_register) latency++; - if (!IsMipsArchVariant(kMips32r6)) { - return latency + Latency::MULTU + 2; - } else { - return latency + Latency::MULU + Latency::MUHU; - } -} - -int MulPairLatency() { - return MuluLatency() + 2 * MulLatency() + 2 * AdduLatency(); -} - -int MaddSLatency() { - if (IsMipsArchVariant(kMips32r2)) { - return Latency::MADD_D; - } else { - return Latency::MUL_D + Latency::ADD_D; - } -} - -int MaddDLatency() { - if (IsMipsArchVariant(kMips32r2)) { - return Latency::MADD_D; - } else { - return Latency::MUL_D + Latency::ADD_D; - } -} - -int MsubSLatency() { - if (IsMipsArchVariant(kMips32r2)) { - return Latency::MSUB_S; - } else { - return Latency::MUL_S + Latency::SUB_S; - } -} - -int MsubDLatency() { - if (IsMipsArchVariant(kMips32r2)) { - return Latency::MSUB_D; - } else { - return Latency::MUL_D + Latency::SUB_D; - } -} - -int Mfhc1Latency() { - if (IsFp32Mode()) { - return Latency::MFC1; - } else { - return 1; - } -} - -int Mthc1Latency() { - if (IsFp32Mode()) { - return Latency::MTC1; - } else { - return 1; - } -} - -int MoveLatency(bool is_double_register = true) { - if (!is_double_register) { - return Latency::MTC1 + 1; - } else { - return Mthc1Latency() + 1; // Estimated. - } -} - -int Float64RoundLatency() { - if (IsMipsArchVariant(kMips32r6)) { - return Latency::RINT_D + 4; - } else { - // For ceil_l_d, floor_l_d, round_l_d, trunc_l_d latency is 4. - return Mfhc1Latency() + ExtLatency() + Latency::BRANCH + Latency::MOV_D + - 4 + MoveLatency() + 1 + Latency::BRANCH + Latency::CVT_D_L; - } -} - -int Float32RoundLatency() { - if (IsMipsArchVariant(kMips32r6)) { - return Latency::RINT_S + 4; - } else { - // For ceil_w_s, floor_w_s, round_w_s, trunc_w_s latency is 4. 
- return Latency::MFC1 + ExtLatency() + Latency::BRANCH + Latency::MOV_S + 4 + - Latency::MFC1 + Latency::BRANCH + Latency::CVT_S_W; - } -} - -int CvtDUwLatency() { - if (IsFp64Mode()) { - return Latency::MTC1 + Mthc1Latency() + Latency::CVT_D_L; - } else { - return Latency::BRANCH + Latency::MTC1 + 1 + Latency::MTC1 + - Mthc1Latency() + Latency::CVT_D_W + Latency::BRANCH + - Latency::ADD_D + Latency::CVT_D_W; - } -} - -int CvtSUwLatency() { return CvtDUwLatency() + Latency::CVT_S_D; } - -int Floor_w_dLatency() { - if (IsMipsArchVariant(kLoongson)) { - return Mfhc1Latency() + Latency::FLOOR_W_D + Mthc1Latency(); - } else { - return Latency::FLOOR_W_D; - } -} - -int FloorWDLatency() { return Floor_w_dLatency() + Latency::MFC1; } - -int Ceil_w_dLatency() { - if (IsMipsArchVariant(kLoongson)) { - return Mfhc1Latency() + Latency::CEIL_W_D + Mthc1Latency(); - } else { - return Latency::CEIL_W_D; - } -} - -int CeilWDLatency() { return Ceil_w_dLatency() + Latency::MFC1; } - -int Round_w_dLatency() { - if (IsMipsArchVariant(kLoongson)) { - return Mfhc1Latency() + Latency::ROUND_W_D + Mthc1Latency(); - } else { - return Latency::ROUND_W_D; - } -} - -int RoundWDLatency() { return Round_w_dLatency() + Latency::MFC1; } - -int Trunc_w_dLatency() { - if (IsMipsArchVariant(kLoongson)) { - return Mfhc1Latency() + Latency::TRUNC_W_D + Mthc1Latency(); - } else { - return Latency::TRUNC_W_D; - } -} - -int MovnLatency() { - if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) { - return Latency::BRANCH + 1; - } else { - return 1; - } -} - -int Trunc_uw_dLatency() { - return 1 + Latency::MTC1 + Mthc1Latency() + Latency::BRANCH + Latency::SUB_D + - Latency::TRUNC_W_D + Latency::MFC1 + OrLatency(false) + - Latency::BRANCH + Latency::TRUNC_W_D + Latency::MFC1; -} - -int Trunc_uw_sLatency() { - return 1 + Latency::MTC1 + Latency::BRANCH + Latency::SUB_S + - Latency::TRUNC_W_S + Latency::MFC1 + OrLatency(false) + - Latency::TRUNC_W_S + Latency::MFC1; -} - -int MovzLatency() { - if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) { - return Latency::BRANCH + 1; - } else { - return 1; - } -} - -int FmoveLowLatency() { - if (IsFp32Mode()) { - return Latency::MTC1; - } else { - return Latency::MFHC1 + Latency::MTC1 + Latency::MTHC1; - } -} - -int SebLatency() { - if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { - return 1; - } else { - return 2; - } -} - -int SehLatency() { - if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { - return 1; - } else { - return 2; - } -} - -int UlhuLatency() { - if (IsMipsArchVariant(kMips32r6)) { - return 1; - } else { - return 4; - } -} - -int UlhLatency() { - if (IsMipsArchVariant(kMips32r6)) { - return 1; - } else { - return 4; - } -} - -int AdjustBaseAndOffsetLatency() { - return 3; // Estimated max. -} - -int UshLatency() { - if (IsMipsArchVariant(kMips32r6)) { - return 1; - } else { - return AdjustBaseAndOffsetLatency() + 4; // Estimated max. - } -} - -int UlwLatency() { - if (IsMipsArchVariant(kMips32r6)) { - return 1; - } else { - return AdjustBaseAndOffsetLatency() + 3; // Estimated max. 
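The unaligned-access helpers here (UlhuLatency, UlhLatency, UshLatency, UlwLatency) all encode the same fact: MIPS32r6 performs unaligned halfword/word accesses in hardware, so they cost a single instruction, while earlier variants synthesize them from partial accesses after a base/offset fixup. A minimal standalone sketch of that recurring shape; the MipsVariant enum and the constants are illustrative stand-ins, not V8's API:

// Sketch only: pre-r6 unaligned word loads are synthesized (e.g. lwl/lwr
// plus an address fixup), so the estimate is the fixup cost plus a few
// instructions; r6 needs just one.
enum class MipsVariant { kMips32r1, kMips32r2, kMips32r6 };

int UnalignedLoadLatency(MipsVariant variant) {
  constexpr int kAdjustBaseAndOffsetLatency = 3;  // Estimated max, as above.
  if (variant == MipsVariant::kMips32r6) return 1;
  return kAdjustBaseAndOffsetLatency + 3;  // Estimated max.
}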
- } -} - -int UswLatency() { - if (IsMipsArchVariant(kMips32r6)) { - return 1; - } else { - return AdjustBaseAndOffsetLatency() + 2; - } -} - -int Ulwc1Latency() { - if (IsMipsArchVariant(kMips32r6)) { - return Latency::LWC1; - } else { - return UlwLatency() + Latency::MTC1; - } -} - -int Uswc1Latency() { - if (IsMipsArchVariant(kMips32r6)) { - return Latency::SWC1; - } else { - return Latency::MFC1 + UswLatency(); - } -} - -int Ldc1Latency() { - int latency = AdjustBaseAndOffsetLatency() + Latency::LWC1; - if (IsFp32Mode()) { - return latency + Latency::LWC1; - } else { - return latency + 1 + Mthc1Latency(); - } -} - -int Uldc1Latency() { - if (IsMipsArchVariant(kMips32r6)) { - return Ldc1Latency(); - } else { - return 2 * UlwLatency() + Latency::MTC1 + Mthc1Latency(); - } -} - -int Sdc1Latency() { - int latency = AdjustBaseAndOffsetLatency() + Latency::SWC1; - if (IsFp32Mode()) { - return latency + Latency::SWC1; - } else { - return latency + Mfhc1Latency() + 1; - } -} - -int Usdc1Latency() { - if (IsMipsArchVariant(kMips32r6)) { - return Sdc1Latency(); - } else { - return Latency::MFC1 + 2 * UswLatency() + Mfhc1Latency(); - } -} - -int PushRegisterLatency() { return AdduLatency(false) + 1; } - -int ByteSwapSignedLatency() { - // operand_size == 4 - if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { - return 2; - } else if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) { - return 10; - } -} - -int LlLatency(int offset) { - bool is_one_instruction = - IsMipsArchVariant(kMips32r6) ? is_int9(offset) : is_int16(offset); - if (is_one_instruction) { - return 1; - } else { - return 3; - } -} - -int ExtractBitsLatency(int size, bool sign_extend) { - int latency = 1 + ExtLatency(); - if (size == 8) { - if (sign_extend) { - return latency + SebLatency(); - } else { - return 0; - } - } else if (size == 16) { - if (sign_extend) { - return latency + SehLatency(); - } else { - return 0; - } - } else { - UNREACHABLE(); - } -} - -int NegLatency() { return 1; } - -int InsertBitsLatency() { - return RorLatency() + InsLatency() + SubuLatency(false) + NegLatency() + - RorLatency(); -} - -int ScLatency(int offset) { - bool is_one_instruction = - IsMipsArchVariant(kMips32r6) ? is_int9(offset) : is_int16(offset); - if (is_one_instruction) { - return 1; - } else { - return 3; - } -} - -int BranchShortHelperR6Latency() { - return 2; // Estimated max. -} - -int BranchShortHelperLatency() { - return SltLatency() + 2; // Estimated max. -} - -int BranchShortLatency(BranchDelaySlot bdslot = PROTECT) { - if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) { - return BranchShortHelperR6Latency(); - } else { - return BranchShortHelperLatency(); - } -} - -int Word32AtomicExchangeLatency(bool sign_extend, int size) { - return AdduLatency() + 1 + SubuLatency() + 2 + LlLatency(0) + - ExtractBitsLatency(size, sign_extend) + InsertBitsLatency() + - ScLatency(0) + BranchShortLatency() + 1; -} - -int Word32AtomicCompareExchangeLatency(bool sign_extend, int size) { - return AdduLatency() + 1 + SubuLatency() + 2 + LlLatency(0) + - ExtractBitsLatency(size, sign_extend) + BranchShortLatency() + 1; -} - -int AddOverflowLatency() { - return 6; // Estimated max. -} - -int SubOverflowLatency() { - return 6; // Estimated max. 
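Word32AtomicExchangeLatency and Word32AtomicCompareExchangeLatency just above are not measured numbers; they sum the pieces of an ll/sc retry loop. A rough standalone restatement of that composition, with made-up component costs (the real helpers depend on the arch variant and offset width):

// Sketch only: one pass through an ll/sc loop is load-linked, compute the
// new value, store-conditional, then branch back if the store failed.
int LinkedLoadStoreLoopLatency(int ll, int modify, int sc, int branch) {
  return ll + modify + sc + branch;
}

int main() {
  // Example with assumed costs ll=1, modify=2, sc=1, branch=2: 6 per attempt.
  return LinkedLoadStoreLoopLatency(1, 2, 1, 2) == 6 ? 0 : 1;
}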
-} - -int MulhLatency(bool is_operand_register = true) { - if (is_operand_register) { - if (!IsMipsArchVariant(kMips32r6)) { - return Latency::MULT + Latency::MFHI; - } else { - return Latency::MUH; - } - } else { - if (!IsMipsArchVariant(kMips32r6)) { - return 1 + Latency::MULT + Latency::MFHI; - } else { - return 1 + Latency::MUH; - } - } -} - -int MulhuLatency(bool is_operand_register = true) { - if (is_operand_register) { - if (!IsMipsArchVariant(kMips32r6)) { - return Latency::MULTU + Latency::MFHI; - } else { - return Latency::MUHU; - } - } else { - if (!IsMipsArchVariant(kMips32r6)) { - return 1 + Latency::MULTU + Latency::MFHI; - } else { - return 1 + Latency::MUHU; - } - } -} - -int MulOverflowLatency() { - return MulLatency() + 4; // Estimated max. -} - -int ModLatency(bool is_operand_register = true) { - if (is_operand_register) { - if (!IsMipsArchVariant(kMips32r6)) { - return Latency::DIV + Latency::MFHI; - } else { - return 1; - } - } else { - if (!IsMipsArchVariant(kMips32r6)) { - return 1 + Latency::DIV + Latency::MFHI; - } else { - return 2; - } - } -} - -int ModuLatency(bool is_operand_register = true) { - return ModLatency(is_operand_register); -} - -int DivLatency(bool is_operand_register = true) { - if (is_operand_register) { - if (!IsMipsArchVariant(kMips32r6)) { - return Latency::DIV + Latency::MFLO; - } else { - return Latency::DIV; - } - } else { - if (!IsMipsArchVariant(kMips32r6)) { - return 1 + Latency::DIV + Latency::MFLO; - } else { - return 1 + Latency::DIV; - } - } -} - -int DivuLatency(bool is_operand_register = true) { - if (is_operand_register) { - if (!IsMipsArchVariant(kMips32r6)) { - return Latency::DIVU + Latency::MFLO; - } else { - return Latency::DIVU; - } - } else { - if (!IsMipsArchVariant(kMips32r6)) { - return 1 + Latency::DIVU + Latency::MFLO; - } else { - return 1 + Latency::DIVU; - } - } -} - -int CtzLatency() { - if (IsMipsArchVariant(kMips32r6)) { - return RorLatency(false) + 2 + ClzLatency(); - } else { - return AdduLatency(false) + XorLatency() + AndLatency() + ClzLatency() + 1 + - SubuLatency(); - } -} - -int PopcntLatency() { - return 4 * AndLatency() + SubuLatency() + 2 * AdduLatency() + MulLatency() + - 8; -} - -int CompareFLatency() { return Latency::C_cond_S; } - -int CompareIsNanFLatency() { return CompareFLatency(); } - -int CompareIsNanF32Latency() { return CompareIsNanFLatency(); } - -int Neg_sLatency() { - if (IsMipsArchVariant(kMips32r6)) { - return Latency::NEG_S; - } else { - // Estimated. - return CompareIsNanF32Latency() + 2 * Latency::BRANCH + Latency::NEG_S + - Latency::MFC1 + 1 + XorLatency() + Latency::MTC1; - } -} - -int CompareIsNanF64Latency() { return CompareIsNanFLatency(); } - -int Neg_dLatency() { - if (IsMipsArchVariant(kMips32r6)) { - return Latency::NEG_D; - } else { - // Estimated. - return CompareIsNanF64Latency() + 2 * Latency::BRANCH + Latency::NEG_D + - Mfhc1Latency() + 1 + XorLatency() + Mthc1Latency(); - } -} - -int CompareF32Latency() { return CompareFLatency(); } - -int Move_sLatency() { - return Latency::MOV_S; // Estimated max. -} - -int Float32MaxLatency() { - // Estimated max. - int latency = CompareIsNanF32Latency() + Latency::BRANCH; - if (IsMipsArchVariant(kMips32r6)) { - return latency + Latency::MAX_S; - } else { - return latency + 5 * Latency::BRANCH + 2 * CompareF32Latency() + - Latency::MFC1 + Move_sLatency(); - } -} - -int CompareF64Latency() { return CompareF32Latency(); } - -int Move_dLatency() { - return Latency::MOV_D; // Estimated max. 
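MulhLatency, MulhuLatency, DivLatency, and DivuLatency above share one split: before r6 the result lands in the HI/LO register pair and costs an extra MFHI/MFLO move, whereas r6's MUH/MUHU and single-destination DIV forms write a general register directly. The same choice in isolation, with a hypothetical cost table rather than the Latency enum:

// Sketch only: legacy encodings pay for moving the result out of HI/LO.
struct Costs {
  int mult;  // Legacy multiply into HI/LO.
  int mfhi;  // Move-from-HI.
  int muh;   // r6 multiply-high straight into a GPR.
};

int MulHighLatencySketch(bool is_r6, const Costs& c) {
  return is_r6 ? c.muh : c.mult + c.mfhi;
}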
-} - -int Float64MaxLatency() { - // Estimated max. - int latency = CompareIsNanF64Latency() + Latency::BRANCH; - if (IsMipsArchVariant(kMips32r6)) { - return latency + Latency::MAX_D; - } else { - return latency + 5 * Latency::BRANCH + 2 * CompareF64Latency() + - Latency::MFHC1 + 2 * Move_dLatency(); - } -} - -int PrepareCallCFunctionLatency() { - int frame_alignment = TurboAssembler::ActivationFrameAlignment(); - if (frame_alignment > kSystemPointerSize) { - return 1 + SubuLatency(false) + AndLatency(false) + 1; - } else { - return SubuLatency(false); - } -} - -int MovToFloatParametersLatency() { return 2 * MoveLatency(); } - -int CallLatency() { - // Estimated. - return AdduLatency(false) + Latency::BRANCH + 3; -} - -int CallCFunctionHelperLatency() { - // Estimated. - int latency = AndLatency(false) + Latency::BRANCH + 2 + CallLatency(); - if (base::OS::ActivationFrameAlignment() > kSystemPointerSize) { - latency++; - } else { - latency += AdduLatency(false); - } - return latency; -} - -int CallCFunctionLatency() { return 1 + CallCFunctionHelperLatency(); } - -int MovFromFloatResultLatency() { return MoveLatency(); } - -int Float32MinLatency() { - // Estimated max. - return CompareIsNanF32Latency() + Latency::BRANCH + - 2 * (CompareF32Latency() + Latency::BRANCH) + Latency::MFC1 + - 2 * Latency::BRANCH + Move_sLatency(); -} - -int Float64MinLatency() { - // Estimated max. - return CompareIsNanF64Latency() + Latency::BRANCH + - 2 * (CompareF64Latency() + Latency::BRANCH) + Mfhc1Latency() + - 2 * Latency::BRANCH + Move_dLatency(); -} - -int SmiUntagLatency() { return 1; } - -int PrepareForTailCallLatency() { - // Estimated max. - return 2 * (LsaLatency() + AdduLatency(false)) + 2 + Latency::BRANCH + - Latency::BRANCH + 2 * SubuLatency(false) + 2 + Latency::BRANCH + 1; -} - -int JumpLatency() { - // Estimated max. - return 1 + AdduLatency(false) + Latency::BRANCH + 2; -} - -int AssertLatency() { return 1; } - -int MultiPushLatency() { - int latency = SubuLatency(false); - for (int16_t i = kNumRegisters - 1; i >= 0; i--) { - latency++; - } - return latency; -} - -int MultiPushFPULatency() { - int latency = SubuLatency(false); - for (int16_t i = kNumRegisters - 1; i >= 0; i--) { - latency += Sdc1Latency(); - } - return latency; -} - -int PushCallerSavedLatency(SaveFPRegsMode fp_mode) { - int latency = MultiPushLatency(); - if (fp_mode == SaveFPRegsMode::kSave) { - latency += MultiPushFPULatency(); - } - return latency; -} - -int MultiPopFPULatency() { - int latency = 0; - for (int16_t i = 0; i < kNumRegisters; i++) { - latency += Ldc1Latency(); - } - return latency++; -} - -int MultiPopLatency() { - int latency = 0; - for (int16_t i = 0; i < kNumRegisters; i++) { - latency++; - } - return latency++; -} - -int PopCallerSavedLatency(SaveFPRegsMode fp_mode) { - int latency = 0; - if (fp_mode == SaveFPRegsMode::kSave) { - latency += MultiPopFPULatency(); - } - return latency + MultiPopLatency(); -} - -int AssembleArchJumpLatency() { - // Estimated max. 
-  return Latency::BRANCH;
-}
-
-int AssembleArchBinarySearchSwitchLatency(int cases) {
-  if (cases < CodeGenerator::kBinarySearchSwitchMinimalCases) {
-    return cases * (1 + Latency::BRANCH) + AssembleArchJumpLatency();
-  }
-  return 1 + Latency::BRANCH + AssembleArchBinarySearchSwitchLatency(cases / 2);
-}
-
-int GenerateSwitchTableLatency() {
-  int latency = 0;
-  if (kArchVariant >= kMips32r6) {
-    latency = LsaLatency() + 2;
-  } else {
-    latency = 6;
-  }
-  latency += 2;
-  return latency;
-}
-
-int AssembleArchTableSwitchLatency() {
-  return Latency::BRANCH + GenerateSwitchTableLatency();
-}
-
-int AssembleReturnLatency() {
-  // Estimated max.
-  return AdduLatency(false) + MultiPopLatency() + MultiPopFPULatency() +
-         Latency::BRANCH + 1 + AdduLatency() + 8;
-}
-
-int TryInlineTruncateDoubleToILatency() {
-  return 2 + Latency::TRUNC_W_D + Latency::MFC1 + 2 + AndLatency(false) +
-         Latency::BRANCH;
-}
-
-int CallStubDelayedLatency() { return 1 + CallLatency(); }
-
-int TruncateDoubleToIDelayedLatency() {
-  // TODO(mips): This no longer reflects how TruncateDoubleToI is called.
-  return TryInlineTruncateDoubleToILatency() + 1 + SubuLatency(false) +
-         Sdc1Latency() + CallStubDelayedLatency() + AdduLatency(false) + 1;
-}
-
-int CheckPageFlagLatency() {
-  return 2 * AndLatency(false) + 1 + Latency::BRANCH;
-}
-
-int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
-  // Basic latency modeling for MIPS32 instructions. They have been determined
-  // in an empirical way.
-  switch (instr->arch_opcode()) {
-    case kArchCallCodeObject:
-#if V8_ENABLE_WEBASSEMBLY
-    case kArchCallWasmFunction:
-#endif  // V8_ENABLE_WEBASSEMBLY
-      return CallLatency();
-    case kArchTailCallCodeObject:
-#if V8_ENABLE_WEBASSEMBLY
-    case kArchTailCallWasm:
-#endif  // V8_ENABLE_WEBASSEMBLY
-    case kArchTailCallAddress:
-      return JumpLatency();
-    case kArchCallJSFunction: {
-      int latency = 0;
-      if (FLAG_debug_code) {
-        latency = 1 + AssertLatency();
-      }
-      return latency + 1 + AdduLatency(false) + CallLatency();
-    }
-    case kArchPrepareCallCFunction:
-      return PrepareCallCFunctionLatency();
-    case kArchSaveCallerRegisters: {
-      auto fp_mode =
-          static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
-      return PushCallerSavedLatency(fp_mode);
-    }
-    case kArchRestoreCallerRegisters: {
-      auto fp_mode =
-          static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
-      return PopCallerSavedLatency(fp_mode);
-    }
-    case kArchPrepareTailCall:
-      return 2;  // Estimated max.
-    case kArchCallCFunction:
-      return CallCFunctionLatency();
-    case kArchJmp:
-      return AssembleArchJumpLatency();
-    case kArchBinarySearchSwitch:
-      return AssembleArchBinarySearchSwitchLatency((instr->InputCount() - 2) /
-                                                   2);
-    case kArchTableSwitch:
-      return AssembleArchTableSwitchLatency();
-    case kArchAbortCSADcheck:
-      return CallLatency() + 1;
-    case kArchComment:
-    case kArchDeoptimize:
-      return 0;
-    case kArchRet:
-      return AssembleReturnLatency();
-    case kArchTruncateDoubleToI:
-      return TruncateDoubleToIDelayedLatency();
-    case kArchStoreWithWriteBarrier:
-      return AdduLatency() + 1 + CheckPageFlagLatency();
-    case kArchStackSlot: {
-      // Estimated max.
- return AdduLatency(false) + AndLatency(false) + AssertLatency() + - AdduLatency(false) + AndLatency(false) + BranchShortLatency() + 1 + - SubuLatency() + AdduLatency(); - } - case kIeee754Float64Acos: - case kIeee754Float64Acosh: - case kIeee754Float64Asin: - case kIeee754Float64Asinh: - case kIeee754Float64Atan: - case kIeee754Float64Atanh: - case kIeee754Float64Atan2: - case kIeee754Float64Cos: - case kIeee754Float64Cosh: - case kIeee754Float64Cbrt: - case kIeee754Float64Exp: - case kIeee754Float64Expm1: - case kIeee754Float64Log: - case kIeee754Float64Log1p: - case kIeee754Float64Log10: - case kIeee754Float64Log2: - case kIeee754Float64Pow: - case kIeee754Float64Sin: - case kIeee754Float64Sinh: - case kIeee754Float64Tan: - case kIeee754Float64Tanh: - return PrepareCallCFunctionLatency() + MovToFloatParametersLatency() + - CallCFunctionLatency() + MovFromFloatResultLatency(); - case kMipsAdd: - return AdduLatency(instr->InputAt(1)->IsRegister()); - case kMipsAnd: - return AndLatency(instr->InputAt(1)->IsRegister()); - case kMipsOr: - return OrLatency(instr->InputAt(1)->IsRegister()); - case kMipsXor: - return XorLatency(instr->InputAt(1)->IsRegister()); - case kMipsSub: - return SubuLatency(instr->InputAt(1)->IsRegister()); - case kMipsNor: - return NorLatency(instr->InputAt(1)->IsRegister()); - case kMipsAddOvf: - return AddOverflowLatency(); - case kMipsSubOvf: - return SubOverflowLatency(); - case kMipsMul: - return MulLatency(false); - case kMipsMulHigh: - return MulhLatency(instr->InputAt(1)->IsRegister()); - case kMipsMulHighU: - return MulhuLatency(instr->InputAt(1)->IsRegister()); - case kMipsMulOvf: - return MulOverflowLatency(); - case kMipsMod: - return ModLatency(instr->InputAt(1)->IsRegister()); - case kMipsModU: - return ModuLatency(instr->InputAt(1)->IsRegister()); - case kMipsDiv: { - int latency = DivLatency(instr->InputAt(1)->IsRegister()); - if (IsMipsArchVariant(kMips32r6)) { - return latency++; - } else { - return latency + MovzLatency(); - } - } - case kMipsDivU: { - int latency = DivuLatency(instr->InputAt(1)->IsRegister()); - if (IsMipsArchVariant(kMips32r6)) { - return latency++; - } else { - return latency + MovzLatency(); - } - } - case kMipsClz: - return ClzLatency(); - case kMipsCtz: - return CtzLatency(); - case kMipsPopcnt: - return PopcntLatency(); - case kMipsShlPair: { - if (instr->InputAt(2)->IsRegister()) { - return ShlPairLatency(); - } else { - return ShlPairLatency(false); - } - } - case kMipsShrPair: { - if (instr->InputAt(2)->IsRegister()) { - return ShrPairLatency(); - } else { - // auto immediate_operand = ImmediateOperand::cast(instr->InputAt(2)); - // return ShrPairLatency(false, immediate_operand->inline_32_value()); - return 1; - } - } - case kMipsSarPair: { - if (instr->InputAt(2)->IsRegister()) { - return SarPairLatency(); - } else { - return SarPairLatency(false); - } - } - case kMipsExt: - return ExtLatency(); - case kMipsIns: - return InsLatency(); - case kMipsRor: - return RorLatency(instr->InputAt(1)->IsRegister()); - case kMipsLsa: - return LsaLatency(); - case kMipsModD: - return PrepareCallCFunctionLatency() + MovToFloatParametersLatency() + - CallCFunctionLatency() + MovFromFloatResultLatency(); - case kMipsAddPair: - return AddPairLatency(); - case kMipsSubPair: - return SubPairLatency(); - case kMipsMulPair: - return MulPairLatency(); - case kMipsMaddS: - return MaddSLatency(); - case kMipsMaddD: - return MaddDLatency(); - case kMipsMsubS: - return MsubSLatency(); - case kMipsMsubD: - return MsubDLatency(); - case kMipsNegS: - 
return Neg_sLatency(); - case kMipsNegD: - return Neg_dLatency(); - case kMipsFloat64RoundDown: - case kMipsFloat64RoundTruncate: - case kMipsFloat64RoundUp: - case kMipsFloat64RoundTiesEven: - return Float64RoundLatency(); - case kMipsFloat32RoundDown: - case kMipsFloat32RoundTruncate: - case kMipsFloat32RoundUp: - case kMipsFloat32RoundTiesEven: - return Float32RoundLatency(); - case kMipsFloat32Max: - return Float32MaxLatency(); - case kMipsFloat64Max: - return Float64MaxLatency(); - case kMipsFloat32Min: - return Float32MinLatency(); - case kMipsFloat64Min: - return Float64MinLatency(); - case kMipsCvtSUw: - return CvtSUwLatency(); - case kMipsCvtDUw: - return CvtDUwLatency(); - case kMipsFloorWD: - return FloorWDLatency(); - case kMipsCeilWD: - return CeilWDLatency(); - case kMipsRoundWD: - return RoundWDLatency(); - case kMipsTruncWD: - return Trunc_w_dLatency() + Latency::MFC1; - case kMipsTruncWS: - return Latency::TRUNC_W_S + Latency::MFC1 + AdduLatency(false) + - SltLatency() + MovnLatency(); - case kMipsTruncUwD: - return Trunc_uw_dLatency(); - case kMipsTruncUwS: - return Trunc_uw_sLatency() + AdduLatency(false) + MovzLatency(); - case kMipsFloat64ExtractLowWord32: - return Latency::MFC1; - case kMipsFloat64ExtractHighWord32: - return Mfhc1Latency(); - case kMipsFloat64InsertLowWord32: { - if (IsFp32Mode()) { - return Latency::MTC1; - } else { - return Latency::MFHC1 + Latency::MTC1 + Latency::MTHC1; - } - } - case kMipsFloat64InsertHighWord32: - return Mthc1Latency(); - case kMipsFloat64SilenceNaN: - return Latency::SUB_D; - case kMipsSeb: - return SebLatency(); - case kMipsSeh: - return SehLatency(); - case kMipsUlhu: - return UlhuLatency(); - case kMipsUlh: - return UlhLatency(); - case kMipsUsh: - return UshLatency(); - case kMipsUlw: - return UlwLatency(); - case kMipsUsw: - return UswLatency(); - case kMipsUlwc1: - return Ulwc1Latency(); - case kMipsSwc1: - return MoveLatency(false) + Latency::SWC1; // Estimated max. - case kMipsUswc1: - return MoveLatency(false) + Uswc1Latency(); // Estimated max. - case kMipsLdc1: - return Ldc1Latency(); - case kMipsUldc1: - return Uldc1Latency(); - case kMipsSdc1: - return MoveLatency(false) + Sdc1Latency(); // Estimated max. - case kMipsUsdc1: - return MoveLatency(false) + Usdc1Latency(); // Estimated max. - case kMipsPush: { - if (instr->InputAt(0)->IsFPRegister()) { - auto op = LocationOperand::cast(instr->InputAt(0)); - switch (op->representation()) { - case MachineRepresentation::kFloat32: - return Latency::SWC1 + SubuLatency(false); - case MachineRepresentation::kFloat64: - return Sdc1Latency() + SubuLatency(false); - default: { - UNREACHABLE(); - } - } - } else { - return PushRegisterLatency(); - } - } - case kMipsPeek: { - if (instr->OutputAt(0)->IsFPRegister()) { - auto op = LocationOperand::cast(instr->OutputAt(0)); - if (op->representation() == MachineRepresentation::kFloat64) { - return Ldc1Latency(); - } else { - return Latency::LWC1; - } - } else { - return 1; - } - } - case kMipsStackClaim: - return SubuLatency(false); - case kMipsStoreToStackSlot: { - if (instr->InputAt(0)->IsFPRegister()) { - auto op = LocationOperand::cast(instr->InputAt(0)); - if (op->representation() == MachineRepresentation::kFloat64) { - return Sdc1Latency(); - } else if (op->representation() == MachineRepresentation::kFloat32) { - return Latency::SWC1; - } else { - return 1; // Estimated value. 
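The kMipsPush, kMipsPeek, and kMipsStoreToStackSlot cases above inspect the operand's machine representation before choosing a cost, because a float64 spill can be two word accesses while a word store is one. That dispatch shape in isolation, with a simplified enum standing in for V8's types:

// Sketch only: the estimate depends on the spilled value's representation.
enum class Rep { kWord32, kFloat32, kFloat64 };

int SpillLatencySketch(Rep rep) {
  switch (rep) {
    case Rep::kFloat64: return 2;  // e.g. sdc1 split into two word stores.
    case Rep::kFloat32: return 1;  // swc1
    case Rep::kWord32:  return 1;  // sw
  }
  return 1;  // Unreachable for valid input.
}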
- } - } else { - return 1; - } - } - case kMipsByteSwap32: - return ByteSwapSignedLatency(); - case kAtomicLoadInt8: - case kAtomicLoadUint8: - case kAtomicLoadInt16: - case kAtomicLoadUint16: - case kAtomicLoadWord32: - return 2; - case kAtomicStoreWord8: - case kAtomicStoreWord16: - case kAtomicStoreWord32: - return 3; - case kAtomicExchangeInt8: - return Word32AtomicExchangeLatency(true, 8); - case kAtomicExchangeUint8: - return Word32AtomicExchangeLatency(false, 8); - case kAtomicExchangeInt16: - return Word32AtomicExchangeLatency(true, 16); - case kAtomicExchangeUint16: - return Word32AtomicExchangeLatency(false, 16); - case kAtomicExchangeWord32: { - return 1 + AdduLatency() + Ldc1Latency() + 1 + ScLatency(0) + - BranchShortLatency() + 1; - } - case kAtomicCompareExchangeInt8: - return Word32AtomicCompareExchangeLatency(true, 8); - case kAtomicCompareExchangeUint8: - return Word32AtomicCompareExchangeLatency(false, 8); - case kAtomicCompareExchangeInt16: - return Word32AtomicCompareExchangeLatency(true, 16); - case kAtomicCompareExchangeUint16: - return Word32AtomicCompareExchangeLatency(false, 16); - case kAtomicCompareExchangeWord32: - return AdduLatency() + 1 + LlLatency(0) + BranchShortLatency() + 1; - case kMipsTst: - return AndLatency(instr->InputAt(1)->IsRegister()); - case kMipsCmpS: - return MoveLatency() + CompareF32Latency(); - case kMipsCmpD: - return MoveLatency() + CompareF64Latency(); - case kArchNop: - case kArchThrowTerminator: - case kMipsCmp: - return 0; - case kArchDebugBreak: - case kArchFramePointer: - case kArchParentFramePointer: - case kMipsShl: - case kMipsShr: - case kMipsSar: - case kMipsMov: - case kMipsMaxS: - case kMipsMinS: - case kMipsMaxD: - case kMipsMinD: - case kMipsLbu: - case kMipsLb: - case kMipsSb: - case kMipsLhu: - case kMipsLh: - case kMipsSh: - case kMipsLw: - case kMipsSw: - case kMipsLwc1: - return 1; - case kMipsAddS: - return Latency::ADD_S; - case kMipsSubS: - return Latency::SUB_S; - case kMipsMulS: - return Latency::MUL_S; - case kMipsAbsS: - return Latency::ABS_S; - case kMipsAddD: - return Latency::ADD_D; - case kMipsSubD: - return Latency::SUB_D; - case kMipsAbsD: - return Latency::ABS_D; - case kMipsCvtSD: - return Latency::CVT_S_D; - case kMipsCvtDS: - return Latency::CVT_D_S; - case kMipsMulD: - return Latency::MUL_D; - case kMipsFloorWS: - return Latency::FLOOR_W_S; - case kMipsCeilWS: - return Latency::CEIL_W_S; - case kMipsRoundWS: - return Latency::ROUND_W_S; - case kMipsCvtDW: - return Latency::CVT_D_W; - case kMipsCvtSW: - return Latency::CVT_S_W; - case kMipsDivS: - return Latency::DIV_S; - case kMipsSqrtS: - return Latency::SQRT_S; - case kMipsDivD: - return Latency::DIV_D; - case kMipsSqrtD: - return Latency::SQRT_D; - default: - return 1; - } -} - -} // namespace compiler -} // namespace internal -} // namespace v8 diff --git a/src/compiler/backend/mips/instruction-selector-mips.cc b/src/compiler/backend/mips/instruction-selector-mips.cc deleted file mode 100644 index d9c3f9cae6..0000000000 --- a/src/compiler/backend/mips/instruction-selector-mips.cc +++ /dev/null @@ -1,2573 +0,0 @@ -// Copyright 2014 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
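For context on how a table like GetInstructionLatency above gets used: a list scheduler typically folds per-instruction latencies into a critical-path priority and picks the highest-priority ready instruction first. A generic sketch under that assumption; this is not V8's scheduler interface:

// Sketch only: longest latency path from each node to the end of the block,
// assuming nodes are indexed in topological order (successors have larger
// indices). Higher priority schedules earlier.
#include <algorithm>
#include <vector>

struct SchedNode {
  int latency = 1;              // From a GetInstructionLatency-style table.
  std::vector<int> successors;  // Indices of dependent instructions.
};

std::vector<int> CriticalPathPriority(const std::vector<SchedNode>& nodes) {
  std::vector<int> prio(nodes.size(), 0);
  for (int i = static_cast<int>(nodes.size()) - 1; i >= 0; --i) {
    int best = 0;
    for (int s : nodes[i].successors) best = std::max(best, prio[s]);
    prio[i] = nodes[i].latency + best;
  }
  return prio;
}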
-
-#include "src/base/bits.h"
-#include "src/compiler/backend/instruction-selector-impl.h"
-#include "src/compiler/node-matchers.h"
-#include "src/compiler/node-properties.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-#define TRACE_UNIMPL() \
-  PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
-
-#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
-
-// Adds Mips-specific methods for generating InstructionOperands.
-class MipsOperandGenerator final : public OperandGenerator {
- public:
-  explicit MipsOperandGenerator(InstructionSelector* selector)
-      : OperandGenerator(selector) {}
-
-  InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
-    if (CanBeImmediate(node, opcode)) {
-      return UseImmediate(node);
-    }
-    return UseRegister(node);
-  }
-
-  // Use the zero register if the node has the immediate value zero, otherwise
-  // assign a register.
-  InstructionOperand UseRegisterOrImmediateZero(Node* node) {
-    if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
-        (IsFloatConstant(node) &&
-         (base::bit_cast<int64_t>(GetFloatConstantValue(node)) == 0))) {
-      return UseImmediate(node);
-    }
-    return UseRegister(node);
-  }
-
-  bool IsIntegerConstant(Node* node) {
-    return (node->opcode() == IrOpcode::kInt32Constant);
-  }
-
-  int64_t GetIntegerConstantValue(Node* node) {
-    DCHECK_EQ(IrOpcode::kInt32Constant, node->opcode());
-    return OpParameter<int32_t>(node->op());
-  }
-
-  bool IsFloatConstant(Node* node) {
-    return (node->opcode() == IrOpcode::kFloat32Constant) ||
-           (node->opcode() == IrOpcode::kFloat64Constant);
-  }
-
-  double GetFloatConstantValue(Node* node) {
-    if (node->opcode() == IrOpcode::kFloat32Constant) {
-      return OpParameter<float>(node->op());
-    }
-    DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
-    return OpParameter<double>(node->op());
-  }
-
-  bool CanBeImmediate(Node* node, InstructionCode opcode) {
-    Int32Matcher m(node);
-    if (!m.HasResolvedValue()) return false;
-    int32_t value = m.ResolvedValue();
-    switch (ArchOpcodeField::decode(opcode)) {
-      case kMipsShl:
-      case kMipsSar:
-      case kMipsShr:
-        return is_uint5(value);
-      case kMipsAdd:
-      case kMipsAnd:
-      case kMipsOr:
-      case kMipsTst:
-      case kMipsSub:
-      case kMipsXor:
-        return is_uint16(value);
-      case kMipsLb:
-      case kMipsLbu:
-      case kMipsSb:
-      case kMipsLh:
-      case kMipsLhu:
-      case kMipsSh:
-      case kMipsLw:
-      case kMipsSw:
-      case kMipsLwc1:
-      case kMipsSwc1:
-      case kMipsLdc1:
-      case kMipsSdc1:
-        // true even for 32b values, offsets > 16b
-        // are handled in assembler-mips.cc
-        return is_int32(value);
-      default:
-        return is_int16(value);
-    }
-  }
-
- private:
-  bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
-    TRACE_UNIMPL();
-    return false;
-  }
-};
-
-static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
-                     Node* node) {
-  MipsOperandGenerator g(selector);
-  selector->Emit(opcode, g.DefineAsRegister(node),
-                 g.UseRegister(node->InputAt(0)),
-                 g.UseRegister(node->InputAt(1)));
-}
-
-static void VisitUniqueRRR(InstructionSelector* selector, ArchOpcode opcode,
-                           Node* node) {
-  MipsOperandGenerator g(selector);
-  selector->Emit(opcode, g.DefineAsRegister(node),
-                 g.UseUniqueRegister(node->InputAt(0)),
-                 g.UseUniqueRegister(node->InputAt(1)));
-}
-
-void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
-  MipsOperandGenerator g(selector);
-  selector->Emit(
-      opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
-      g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2)));
-}
-
-static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
-                    Node* node) {
-  MipsOperandGenerator g(selector);
-  selector->Emit(opcode, g.DefineAsRegister(node),
-                 g.UseRegister(node->InputAt(0)));
-}
-
-static void VisitRRI(InstructionSelector* selector, ArchOpcode opcode,
-                     Node* node) {
-  MipsOperandGenerator g(selector);
-  int32_t imm = OpParameter<int32_t>(node->op());
-  selector->Emit(opcode, g.DefineAsRegister(node),
-                 g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
-}
-
-static void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode,
-                      Node* node) {
-  MipsOperandGenerator g(selector);
-  int32_t imm = OpParameter<int32_t>(node->op());
-  selector->Emit(opcode, g.DefineAsRegister(node),
-                 g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
-                 g.UseRegister(node->InputAt(1)));
-}
-
-static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
-                     Node* node) {
-  MipsOperandGenerator g(selector);
-  selector->Emit(opcode, g.DefineAsRegister(node),
-                 g.UseRegister(node->InputAt(0)),
-                 g.UseOperand(node->InputAt(1), opcode));
-}
-
-bool TryMatchImmediate(InstructionSelector* selector,
-                       InstructionCode* opcode_return, Node* node,
-                       size_t* input_count_return, InstructionOperand* inputs) {
-  MipsOperandGenerator g(selector);
-  if (g.CanBeImmediate(node, *opcode_return)) {
-    *opcode_return |= AddressingModeField::encode(kMode_MRI);
-    inputs[0] = g.UseImmediate(node);
-    *input_count_return = 1;
-    return true;
-  }
-  return false;
-}
-
-static void VisitBinop(InstructionSelector* selector, Node* node,
-                       InstructionCode opcode, bool has_reverse_opcode,
-                       InstructionCode reverse_opcode,
-                       FlagsContinuation* cont) {
-  MipsOperandGenerator g(selector);
-  Int32BinopMatcher m(node);
-  InstructionOperand inputs[2];
-  size_t input_count = 0;
-  InstructionOperand outputs[1];
-  size_t output_count = 0;
-
-  if (TryMatchImmediate(selector, &opcode, m.right().node(), &input_count,
-                        &inputs[1])) {
-    inputs[0] = g.UseRegister(m.left().node());
-    input_count++;
-  } else if (has_reverse_opcode &&
-             TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
-                               &input_count, &inputs[1])) {
-    inputs[0] = g.UseRegister(m.right().node());
-    opcode = reverse_opcode;
-    input_count++;
-  } else {
-    inputs[input_count++] = g.UseRegister(m.left().node());
-    inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
-  }
-
-  if (cont->IsDeoptimize()) {
-    // If we can deoptimize as a result of the binop, we need to make sure that
-    // the deopt inputs are not overwritten by the binop result. One way
-    // to achieve that is to declare the output register as same-as-first.
- outputs[output_count++] = g.DefineSameAsFirst(node); - } else { - outputs[output_count++] = g.DefineAsRegister(node); - } - - DCHECK_NE(0u, input_count); - DCHECK_EQ(1u, output_count); - DCHECK_GE(arraysize(inputs), input_count); - DCHECK_GE(arraysize(outputs), output_count); - - selector->EmitWithContinuation(opcode, output_count, outputs, input_count, - inputs, cont); -} - -static void VisitBinop(InstructionSelector* selector, Node* node, - InstructionCode opcode, bool has_reverse_opcode, - InstructionCode reverse_opcode) { - FlagsContinuation cont; - VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont); -} - -static void VisitBinop(InstructionSelector* selector, Node* node, - InstructionCode opcode, FlagsContinuation* cont) { - VisitBinop(selector, node, opcode, false, kArchNop, cont); -} - -static void VisitBinop(InstructionSelector* selector, Node* node, - InstructionCode opcode) { - VisitBinop(selector, node, opcode, false, kArchNop); -} - -static void VisitPairAtomicBinop(InstructionSelector* selector, Node* node, - ArchOpcode opcode) { - MipsOperandGenerator g(selector); - Node* base = node->InputAt(0); - Node* index = node->InputAt(1); - Node* value = node->InputAt(2); - Node* value_high = node->InputAt(3); - AddressingMode addressing_mode = kMode_None; - InstructionCode code = opcode | AddressingModeField::encode(addressing_mode); - InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index), - g.UseFixed(value, a1), - g.UseFixed(value_high, a2)}; - InstructionOperand outputs[2]; - size_t output_count = 0; - InstructionOperand temps[3]; - size_t temp_count = 0; - temps[temp_count++] = g.TempRegister(a0); - - Node* projection0 = NodeProperties::FindProjection(node, 0); - Node* projection1 = NodeProperties::FindProjection(node, 1); - if (projection0) { - outputs[output_count++] = g.DefineAsFixed(projection0, v0); - } else { - temps[temp_count++] = g.TempRegister(v0); - } - if (projection1) { - outputs[output_count++] = g.DefineAsFixed(projection1, v1); - } else { - temps[temp_count++] = g.TempRegister(v1); - } - selector->Emit(code, output_count, outputs, arraysize(inputs), inputs, - temp_count, temps); -} - -void InstructionSelector::VisitStackSlot(Node* node) { - StackSlotRepresentation rep = StackSlotRepresentationOf(node->op()); - int alignment = rep.alignment(); - int slot = frame_->AllocateSpillSlot(rep.size(), alignment); - OperandGenerator g(this); - - Emit(kArchStackSlot, g.DefineAsRegister(node), - sequence()->AddImmediate(Constant(slot)), 0, nullptr); -} - -void InstructionSelector::VisitAbortCSADcheck(Node* node) { - MipsOperandGenerator g(this); - Emit(kArchAbortCSADcheck, g.NoOutput(), g.UseFixed(node->InputAt(0), a0)); -} - -void InstructionSelector::VisitLoadTransform(Node* node) { - LoadTransformParameters params = LoadTransformParametersOf(node->op()); - MipsOperandGenerator g(this); - Node* base = node->InputAt(0); - Node* index = node->InputAt(1); - - InstructionCode opcode = kArchNop; - switch (params.transformation) { - case LoadTransformation::kS128Load8Splat: - opcode = kMipsS128Load8Splat; - break; - case LoadTransformation::kS128Load16Splat: - opcode = kMipsS128Load16Splat; - break; - case LoadTransformation::kS128Load32Splat: - opcode = kMipsS128Load32Splat; - break; - case LoadTransformation::kS128Load64Splat: - opcode = kMipsS128Load64Splat; - break; - case LoadTransformation::kS128Load8x8S: - opcode = kMipsS128Load8x8S; - break; - case LoadTransformation::kS128Load8x8U: - opcode = kMipsS128Load8x8U; - break; - 
case LoadTransformation::kS128Load16x4S: - opcode = kMipsS128Load16x4S; - break; - case LoadTransformation::kS128Load16x4U: - opcode = kMipsS128Load16x4U; - break; - case LoadTransformation::kS128Load32x2S: - opcode = kMipsS128Load32x2S; - break; - case LoadTransformation::kS128Load32x2U: - opcode = kMipsS128Load32x2U; - break; - default: - UNIMPLEMENTED(); - } - - if (g.CanBeImmediate(index, opcode)) { - Emit(opcode | AddressingModeField::encode(kMode_MRI), - g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index)); - } else { - InstructionOperand addr_reg = g.TempRegister(); - Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg, - g.UseRegister(index), g.UseRegister(base)); - // Emit desired load opcode, using temp addr_reg. - Emit(opcode | AddressingModeField::encode(kMode_MRI), - g.DefineAsRegister(node), addr_reg, g.TempImmediate(0)); - } -} - -void InstructionSelector::VisitLoad(Node* node) { - LoadRepresentation load_rep = LoadRepresentationOf(node->op()); - MipsOperandGenerator g(this); - Node* base = node->InputAt(0); - Node* index = node->InputAt(1); - - InstructionCode opcode = kArchNop; - switch (load_rep.representation()) { - case MachineRepresentation::kFloat32: - opcode = kMipsLwc1; - break; - case MachineRepresentation::kFloat64: - opcode = kMipsLdc1; - break; - case MachineRepresentation::kBit: // Fall through. - case MachineRepresentation::kWord8: - opcode = load_rep.IsUnsigned() ? kMipsLbu : kMipsLb; - break; - case MachineRepresentation::kWord16: - opcode = load_rep.IsUnsigned() ? kMipsLhu : kMipsLh; - break; - case MachineRepresentation::kTaggedSigned: // Fall through. - case MachineRepresentation::kTaggedPointer: // Fall through. - case MachineRepresentation::kTagged: // Fall through. - case MachineRepresentation::kWord32: - opcode = kMipsLw; - break; - case MachineRepresentation::kSimd128: - opcode = kMipsMsaLd; - break; - case MachineRepresentation::kSimd256: // Fall through. - case MachineRepresentation::kCompressedPointer: // Fall through. - case MachineRepresentation::kCompressed: // Fall through. - case MachineRepresentation::kSandboxedPointer: // Fall through. - case MachineRepresentation::kWord64: // Fall through. - case MachineRepresentation::kMapWord: // Fall through. - case MachineRepresentation::kNone: - UNREACHABLE(); - } - - if (g.CanBeImmediate(index, opcode)) { - Emit(opcode | AddressingModeField::encode(kMode_MRI), - g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index)); - } else { - InstructionOperand addr_reg = g.TempRegister(); - Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg, - g.UseRegister(index), g.UseRegister(base)); - // Emit desired load opcode, using temp addr_reg. - Emit(opcode | AddressingModeField::encode(kMode_MRI), - g.DefineAsRegister(node), addr_reg, g.TempImmediate(0)); - } -} - -void InstructionSelector::VisitProtectedLoad(Node* node) { - // TODO(eholk) - UNIMPLEMENTED(); -} - -void InstructionSelector::VisitStore(Node* node) { - MipsOperandGenerator g(this); - Node* base = node->InputAt(0); - Node* index = node->InputAt(1); - Node* value = node->InputAt(2); - - StoreRepresentation store_rep = StoreRepresentationOf(node->op()); - WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind(); - MachineRepresentation rep = store_rep.representation(); - - if (FLAG_enable_unconditional_write_barriers && CanBeTaggedPointer(rep)) { - write_barrier_kind = kFullWriteBarrier; - } - - // TODO(mips): I guess this could be done in a better way. 
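VisitLoad above is essentially a MachineRepresentation-to-opcode table followed by an addressing-mode choice, and the write-barrier branch of VisitStore below repeats the table for stores. The mapping on its own, with placeholder enums rather than V8's types:

// Sketch only: pick a load opcode from the value's representation and
// signedness.
enum class LoadRep { kWord8, kWord16, kWord32, kFloat32, kFloat64 };
enum class LoadOp { kLb, kLbu, kLh, kLhu, kLw, kLwc1, kLdc1 };

LoadOp LoadOpcodeForSketch(LoadRep rep, bool is_unsigned) {
  switch (rep) {
    case LoadRep::kWord8:   return is_unsigned ? LoadOp::kLbu : LoadOp::kLb;
    case LoadRep::kWord16:  return is_unsigned ? LoadOp::kLhu : LoadOp::kLh;
    case LoadRep::kWord32:  return LoadOp::kLw;
    case LoadRep::kFloat32: return LoadOp::kLwc1;
    case LoadRep::kFloat64: return LoadOp::kLdc1;
  }
  return LoadOp::kLw;  // Unreachable for valid input.
}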
-  if (write_barrier_kind != kNoWriteBarrier && !FLAG_disable_write_barriers) {
-    DCHECK(CanBeTaggedPointer(rep));
-    InstructionOperand inputs[3];
-    size_t input_count = 0;
-    inputs[input_count++] = g.UseUniqueRegister(base);
-    inputs[input_count++] = g.UseUniqueRegister(index);
-    inputs[input_count++] = g.UseUniqueRegister(value);
-    RecordWriteMode record_write_mode =
-        WriteBarrierKindToRecordWriteMode(write_barrier_kind);
-    InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
-    size_t const temp_count = arraysize(temps);
-    InstructionCode code = kArchStoreWithWriteBarrier;
-    code |= MiscField::encode(static_cast<int>(record_write_mode));
-    Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
-  } else {
-    ArchOpcode opcode;
-    switch (rep) {
-      case MachineRepresentation::kFloat32:
-        opcode = kMipsSwc1;
-        break;
-      case MachineRepresentation::kFloat64:
-        opcode = kMipsSdc1;
-        break;
-      case MachineRepresentation::kBit:  // Fall through.
-      case MachineRepresentation::kWord8:
-        opcode = kMipsSb;
-        break;
-      case MachineRepresentation::kWord16:
-        opcode = kMipsSh;
-        break;
-      case MachineRepresentation::kTaggedSigned:   // Fall through.
-      case MachineRepresentation::kTaggedPointer:  // Fall through.
-      case MachineRepresentation::kTagged:         // Fall through.
-      case MachineRepresentation::kWord32:
-        opcode = kMipsSw;
-        break;
-      case MachineRepresentation::kSimd128:
-        opcode = kMipsMsaSt;
-        break;
-      case MachineRepresentation::kSimd256:            // Fall through.
-      case MachineRepresentation::kCompressedPointer:  // Fall through.
-      case MachineRepresentation::kCompressed:         // Fall through.
-      case MachineRepresentation::kSandboxedPointer:   // Fall through.
-      case MachineRepresentation::kWord64:             // Fall through.
-      case MachineRepresentation::kMapWord:            // Fall through.
-      case MachineRepresentation::kNone:
-        UNREACHABLE();
-    }
-
-    if (g.CanBeImmediate(index, opcode)) {
-      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
-           g.UseRegister(base), g.UseImmediate(index),
-           g.UseRegisterOrImmediateZero(value));
-    } else {
-      InstructionOperand addr_reg = g.TempRegister();
-      Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg,
-           g.UseRegister(index), g.UseRegister(base));
-      // Emit desired store opcode, using temp addr_reg.
-      Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
-           addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value));
-    }
-  }
-}
-
-void InstructionSelector::VisitProtectedStore(Node* node) {
-  // TODO(eholk)
-  UNIMPLEMENTED();
-}
-
-void InstructionSelector::VisitLoadLane(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitStoreLane(Node* node) { UNIMPLEMENTED(); }
-
-void InstructionSelector::VisitWord32And(Node* node) {
-  MipsOperandGenerator g(this);
-  Int32BinopMatcher m(node);
-  if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
-      m.right().HasResolvedValue()) {
-    uint32_t mask = m.right().ResolvedValue();
-    uint32_t mask_width = base::bits::CountPopulation(mask);
-    uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
-    if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
-      // The mask must be contiguous, and occupy the least-significant bits.
-      DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
-
-      // Select Ext for And(Shr(x, imm), mask) where the mask is in the least
-      // significant bits.
-      Int32BinopMatcher mleft(m.left().node());
-      if (mleft.right().HasResolvedValue()) {
-        // Any shift value can match; int32 shifts use `value % 32`.
-        uint32_t lsb = mleft.right().ResolvedValue() & 0x1F;
-
-        // Ext cannot extract bits past the register size, however since
-        // shifting the original value would have introduced some zeros we can
-        // still use Ext with a smaller mask and the remaining bits will be
-        // zeros.
-        if (lsb + mask_width > 32) mask_width = 32 - lsb;
-
-        if (lsb == 0 && mask_width == 32) {
-          Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(mleft.left().node()));
-        } else {
-          Emit(kMipsExt, g.DefineAsRegister(node),
-               g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
-               g.TempImmediate(mask_width));
-        }
-        return;
-      }
-      // Other cases fall through to the normal And operation.
-    }
-  }
-  if (m.right().HasResolvedValue()) {
-    uint32_t mask = m.right().ResolvedValue();
-    uint32_t shift = base::bits::CountPopulation(~mask);
-    uint32_t msb = base::bits::CountLeadingZeros32(~mask);
-    if (shift != 0 && shift != 32 && msb + shift == 32) {
-      // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
-      // and remove constant loading of inverted mask.
-      Emit(kMipsIns, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
-           g.TempImmediate(0), g.TempImmediate(shift));
-      return;
-    }
-  }
-  VisitBinop(this, node, kMipsAnd, true, kMipsAnd);
-}
-
-void InstructionSelector::VisitWord32Or(Node* node) {
-  VisitBinop(this, node, kMipsOr, true, kMipsOr);
-}
-
-void InstructionSelector::VisitWord32Xor(Node* node) {
-  Int32BinopMatcher m(node);
-  if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
-      m.right().Is(-1)) {
-    Int32BinopMatcher mleft(m.left().node());
-    if (!mleft.right().HasResolvedValue()) {
-      MipsOperandGenerator g(this);
-      Emit(kMipsNor, g.DefineAsRegister(node),
-           g.UseRegister(mleft.left().node()),
-           g.UseRegister(mleft.right().node()));
-      return;
-    }
-  }
-  if (m.right().Is(-1)) {
-    // Use Nor for bit negation and eliminate constant loading for xori.
-    MipsOperandGenerator g(this);
-    Emit(kMipsNor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
-         g.TempImmediate(0));
-    return;
-  }
-  VisitBinop(this, node, kMipsXor, true, kMipsXor);
-}
-
-void InstructionSelector::VisitWord32Shl(Node* node) {
-  Int32BinopMatcher m(node);
-  if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
-      m.right().IsInRange(1, 31)) {
-    MipsOperandGenerator g(this);
-    Int32BinopMatcher mleft(m.left().node());
-    // Match Word32Shl(Word32And(x, mask), imm) to Shl where the mask is
-    // contiguous, and the shift immediate non-zero.
-    if (mleft.right().HasResolvedValue()) {
-      uint32_t mask = mleft.right().ResolvedValue();
-      uint32_t mask_width = base::bits::CountPopulation(mask);
-      uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
-      if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
-        uint32_t shift = m.right().ResolvedValue();
-        DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
-        DCHECK_NE(0u, shift);
-        if ((shift + mask_width) >= 32) {
-          // If the mask is contiguous and reaches or extends beyond the top
-          // bit, only the shift is needed.
- Emit(kMipsShl, g.DefineAsRegister(node), - g.UseRegister(mleft.left().node()), - g.UseImmediate(m.right().node())); - return; - } - } - } - } - VisitRRO(this, kMipsShl, node); -} - -void InstructionSelector::VisitWord32Shr(Node* node) { - Int32BinopMatcher m(node); - if (m.left().IsWord32And() && m.right().HasResolvedValue()) { - uint32_t lsb = m.right().ResolvedValue() & 0x1F; - Int32BinopMatcher mleft(m.left().node()); - if (mleft.right().HasResolvedValue() && - mleft.right().ResolvedValue() != 0) { - // Select Ext for Shr(And(x, mask), imm) where the result of the mask is - // shifted into the least-significant bits. - uint32_t mask = (mleft.right().ResolvedValue() >> lsb) << lsb; - unsigned mask_width = base::bits::CountPopulation(mask); - unsigned mask_msb = base::bits::CountLeadingZeros32(mask); - if ((mask_msb + mask_width + lsb) == 32) { - MipsOperandGenerator g(this); - DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask)); - Emit(kMipsExt, g.DefineAsRegister(node), - g.UseRegister(mleft.left().node()), g.TempImmediate(lsb), - g.TempImmediate(mask_width)); - return; - } - } - } - VisitRRO(this, kMipsShr, node); -} - -void InstructionSelector::VisitWord32Sar(Node* node) { - Int32BinopMatcher m(node); - if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && - m.left().IsWord32Shl() && CanCover(node, m.left().node())) { - Int32BinopMatcher mleft(m.left().node()); - if (m.right().HasResolvedValue() && mleft.right().HasResolvedValue()) { - MipsOperandGenerator g(this); - uint32_t sar = m.right().ResolvedValue(); - uint32_t shl = mleft.right().ResolvedValue(); - if ((sar == shl) && (sar == 16)) { - Emit(kMipsSeh, g.DefineAsRegister(node), - g.UseRegister(mleft.left().node())); - return; - } else if ((sar == shl) && (sar == 24)) { - Emit(kMipsSeb, g.DefineAsRegister(node), - g.UseRegister(mleft.left().node())); - return; - } - } - } - VisitRRO(this, kMipsSar, node); -} - -static void VisitInt32PairBinop(InstructionSelector* selector, - InstructionCode pair_opcode, - InstructionCode single_opcode, Node* node) { - MipsOperandGenerator g(selector); - - Node* projection1 = NodeProperties::FindProjection(node, 1); - - if (projection1) { - // We use UseUniqueRegister here to avoid register sharing with the output - // register. - InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)), - g.UseUniqueRegister(node->InputAt(1)), - g.UseUniqueRegister(node->InputAt(2)), - g.UseUniqueRegister(node->InputAt(3))}; - - InstructionOperand outputs[] = { - g.DefineAsRegister(node), - g.DefineAsRegister(NodeProperties::FindProjection(node, 1))}; - selector->Emit(pair_opcode, 2, outputs, 4, inputs); - } else { - // The high word of the result is not used, so we emit the standard 32 bit - // instruction. - selector->Emit(single_opcode, g.DefineSameAsFirst(node), - g.UseRegister(node->InputAt(0)), - g.UseRegister(node->InputAt(2))); - } -} - -void InstructionSelector::VisitInt32PairAdd(Node* node) { - VisitInt32PairBinop(this, kMipsAddPair, kMipsAdd, node); -} - -void InstructionSelector::VisitInt32PairSub(Node* node) { - VisitInt32PairBinop(this, kMipsSubPair, kMipsSub, node); -} - -void InstructionSelector::VisitInt32PairMul(Node* node) { - VisitInt32PairBinop(this, kMipsMulPair, kMipsMul, node); -} - -// Shared routine for multiple shift operations. 
-static void VisitWord32PairShift(InstructionSelector* selector, - InstructionCode opcode, Node* node) { - MipsOperandGenerator g(selector); - Int32Matcher m(node->InputAt(2)); - InstructionOperand shift_operand; - if (m.HasResolvedValue()) { - shift_operand = g.UseImmediate(m.node()); - } else { - shift_operand = g.UseUniqueRegister(m.node()); - } - - // We use UseUniqueRegister here to avoid register sharing with the output - // register. - InstructionOperand inputs[] = {g.UseUniqueRegister(node->InputAt(0)), - g.UseUniqueRegister(node->InputAt(1)), - shift_operand}; - - Node* projection1 = NodeProperties::FindProjection(node, 1); - - InstructionOperand outputs[2]; - InstructionOperand temps[1]; - int32_t output_count = 0; - int32_t temp_count = 0; - - outputs[output_count++] = g.DefineAsRegister(node); - if (projection1) { - outputs[output_count++] = g.DefineAsRegister(projection1); - } else { - temps[temp_count++] = g.TempRegister(); - } - - selector->Emit(opcode, output_count, outputs, 3, inputs, temp_count, temps); -} - -void InstructionSelector::VisitWord32PairShl(Node* node) { - VisitWord32PairShift(this, kMipsShlPair, node); -} - -void InstructionSelector::VisitWord32PairShr(Node* node) { - VisitWord32PairShift(this, kMipsShrPair, node); -} - -void InstructionSelector::VisitWord32PairSar(Node* node) { - VisitWord32PairShift(this, kMipsSarPair, node); -} - -void InstructionSelector::VisitWord32Rol(Node* node) { UNREACHABLE(); } - -void InstructionSelector::VisitWord32Ror(Node* node) { - VisitRRO(this, kMipsRor, node); -} - -void InstructionSelector::VisitWord32Clz(Node* node) { - VisitRR(this, kMipsClz, node); -} - -void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) { - MipsOperandGenerator g(this); - Node* base = node->InputAt(0); - Node* index = node->InputAt(1); - ArchOpcode opcode = kMipsWord32AtomicPairLoad; - AddressingMode addressing_mode = kMode_MRI; - InstructionCode code = opcode | AddressingModeField::encode(addressing_mode); - InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index)}; - InstructionOperand temps[3]; - size_t temp_count = 0; - temps[temp_count++] = g.TempRegister(a0); - InstructionOperand outputs[2]; - size_t output_count = 0; - - Node* projection0 = NodeProperties::FindProjection(node, 0); - Node* projection1 = NodeProperties::FindProjection(node, 1); - if (projection0) { - outputs[output_count++] = g.DefineAsFixed(projection0, v0); - } else { - temps[temp_count++] = g.TempRegister(v0); - } - if (projection1) { - outputs[output_count++] = g.DefineAsFixed(projection1, v1); - } else { - temps[temp_count++] = g.TempRegister(v1); - } - Emit(code, output_count, outputs, arraysize(inputs), inputs, temp_count, - temps); -} - -void InstructionSelector::VisitWord32AtomicPairStore(Node* node) { - MipsOperandGenerator g(this); - Node* base = node->InputAt(0); - Node* index = node->InputAt(1); - Node* value_low = node->InputAt(2); - Node* value_high = node->InputAt(3); - - InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index), - g.UseFixed(value_low, a1), - g.UseFixed(value_high, a2)}; - InstructionOperand temps[] = {g.TempRegister(a0), g.TempRegister(), - g.TempRegister()}; - Emit(kMipsWord32AtomicPairStore | AddressingModeField::encode(kMode_MRI), 0, - nullptr, arraysize(inputs), inputs, arraysize(temps), temps); -} - -void InstructionSelector::VisitWord32AtomicPairAdd(Node* node) { - VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairAdd); -} - -void InstructionSelector::VisitWord32AtomicPairSub(Node* node) { - 
VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairSub); -} - -void InstructionSelector::VisitWord32AtomicPairAnd(Node* node) { - VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairAnd); -} - -void InstructionSelector::VisitWord32AtomicPairOr(Node* node) { - VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairOr); -} - -void InstructionSelector::VisitWord32AtomicPairXor(Node* node) { - VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairXor); -} - -void InstructionSelector::VisitWord32AtomicPairExchange(Node* node) { - VisitPairAtomicBinop(this, node, kMipsWord32AtomicPairExchange); -} - -void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) { - MipsOperandGenerator g(this); - InstructionOperand inputs[] = { - g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), - g.UseFixed(node->InputAt(2), a1), g.UseFixed(node->InputAt(3), a2), - g.UseFixed(node->InputAt(4), a3), g.UseUniqueRegister(node->InputAt(5))}; - - InstructionCode code = kMipsWord32AtomicPairCompareExchange | - AddressingModeField::encode(kMode_MRI); - Node* projection0 = NodeProperties::FindProjection(node, 0); - Node* projection1 = NodeProperties::FindProjection(node, 1); - InstructionOperand outputs[2]; - size_t output_count = 0; - InstructionOperand temps[3]; - size_t temp_count = 0; - temps[temp_count++] = g.TempRegister(a0); - if (projection0) { - outputs[output_count++] = g.DefineAsFixed(projection0, v0); - } else { - temps[temp_count++] = g.TempRegister(v0); - } - if (projection1) { - outputs[output_count++] = g.DefineAsFixed(projection1, v1); - } else { - temps[temp_count++] = g.TempRegister(v1); - } - Emit(code, output_count, outputs, arraysize(inputs), inputs, temp_count, - temps); -} - -void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); } - -void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); } - -void InstructionSelector::VisitWord32ReverseBytes(Node* node) { - MipsOperandGenerator g(this); - Emit(kMipsByteSwap32, g.DefineAsRegister(node), - g.UseRegister(node->InputAt(0))); -} - -void InstructionSelector::VisitSimd128ReverseBytes(Node* node) { - UNREACHABLE(); -} - -void InstructionSelector::VisitWord32Ctz(Node* node) { - MipsOperandGenerator g(this); - Emit(kMipsCtz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); -} - -void InstructionSelector::VisitWord32Popcnt(Node* node) { - MipsOperandGenerator g(this); - Emit(kMipsPopcnt, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); -} - -void InstructionSelector::VisitInt32Add(Node* node) { - MipsOperandGenerator g(this); - Int32BinopMatcher m(node); - - if (IsMipsArchVariant(kMips32r6)) { - // Select Lsa for (left + (left_of_right << imm)). - if (m.right().opcode() == IrOpcode::kWord32Shl && - CanCover(node, m.left().node()) && CanCover(node, m.right().node())) { - Int32BinopMatcher mright(m.right().node()); - if (mright.right().HasResolvedValue() && !m.left().HasResolvedValue()) { - int32_t shift_value = - static_cast(mright.right().ResolvedValue()); - if (shift_value > 0 && shift_value <= 31) { - Emit(kMipsLsa, g.DefineAsRegister(node), - g.UseRegister(m.left().node()), - g.UseRegister(mright.left().node()), - g.TempImmediate(shift_value)); - return; - } - } - } - - // Select Lsa for ((left_of_left << imm) + right). 
- if (m.left().opcode() == IrOpcode::kWord32Shl && - CanCover(node, m.right().node()) && CanCover(node, m.left().node())) { - Int32BinopMatcher mleft(m.left().node()); - if (mleft.right().HasResolvedValue() && !m.right().HasResolvedValue()) { - int32_t shift_value = - static_cast<int32_t>(mleft.right().ResolvedValue()); - if (shift_value > 0 && shift_value <= 31) { - Emit(kMipsLsa, g.DefineAsRegister(node), - g.UseRegister(m.right().node()), - g.UseRegister(mleft.left().node()), - g.TempImmediate(shift_value)); - return; - } - } - } - } - - VisitBinop(this, node, kMipsAdd, true, kMipsAdd); -} - -void InstructionSelector::VisitInt32Sub(Node* node) { - VisitBinop(this, node, kMipsSub); -} - -void InstructionSelector::VisitInt32Mul(Node* node) { - MipsOperandGenerator g(this); - Int32BinopMatcher m(node); - if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) { - uint32_t value = static_cast<uint32_t>(m.right().ResolvedValue()); - if (base::bits::IsPowerOfTwo(value)) { - Emit(kMipsShl | AddressingModeField::encode(kMode_None), - g.DefineAsRegister(node), g.UseRegister(m.left().node()), - g.TempImmediate(base::bits::WhichPowerOfTwo(value))); - return; - } - if (base::bits::IsPowerOfTwo(value - 1) && IsMipsArchVariant(kMips32r6) && - value - 1 > 0 && value - 1 <= 31) { - Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()), - g.UseRegister(m.left().node()), - g.TempImmediate(base::bits::WhichPowerOfTwo(value - 1))); - return; - } - if (base::bits::IsPowerOfTwo(value + 1)) { - InstructionOperand temp = g.TempRegister(); - Emit(kMipsShl | AddressingModeField::encode(kMode_None), temp, - g.UseRegister(m.left().node()), - g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1))); - Emit(kMipsSub | AddressingModeField::encode(kMode_None), - g.DefineAsRegister(node), temp, g.UseRegister(m.left().node())); - return; - } - } - VisitRRR(this, kMipsMul, node); -} - -void InstructionSelector::VisitInt32MulHigh(Node* node) { - VisitRRR(this, kMipsMulHigh, node); -} - -void InstructionSelector::VisitUint32MulHigh(Node* node) { - MipsOperandGenerator g(this); - Emit(kMipsMulHighU, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)), - g.UseRegister(node->InputAt(1))); -} - -void InstructionSelector::VisitInt32Div(Node* node) { - MipsOperandGenerator g(this); - Int32BinopMatcher m(node); - Emit(kMipsDiv, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), - g.UseRegister(m.right().node())); -} - -void InstructionSelector::VisitUint32Div(Node* node) { - MipsOperandGenerator g(this); - Int32BinopMatcher m(node); - Emit(kMipsDivU, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()), - g.UseRegister(m.right().node())); -} - -void InstructionSelector::VisitInt32Mod(Node* node) { - MipsOperandGenerator g(this); - Int32BinopMatcher m(node); - Emit(kMipsMod, g.DefineAsRegister(node), g.UseRegister(m.left().node()), - g.UseRegister(m.right().node())); -} - -void InstructionSelector::VisitUint32Mod(Node* node) { - MipsOperandGenerator g(this); - Int32BinopMatcher m(node); - Emit(kMipsModU, g.DefineAsRegister(node), g.UseRegister(m.left().node()), - g.UseRegister(m.right().node())); -} - -void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) { - VisitRR(this, kMipsCvtDS, node); -} - -void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) { - VisitRR(this, kMipsCvtSW, node); -} - -void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) { - VisitRR(this, kMipsCvtSUw, node); -} - -void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) { - VisitRR(this,
kMipsCvtDW, node); -} - -void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) { - VisitRR(this, kMipsCvtDUw, node); -} - -void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) { - MipsOperandGenerator g(this); - InstructionCode opcode = kMipsTruncWS; - TruncateKind kind = OpParameter<TruncateKind>(node->op()); - if (kind == TruncateKind::kSetOverflowToMin) { - opcode |= MiscField::encode(true); - } - - Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); -} - -void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) { - MipsOperandGenerator g(this); - InstructionCode opcode = kMipsTruncUwS; - TruncateKind kind = OpParameter<TruncateKind>(node->op()); - if (kind == TruncateKind::kSetOverflowToMin) { - opcode |= MiscField::encode(true); - } - - Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); -} - -void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) { - MipsOperandGenerator g(this); - Node* value = node->InputAt(0); - // Match ChangeFloat64ToInt32(Float64Round##OP) to corresponding instruction - // which does rounding and conversion to integer format. - if (CanCover(node, value)) { - switch (value->opcode()) { - case IrOpcode::kFloat64RoundDown: - Emit(kMipsFloorWD, g.DefineAsRegister(node), - g.UseRegister(value->InputAt(0))); - return; - case IrOpcode::kFloat64RoundUp: - Emit(kMipsCeilWD, g.DefineAsRegister(node), - g.UseRegister(value->InputAt(0))); - return; - case IrOpcode::kFloat64RoundTiesEven: - Emit(kMipsRoundWD, g.DefineAsRegister(node), - g.UseRegister(value->InputAt(0))); - return; - case IrOpcode::kFloat64RoundTruncate: - Emit(kMipsTruncWD, g.DefineAsRegister(node), - g.UseRegister(value->InputAt(0))); - return; - default: - break; - } - if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) { - Node* next = value->InputAt(0); - if (CanCover(value, next)) { - // Match ChangeFloat64ToInt32(ChangeFloat32ToFloat64(Float64Round##OP)) - switch (next->opcode()) { - case IrOpcode::kFloat32RoundDown: - Emit(kMipsFloorWS, g.DefineAsRegister(node), - g.UseRegister(next->InputAt(0))); - return; - case IrOpcode::kFloat32RoundUp: - Emit(kMipsCeilWS, g.DefineAsRegister(node), - g.UseRegister(next->InputAt(0))); - return; - case IrOpcode::kFloat32RoundTiesEven: - Emit(kMipsRoundWS, g.DefineAsRegister(node), - g.UseRegister(next->InputAt(0))); - return; - case IrOpcode::kFloat32RoundTruncate: - Emit(kMipsTruncWS, g.DefineAsRegister(node), - g.UseRegister(next->InputAt(0))); - return; - default: - Emit(kMipsTruncWS, g.DefineAsRegister(node), - g.UseRegister(value->InputAt(0))); - return; - } - } else { - // Match float32 -> float64 -> int32 representation change path. - Emit(kMipsTruncWS, g.DefineAsRegister(node), - g.UseRegister(value->InputAt(0))); - return; - } - } - } - VisitRR(this, kMipsTruncWD, node); -} - -void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) { - VisitRR(this, kMipsTruncUwD, node); -} - -void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) { - VisitRR(this, kMipsTruncUwD, node); -} - -void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) { - MipsOperandGenerator g(this); - Node* value = node->InputAt(0); - // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to corresponding - // instruction.
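// A brief note on why the fold below is safe: every int32 is exactly
// representable as a float64, so the int32 -> float64 step never rounds, and
// rounding the float64 down to float32 afterwards yields the same value as a
// direct int32 -> float32 conversion (cvt.s.w). For example, 16777217
// (2^24 + 1) becomes 16777216.0f on either path.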
- if (CanCover(node, value) && - value->opcode() == IrOpcode::kChangeInt32ToFloat64) { - Emit(kMipsCvtSW, g.DefineAsRegister(node), - g.UseRegister(value->InputAt(0))); - return; - } - VisitRR(this, kMipsCvtSD, node); -} - -void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) { - VisitRR(this, kArchTruncateDoubleToI, node); -} - -void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) { - VisitRR(this, kMipsTruncWD, node); -} - -void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) { - VisitRR(this, kMipsFloat64ExtractLowWord32, node); -} - -void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) { - MipsOperandGenerator g(this); - Emit(kMipsFloat64InsertLowWord32, g.DefineAsRegister(node), - ImmediateOperand(ImmediateOperand::INLINE_INT32, 0), - g.UseRegister(node->InputAt(0))); -} - -void InstructionSelector::VisitFloat32Add(Node* node) { - MipsOperandGenerator g(this); - if (IsMipsArchVariant(kMips32r2)) { // Select Madd.S(z, x, y). - Float32BinopMatcher m(node); - if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) { - // For Add.S(Mul.S(x, y), z): - Float32BinopMatcher mleft(m.left().node()); - Emit(kMipsMaddS, g.DefineAsRegister(node), - g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()), - g.UseRegister(mleft.right().node())); - return; - } - if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) { - // For Add.S(x, Mul.S(y, z)): - Float32BinopMatcher mright(m.right().node()); - Emit(kMipsMaddS, g.DefineAsRegister(node), g.UseRegister(m.left().node()), - g.UseRegister(mright.left().node()), - g.UseRegister(mright.right().node())); - return; - } - } - VisitRRR(this, kMipsAddS, node); -} - -void InstructionSelector::VisitFloat64Add(Node* node) { - MipsOperandGenerator g(this); - if (IsMipsArchVariant(kMips32r2)) { // Select Madd.S(z, x, y). - Float64BinopMatcher m(node); - if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) { - // For Add.D(Mul.D(x, y), z): - Float64BinopMatcher mleft(m.left().node()); - Emit(kMipsMaddD, g.DefineAsRegister(node), - g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()), - g.UseRegister(mleft.right().node())); - return; - } - if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) { - // For Add.D(x, Mul.D(y, z)): - Float64BinopMatcher mright(m.right().node()); - Emit(kMipsMaddD, g.DefineAsRegister(node), g.UseRegister(m.left().node()), - g.UseRegister(mright.left().node()), - g.UseRegister(mright.right().node())); - return; - } - } - VisitRRR(this, kMipsAddD, node); -} - -void InstructionSelector::VisitFloat32Sub(Node* node) { - MipsOperandGenerator g(this); - if (IsMipsArchVariant(kMips32r2)) { // Select Madd.S(z, x, y). - Float32BinopMatcher m(node); - if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) { - // For Sub.S(Mul.S(x,y), z) select Msub.S(z, x, y). - Float32BinopMatcher mleft(m.left().node()); - Emit(kMipsMsubS, g.DefineAsRegister(node), - g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()), - g.UseRegister(mleft.right().node())); - return; - } - } - VisitRRR(this, kMipsSubS, node); -} - -void InstructionSelector::VisitFloat64Sub(Node* node) { - MipsOperandGenerator g(this); - if (IsMipsArchVariant(kMips32r2)) { // Select Madd.S(z, x, y). - Float64BinopMatcher m(node); - if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) { - // For Sub.D(Mul.S(x,y), z) select Msub.D(z, x, y). 
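// As a sketch (hypothetical source expression, not from this file),
//   x * y - z
// with double operands matches this pattern and becomes a single msub.d,
// provided the multiply has no other users (the CanCover check). The fusion
// is limited to MIPS32r2, where madd.d/msub.d round the intermediate product
// just like the separate mul/sub sequence would, so results do not change.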
- Float64BinopMatcher mleft(m.left().node()); - Emit(kMipsMsubD, g.DefineAsRegister(node), - g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()), - g.UseRegister(mleft.right().node())); - return; - } - } - VisitRRR(this, kMipsSubD, node); -} - -void InstructionSelector::VisitFloat32Mul(Node* node) { - VisitRRR(this, kMipsMulS, node); -} - -void InstructionSelector::VisitFloat64Mul(Node* node) { - VisitRRR(this, kMipsMulD, node); -} - -void InstructionSelector::VisitFloat32Div(Node* node) { - VisitRRR(this, kMipsDivS, node); -} - -void InstructionSelector::VisitFloat64Div(Node* node) { - VisitRRR(this, kMipsDivD, node); -} - -void InstructionSelector::VisitFloat64Mod(Node* node) { - MipsOperandGenerator g(this); - Emit(kMipsModD, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12), - g.UseFixed(node->InputAt(1), f14)) - ->MarkAsCall(); -} - -void InstructionSelector::VisitFloat32Max(Node* node) { - MipsOperandGenerator g(this); - Emit(kMipsFloat32Max, g.DefineAsRegister(node), - g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); -} - -void InstructionSelector::VisitFloat64Max(Node* node) { - MipsOperandGenerator g(this); - Emit(kMipsFloat64Max, g.DefineAsRegister(node), - g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); -} - -void InstructionSelector::VisitFloat32Min(Node* node) { - MipsOperandGenerator g(this); - Emit(kMipsFloat32Min, g.DefineAsRegister(node), - g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); -} - -void InstructionSelector::VisitFloat64Min(Node* node) { - MipsOperandGenerator g(this); - Emit(kMipsFloat64Min, g.DefineAsRegister(node), - g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1))); -} - -void InstructionSelector::VisitFloat32Abs(Node* node) { - VisitRR(this, kMipsAbsS, node); -} - -void InstructionSelector::VisitFloat64Abs(Node* node) { - VisitRR(this, kMipsAbsD, node); -} - -void InstructionSelector::VisitFloat32Sqrt(Node* node) { - VisitRR(this, kMipsSqrtS, node); -} - -void InstructionSelector::VisitFloat64Sqrt(Node* node) { - VisitRR(this, kMipsSqrtD, node); -} - -void InstructionSelector::VisitFloat32RoundDown(Node* node) { - VisitRR(this, kMipsFloat32RoundDown, node); -} - -void InstructionSelector::VisitFloat64RoundDown(Node* node) { - VisitRR(this, kMipsFloat64RoundDown, node); -} - -void InstructionSelector::VisitFloat32RoundUp(Node* node) { - VisitRR(this, kMipsFloat32RoundUp, node); -} - -void InstructionSelector::VisitFloat64RoundUp(Node* node) { - VisitRR(this, kMipsFloat64RoundUp, node); -} - -void InstructionSelector::VisitFloat32RoundTruncate(Node* node) { - VisitRR(this, kMipsFloat32RoundTruncate, node); -} - -void InstructionSelector::VisitFloat64RoundTruncate(Node* node) { - VisitRR(this, kMipsFloat64RoundTruncate, node); -} - -void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) { - UNREACHABLE(); -} - -void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) { - VisitRR(this, kMipsFloat32RoundTiesEven, node); -} - -void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) { - VisitRR(this, kMipsFloat64RoundTiesEven, node); -} - -void InstructionSelector::VisitFloat32Neg(Node* node) { - VisitRR(this, kMipsNegS, node); -} - -void InstructionSelector::VisitFloat64Neg(Node* node) { - VisitRR(this, kMipsNegD, node); -} - -void InstructionSelector::VisitFloat64Ieee754Binop(Node* node, - InstructionCode opcode) { - MipsOperandGenerator g(this); - Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f2), - 
g.UseFixed(node->InputAt(1), f4)) - ->MarkAsCall(); -} - -void InstructionSelector::VisitFloat64Ieee754Unop(Node* node, - InstructionCode opcode) { - MipsOperandGenerator g(this); - Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f12)) - ->MarkAsCall(); -} - -void InstructionSelector::EmitMoveParamToFPR(Node* node, int index) {} - -void InstructionSelector::EmitMoveFPRToParam(InstructionOperand* op, - LinkageLocation location) {} - -void InstructionSelector::EmitPrepareArguments( - ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor, - Node* node) { - MipsOperandGenerator g(this); - - // Prepare for C function call. - if (call_descriptor->IsCFunctionCall()) { - Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>( - call_descriptor->ParameterCount())), - 0, nullptr, 0, nullptr); - - // Poke any stack arguments. - int slot = kCArgSlotCount; - for (PushParameter input : (*arguments)) { - if (input.node) { - Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node), - g.TempImmediate(slot << kSystemPointerSizeLog2)); - ++slot; - } - } - } else { - // Possibly align stack here for functions. - int push_count = static_cast<int>(call_descriptor->ParameterSlotCount()); - if (push_count > 0) { - // Calculate needed space - int stack_size = 0; - for (size_t n = 0; n < arguments->size(); ++n) { - PushParameter input = (*arguments)[n]; - if (input.node) { - stack_size += input.location.GetSizeInPointers(); - } - } - Emit(kMipsStackClaim, g.NoOutput(), - g.TempImmediate(stack_size << kSystemPointerSizeLog2)); - } - for (size_t n = 0; n < arguments->size(); ++n) { - PushParameter input = (*arguments)[n]; - if (input.node) { - Emit(kMipsStoreToStackSlot, g.NoOutput(), g.UseRegister(input.node), - g.TempImmediate(n << kSystemPointerSizeLog2)); - } - } - } -} - -void InstructionSelector::EmitPrepareResults( - ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor, - Node* node) { - MipsOperandGenerator g(this); - - for (PushParameter output : *results) { - if (!output.location.IsCallerFrameSlot()) continue; - // Skip any alignment holes in nodes. - if (output.node != nullptr) { - DCHECK(!call_descriptor->IsCFunctionCall()); - if (output.location.GetType() == MachineType::Float32()) { - MarkAsFloat32(output.node); - } else if (output.location.GetType() == MachineType::Float64()) { - MarkAsFloat64(output.node); - } else if (output.location.GetType() == MachineType::Simd128()) { - MarkAsSimd128(output.node); - } - int offset = call_descriptor->GetOffsetToReturns(); - int reverse_slot = -output.location.GetLocation() - offset; - Emit(kMipsPeek, g.DefineAsRegister(output.node), - g.UseImmediate(reverse_slot)); - } - } -} - -bool InstructionSelector::IsTailCallAddressImmediate() { return false; } - -void InstructionSelector::VisitUnalignedLoad(Node* node) { - LoadRepresentation load_rep = LoadRepresentationOf(node->op()); - MipsOperandGenerator g(this); - Node* base = node->InputAt(0); - Node* index = node->InputAt(1); - - ArchOpcode opcode; - switch (load_rep.representation()) { - case MachineRepresentation::kWord8: - opcode = load_rep.IsUnsigned() ? kMipsLbu : kMipsLb; - break; - case MachineRepresentation::kWord16: - opcode = load_rep.IsUnsigned() ? kMipsUlhu : kMipsUlh; - break; - case MachineRepresentation::kTaggedSigned: // Fall through. - case MachineRepresentation::kTaggedPointer: // Fall through. - case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord32: - opcode = kMipsUlw; - break; - case MachineRepresentation::kFloat32: - opcode = kMipsUlwc1; - break; - case MachineRepresentation::kFloat64: - opcode = kMipsUldc1; - break; - case MachineRepresentation::kSimd128: - opcode = kMipsMsaLd; - break; - case MachineRepresentation::kSimd256: // Fall through. - case MachineRepresentation::kBit: // Fall through. - case MachineRepresentation::kCompressedPointer: // Fall through. - case MachineRepresentation::kCompressed: // Fall through. - case MachineRepresentation::kSandboxedPointer: // Fall through. - case MachineRepresentation::kWord64: // Fall through. - case MachineRepresentation::kMapWord: // Fall through. - case MachineRepresentation::kNone: - UNREACHABLE(); - } - - if (g.CanBeImmediate(index, opcode)) { - Emit(opcode | AddressingModeField::encode(kMode_MRI), - g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index)); - } else { - InstructionOperand addr_reg = g.TempRegister(); - Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg, - g.UseRegister(index), g.UseRegister(base)); - // Emit desired load opcode, using temp addr_reg. - Emit(opcode | AddressingModeField::encode(kMode_MRI), - g.DefineAsRegister(node), addr_reg, g.TempImmediate(0)); - } -} - -void InstructionSelector::VisitUnalignedStore(Node* node) { - MipsOperandGenerator g(this); - Node* base = node->InputAt(0); - Node* index = node->InputAt(1); - Node* value = node->InputAt(2); - - UnalignedStoreRepresentation rep = UnalignedStoreRepresentationOf(node->op()); - - // TODO(mips): I guess this could be done in a better way. - ArchOpcode opcode; - switch (rep) { - case MachineRepresentation::kFloat32: - opcode = kMipsUswc1; - break; - case MachineRepresentation::kFloat64: - opcode = kMipsUsdc1; - break; - case MachineRepresentation::kWord8: - opcode = kMipsSb; - break; - case MachineRepresentation::kWord16: - opcode = kMipsUsh; - break; - case MachineRepresentation::kTaggedSigned: // Fall through. - case MachineRepresentation::kTaggedPointer: // Fall through. - case MachineRepresentation::kTagged: // Fall through. - case MachineRepresentation::kWord32: - opcode = kMipsUsw; - break; - case MachineRepresentation::kSimd128: - opcode = kMipsMsaSt; - break; - case MachineRepresentation::kSimd256: // Fall through. - case MachineRepresentation::kBit: // Fall through. - case MachineRepresentation::kCompressedPointer: // Fall through. - case MachineRepresentation::kCompressed: // Fall through. - case MachineRepresentation::kSandboxedPointer: // Fall through. - case MachineRepresentation::kWord64: // Fall through. - case MachineRepresentation::kMapWord: // Fall through. - case MachineRepresentation::kNone: - UNREACHABLE(); - } - - if (g.CanBeImmediate(index, opcode)) { - Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(), - g.UseRegister(base), g.UseImmediate(index), - g.UseRegisterOrImmediateZero(value)); - } else { - InstructionOperand addr_reg = g.TempRegister(); - Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg, - g.UseRegister(index), g.UseRegister(base)); - // Emit desired store opcode, using temp addr_reg. - Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(), - addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value)); - } -} - -namespace { -// Shared routine for multiple compare operations. 
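// In the float compare helpers below, a literal 0.0 operand is passed as an
// immediate (see the IsZero() checks) rather than forced into a register, so
// the code generator can compare against a zero it materializes itself
// instead of spending an FPU register on the constant.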
-static void VisitCompare(InstructionSelector* selector, InstructionCode opcode, - InstructionOperand left, InstructionOperand right, - FlagsContinuation* cont) { - selector->EmitWithContinuation(opcode, left, right, cont); -} - -// Shared routine for multiple float32 compare operations. -void VisitFloat32Compare(InstructionSelector* selector, Node* node, - FlagsContinuation* cont) { - MipsOperandGenerator g(selector); - Float32BinopMatcher m(node); - InstructionOperand lhs, rhs; - - lhs = m.left().IsZero() ? g.UseImmediate(m.left().node()) - : g.UseRegister(m.left().node()); - rhs = m.right().IsZero() ? g.UseImmediate(m.right().node()) - : g.UseRegister(m.right().node()); - VisitCompare(selector, kMipsCmpS, lhs, rhs, cont); -} - -// Shared routine for multiple float64 compare operations. -void VisitFloat64Compare(InstructionSelector* selector, Node* node, - FlagsContinuation* cont) { - MipsOperandGenerator g(selector); - Float64BinopMatcher m(node); - InstructionOperand lhs, rhs; - - lhs = m.left().IsZero() ? g.UseImmediate(m.left().node()) - : g.UseRegister(m.left().node()); - rhs = m.right().IsZero() ? g.UseImmediate(m.right().node()) - : g.UseRegister(m.right().node()); - VisitCompare(selector, kMipsCmpD, lhs, rhs, cont); -} - -// Shared routine for multiple word compare operations. -void VisitWordCompare(InstructionSelector* selector, Node* node, - InstructionCode opcode, FlagsContinuation* cont, - bool commutative) { - MipsOperandGenerator g(selector); - Node* left = node->InputAt(0); - Node* right = node->InputAt(1); - - // Match immediates on left or right side of comparison. - if (g.CanBeImmediate(right, opcode)) { - if (opcode == kMipsTst) { - VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right), - cont); - } else { - switch (cont->condition()) { - case kEqual: - case kNotEqual: - if (cont->IsSet()) { - VisitCompare(selector, opcode, g.UseRegister(left), - g.UseImmediate(right), cont); - } else { - VisitCompare(selector, opcode, g.UseRegister(left), - g.UseRegister(right), cont); - } - break; - case kSignedLessThan: - case kSignedGreaterThanOrEqual: - case kUnsignedLessThan: - case kUnsignedGreaterThanOrEqual: - VisitCompare(selector, opcode, g.UseRegister(left), - g.UseImmediate(right), cont); - break; - default: - VisitCompare(selector, opcode, g.UseRegister(left), - g.UseRegister(right), cont); - } - } - } else if (g.CanBeImmediate(left, opcode)) { - if (!commutative) cont->Commute(); - if (opcode == kMipsTst) { - VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left), - cont); - } else { - switch (cont->condition()) { - case kEqual: - case kNotEqual: - if (cont->IsSet()) { - VisitCompare(selector, opcode, g.UseRegister(right), - g.UseImmediate(left), cont); - } else { - VisitCompare(selector, opcode, g.UseRegister(right), - g.UseRegister(left), cont); - } - break; - case kSignedLessThan: - case kSignedGreaterThanOrEqual: - case kUnsignedLessThan: - case kUnsignedGreaterThanOrEqual: - VisitCompare(selector, opcode, g.UseRegister(right), - g.UseImmediate(left), cont); - break; - default: - VisitCompare(selector, opcode, g.UseRegister(right), - g.UseRegister(left), cont); - } - } - } else { - VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right), - cont); - } -} - -void VisitWordCompare(InstructionSelector* selector, Node* node, - FlagsContinuation* cont) { - VisitWordCompare(selector, node, kMipsCmp, cont, false); -} - -} // namespace - -void InstructionSelector::VisitStackPointerGreaterThan( - Node* node, 
FlagsContinuation* cont) { - StackCheckKind kind = StackCheckKindOf(node->op()); - InstructionCode opcode = - kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind)); - - MipsOperandGenerator g(this); - - // No outputs. - InstructionOperand* const outputs = nullptr; - const int output_count = 0; - - // TempRegister(0) is used to store the comparison result. - // Applying an offset to this stack check requires a temp register. Offsets - // are only applied to the first stack check. If applying an offset, we must - // ensure the input and temp registers do not alias, thus kUniqueRegister. - InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()}; - const int temp_count = (kind == StackCheckKind::kJSFunctionEntry ? 2 : 1); - const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry) - ? OperandGenerator::kUniqueRegister - : OperandGenerator::kRegister; - - Node* const value = node->InputAt(0); - InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)}; - static constexpr int input_count = arraysize(inputs); - - EmitWithContinuation(opcode, output_count, outputs, input_count, inputs, - temp_count, temps, cont); -} - -// Shared routine for word comparisons against zero. -void InstructionSelector::VisitWordCompareZero(Node* user, Node* value, - FlagsContinuation* cont) { - // Try to combine with comparisons against 0 by simply inverting the branch. - while (value->opcode() == IrOpcode::kWord32Equal && CanCover(user, value)) { - Int32BinopMatcher m(value); - if (!m.right().Is(0)) break; - - user = value; - value = m.left().node(); - cont->Negate(); - } - - if (CanCover(user, value)) { - switch (value->opcode()) { - case IrOpcode::kWord32Equal: - cont->OverwriteAndNegateIfEqual(kEqual); - return VisitWordCompare(this, value, cont); - case IrOpcode::kInt32LessThan: - cont->OverwriteAndNegateIfEqual(kSignedLessThan); - return VisitWordCompare(this, value, cont); - case IrOpcode::kInt32LessThanOrEqual: - cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual); - return VisitWordCompare(this, value, cont); - case IrOpcode::kUint32LessThan: - cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); - return VisitWordCompare(this, value, cont); - case IrOpcode::kUint32LessThanOrEqual: - cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); - return VisitWordCompare(this, value, cont); - case IrOpcode::kFloat32Equal: - cont->OverwriteAndNegateIfEqual(kEqual); - return VisitFloat32Compare(this, value, cont); - case IrOpcode::kFloat32LessThan: - cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); - return VisitFloat32Compare(this, value, cont); - case IrOpcode::kFloat32LessThanOrEqual: - cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); - return VisitFloat32Compare(this, value, cont); - case IrOpcode::kFloat64Equal: - cont->OverwriteAndNegateIfEqual(kEqual); - return VisitFloat64Compare(this, value, cont); - case IrOpcode::kFloat64LessThan: - cont->OverwriteAndNegateIfEqual(kUnsignedLessThan); - return VisitFloat64Compare(this, value, cont); - case IrOpcode::kFloat64LessThanOrEqual: - cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual); - return VisitFloat64Compare(this, value, cont); - case IrOpcode::kProjection: - // Check if this is the overflow output projection of an - // <Operation>WithOverflow node.
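// A sketch of the shape being matched here: a node such as
// Int32AddWithOverflow produces projection 0 (the 32-bit result) and
// projection 1 (the overflow bit). When a branch tests projection 1, the
// code below fuses the branch into the arithmetic instruction itself
// (e.g. kMipsAddOvf with an overflow continuation) instead of materializing
// a boolean and then comparing it against zero.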
- if (ProjectionIndexOf(value->op()) == 1u) { - // We cannot combine the <Operation>WithOverflow with this branch - // unless the 0th projection (the use of the actual value of the - // <Operation> is either nullptr, which means there's no use of the - // actual value, or was already defined, which means it is scheduled - // *AFTER* this branch). - Node* const node = value->InputAt(0); - Node* const result = NodeProperties::FindProjection(node, 0); - if (!result || IsDefined(result)) { - switch (node->opcode()) { - case IrOpcode::kInt32AddWithOverflow: - cont->OverwriteAndNegateIfEqual(kOverflow); - return VisitBinop(this, node, kMipsAddOvf, cont); - case IrOpcode::kInt32SubWithOverflow: - cont->OverwriteAndNegateIfEqual(kOverflow); - return VisitBinop(this, node, kMipsSubOvf, cont); - case IrOpcode::kInt32MulWithOverflow: - cont->OverwriteAndNegateIfEqual(kOverflow); - return VisitBinop(this, node, kMipsMulOvf, cont); - default: - break; - } - } - } - break; - case IrOpcode::kWord32And: - return VisitWordCompare(this, value, kMipsTst, cont, true); - case IrOpcode::kStackPointerGreaterThan: - cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition); - return VisitStackPointerGreaterThan(value, cont); - default: - break; - } - } - - // Continuation could not be combined with a compare, emit compare against 0. - MipsOperandGenerator g(this); - InstructionOperand const value_operand = g.UseRegister(value); - EmitWithContinuation(kMipsCmp, value_operand, g.TempImmediate(0), cont); -} - -void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) { - MipsOperandGenerator g(this); - InstructionOperand value_operand = g.UseRegister(node->InputAt(0)); - - // Emit either ArchTableSwitch or ArchBinarySearchSwitch. - if (enable_switch_jump_table_ == kEnableSwitchJumpTable) { - static const size_t kMaxTableSwitchValueRange = 2 << 16; - size_t table_space_cost = 9 + sw.value_range(); - size_t table_time_cost = 3; - size_t lookup_space_cost = 2 + 2 * sw.case_count(); - size_t lookup_time_cost = sw.case_count(); - if (sw.case_count() > 0 && - table_space_cost + 3 * table_time_cost <= - lookup_space_cost + 3 * lookup_time_cost && - sw.min_value() > std::numeric_limits<int32_t>::min() && - sw.value_range() <= kMaxTableSwitchValueRange) { - InstructionOperand index_operand = value_operand; - if (sw.min_value()) { - index_operand = g.TempRegister(); - Emit(kMipsSub, index_operand, value_operand, - g.TempImmediate(sw.min_value())); - } - // Generate a table lookup. - return EmitTableSwitch(sw, index_operand); - } - } - - // Generate a tree of conditional jumps.
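// Working the heuristic above through with made-up numbers: 4 cases spread
// over a value range of 1000 give a table cost of (9 + 1000) + 3*3 = 1018
// versus a lookup cost of (2 + 2*4) + 3*4 = 22, so the jump table is rejected
// and we fall through to the binary search below. 100 dense cases (range 100)
// give 109 + 9 = 118 versus 202 + 300 = 502, and the table wins.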
- return EmitBinarySearchSwitch(std::move(sw), value_operand); -} - -void InstructionSelector::VisitWord32Equal(Node* const node) { - FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); - Int32BinopMatcher m(node); - if (m.right().Is(0)) { - return VisitWordCompareZero(m.node(), m.left().node(), &cont); - } - VisitWordCompare(this, node, &cont); -} - -void InstructionSelector::VisitInt32LessThan(Node* node) { - FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node); - VisitWordCompare(this, node, &cont); -} - -void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) { - FlagsContinuation cont = - FlagsContinuation::ForSet(kSignedLessThanOrEqual, node); - VisitWordCompare(this, node, &cont); -} - -void InstructionSelector::VisitUint32LessThan(Node* node) { - FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); - VisitWordCompare(this, node, &cont); -} - -void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) { - FlagsContinuation cont = - FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); - VisitWordCompare(this, node, &cont); -} - -void InstructionSelector::VisitInt32AddWithOverflow(Node* node) { - if (Node* ovf = NodeProperties::FindProjection(node, 1)) { - FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); - return VisitBinop(this, node, kMipsAddOvf, &cont); - } - FlagsContinuation cont; - VisitBinop(this, node, kMipsAddOvf, &cont); -} - -void InstructionSelector::VisitInt32SubWithOverflow(Node* node) { - if (Node* ovf = NodeProperties::FindProjection(node, 1)) { - FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); - return VisitBinop(this, node, kMipsSubOvf, &cont); - } - FlagsContinuation cont; - VisitBinop(this, node, kMipsSubOvf, &cont); -} - -void InstructionSelector::VisitInt32MulWithOverflow(Node* node) { - if (Node* ovf = NodeProperties::FindProjection(node, 1)) { - FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf); - return VisitBinop(this, node, kMipsMulOvf, &cont); - } - FlagsContinuation cont; - VisitBinop(this, node, kMipsMulOvf, &cont); -} - -void InstructionSelector::VisitFloat32Equal(Node* node) { - FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); - VisitFloat32Compare(this, node, &cont); -} - -void InstructionSelector::VisitFloat32LessThan(Node* node) { - FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); - VisitFloat32Compare(this, node, &cont); -} - -void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) { - FlagsContinuation cont = - FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); - VisitFloat32Compare(this, node, &cont); -} - -void InstructionSelector::VisitFloat64Equal(Node* node) { - FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node); - VisitFloat64Compare(this, node, &cont); -} - -void InstructionSelector::VisitFloat64LessThan(Node* node) { - FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node); - VisitFloat64Compare(this, node, &cont); -} - -void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) { - FlagsContinuation cont = - FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node); - VisitFloat64Compare(this, node, &cont); -} - -void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) { - MipsOperandGenerator g(this); - Emit(kMipsFloat64ExtractLowWord32, g.DefineAsRegister(node), - g.UseRegister(node->InputAt(0))); -} - -void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) { - 
MipsOperandGenerator g(this); - Emit(kMipsFloat64ExtractHighWord32, g.DefineAsRegister(node), - g.UseRegister(node->InputAt(0))); -} - -void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) { - MipsOperandGenerator g(this); - Node* left = node->InputAt(0); - Node* right = node->InputAt(1); - Emit(kMipsFloat64InsertLowWord32, g.DefineSameAsFirst(node), - g.UseRegister(left), g.UseRegister(right)); -} - -void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) { - MipsOperandGenerator g(this); - Node* left = node->InputAt(0); - Node* right = node->InputAt(1); - Emit(kMipsFloat64InsertHighWord32, g.DefineSameAsFirst(node), - g.UseRegister(left), g.UseRegister(right)); -} - -void InstructionSelector::VisitFloat64SilenceNaN(Node* node) { - MipsOperandGenerator g(this); - Node* left = node->InputAt(0); - InstructionOperand temps[] = {g.TempRegister()}; - Emit(kMipsFloat64SilenceNaN, g.DefineSameAsFirst(node), g.UseRegister(left), - arraysize(temps), temps); -} - -void InstructionSelector::VisitMemoryBarrier(Node* node) { - MipsOperandGenerator g(this); - Emit(kMipsSync, g.NoOutput()); -} - -void InstructionSelector::VisitWord32AtomicLoad(Node* node) { - // TODO(mips-dev): Confirm whether there is any mips32 chip in use and - // support atomic loads of tagged values with barriers. - AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op()); - LoadRepresentation load_rep = atomic_load_params.representation(); - MipsOperandGenerator g(this); - Node* base = node->InputAt(0); - Node* index = node->InputAt(1); - ArchOpcode opcode; - switch (load_rep.representation()) { - case MachineRepresentation::kWord8: - opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8; - break; - case MachineRepresentation::kWord16: - opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16; - break; - case MachineRepresentation::kTaggedSigned: // Fall through. - case MachineRepresentation::kTaggedPointer: // Fall through. - case MachineRepresentation::kTagged: - case MachineRepresentation::kWord32: - opcode = kAtomicLoadWord32; - break; - default: - UNREACHABLE(); - } - - if (g.CanBeImmediate(index, opcode)) { - Emit(opcode | AddressingModeField::encode(kMode_MRI), - g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index)); - } else { - InstructionOperand addr_reg = g.TempRegister(); - Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg, - g.UseRegister(index), g.UseRegister(base)); - // Emit desired load opcode, using temp addr_reg. - Emit(opcode | AddressingModeField::encode(kMode_MRI), - g.DefineAsRegister(node), addr_reg, g.TempImmediate(0)); - } -} - -void InstructionSelector::VisitWord32AtomicStore(Node* node) { - // TODO(mips-dev): Confirm whether there is any mips32 chip in use and - // support atomic stores of tagged values with barriers. - AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op()); - MachineRepresentation rep = store_params.representation(); - MipsOperandGenerator g(this); - Node* base = node->InputAt(0); - Node* index = node->InputAt(1); - Node* value = node->InputAt(2); - ArchOpcode opcode; - switch (rep) { - case MachineRepresentation::kWord8: - opcode = kAtomicStoreWord8; - break; - case MachineRepresentation::kWord16: - opcode = kAtomicStoreWord16; - break; - case MachineRepresentation::kTaggedSigned: // Fall through. - case MachineRepresentation::kTaggedPointer: // Fall through. 
- case MachineRepresentation::kTagged: - case MachineRepresentation::kWord32: - opcode = kAtomicStoreWord32; - break; - default: - UNREACHABLE(); - } - - if (g.CanBeImmediate(index, opcode)) { - Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(), - g.UseRegister(base), g.UseImmediate(index), - g.UseRegisterOrImmediateZero(value)); - } else { - InstructionOperand addr_reg = g.TempRegister(); - Emit(kMipsAdd | AddressingModeField::encode(kMode_None), addr_reg, - g.UseRegister(index), g.UseRegister(base)); - // Emit desired store opcode, using temp addr_reg. - Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(), - addr_reg, g.TempImmediate(0), g.UseRegisterOrImmediateZero(value)); - } -} - -void InstructionSelector::VisitWord32AtomicExchange(Node* node) { - MipsOperandGenerator g(this); - Node* base = node->InputAt(0); - Node* index = node->InputAt(1); - Node* value = node->InputAt(2); - ArchOpcode opcode; - MachineType type = AtomicOpType(node->op()); - if (type == MachineType::Int8()) { - opcode = kAtomicExchangeInt8; - } else if (type == MachineType::Uint8()) { - opcode = kAtomicExchangeUint8; - } else if (type == MachineType::Int16()) { - opcode = kAtomicExchangeInt16; - } else if (type == MachineType::Uint16()) { - opcode = kAtomicExchangeUint16; - } else if (type == MachineType::Int32() || type == MachineType::Uint32()) { - opcode = kAtomicExchangeWord32; - } else { - UNREACHABLE(); - } - - AddressingMode addressing_mode = kMode_MRI; - InstructionOperand inputs[3]; - size_t input_count = 0; - inputs[input_count++] = g.UseUniqueRegister(base); - inputs[input_count++] = g.UseUniqueRegister(index); - inputs[input_count++] = g.UseUniqueRegister(value); - InstructionOperand outputs[1]; - outputs[0] = g.UseUniqueRegister(node); - InstructionOperand temp[3]; - temp[0] = g.TempRegister(); - temp[1] = g.TempRegister(); - temp[2] = g.TempRegister(); - InstructionCode code = opcode | AddressingModeField::encode(addressing_mode); - Emit(code, 1, outputs, input_count, inputs, 3, temp); -} - -void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) { - MipsOperandGenerator g(this); - Node* base = node->InputAt(0); - Node* index = node->InputAt(1); - Node* old_value = node->InputAt(2); - Node* new_value = node->InputAt(3); - ArchOpcode opcode; - MachineType type = AtomicOpType(node->op()); - if (type == MachineType::Int8()) { - opcode = kAtomicCompareExchangeInt8; - } else if (type == MachineType::Uint8()) { - opcode = kAtomicCompareExchangeUint8; - } else if (type == MachineType::Int16()) { - opcode = kAtomicCompareExchangeInt16; - } else if (type == MachineType::Uint16()) { - opcode = kAtomicCompareExchangeUint16; - } else if (type == MachineType::Int32() || type == MachineType::Uint32()) { - opcode = kAtomicCompareExchangeWord32; - } else { - UNREACHABLE(); - } - - AddressingMode addressing_mode = kMode_MRI; - InstructionOperand inputs[4]; - size_t input_count = 0; - inputs[input_count++] = g.UseUniqueRegister(base); - inputs[input_count++] = g.UseUniqueRegister(index); - inputs[input_count++] = g.UseUniqueRegister(old_value); - inputs[input_count++] = g.UseUniqueRegister(new_value); - InstructionOperand outputs[1]; - outputs[0] = g.UseUniqueRegister(node); - InstructionOperand temp[3]; - temp[0] = g.TempRegister(); - temp[1] = g.TempRegister(); - temp[2] = g.TempRegister(); - InstructionCode code = opcode | AddressingModeField::encode(addressing_mode); - Emit(code, 1, outputs, input_count, inputs, 3, temp); -} - -void 
InstructionSelector::VisitWord32AtomicBinaryOperation( - Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op, - ArchOpcode uint16_op, ArchOpcode word32_op) { - MipsOperandGenerator g(this); - Node* base = node->InputAt(0); - Node* index = node->InputAt(1); - Node* value = node->InputAt(2); - ArchOpcode opcode; - MachineType type = AtomicOpType(node->op()); - if (type == MachineType::Int8()) { - opcode = int8_op; - } else if (type == MachineType::Uint8()) { - opcode = uint8_op; - } else if (type == MachineType::Int16()) { - opcode = int16_op; - } else if (type == MachineType::Uint16()) { - opcode = uint16_op; - } else if (type == MachineType::Int32() || type == MachineType::Uint32()) { - opcode = word32_op; - } else { - UNREACHABLE(); - } - - AddressingMode addressing_mode = kMode_MRI; - InstructionOperand inputs[3]; - size_t input_count = 0; - inputs[input_count++] = g.UseUniqueRegister(base); - inputs[input_count++] = g.UseUniqueRegister(index); - inputs[input_count++] = g.UseUniqueRegister(value); - InstructionOperand outputs[1]; - outputs[0] = g.UseUniqueRegister(node); - InstructionOperand temps[4]; - temps[0] = g.TempRegister(); - temps[1] = g.TempRegister(); - temps[2] = g.TempRegister(); - temps[3] = g.TempRegister(); - InstructionCode code = opcode | AddressingModeField::encode(addressing_mode); - Emit(code, 1, outputs, input_count, inputs, 4, temps); -} - -#define VISIT_ATOMIC_BINOP(op) \ - void InstructionSelector::VisitWord32Atomic##op(Node* node) { \ - VisitWord32AtomicBinaryOperation( \ - node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \ - kAtomic##op##Uint16, kAtomic##op##Word32); \ - } -VISIT_ATOMIC_BINOP(Add) -VISIT_ATOMIC_BINOP(Sub) -VISIT_ATOMIC_BINOP(And) -VISIT_ATOMIC_BINOP(Or) -VISIT_ATOMIC_BINOP(Xor) -#undef VISIT_ATOMIC_BINOP - -void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) { - UNREACHABLE(); -} - -void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) { - UNREACHABLE(); -} - -#define SIMD_TYPE_LIST(V) \ - V(F32x4) \ - V(I32x4) \ - V(I16x8) \ - V(I8x16) - -#define SIMD_UNOP_LIST(V) \ - V(F64x2Abs, kMipsF64x2Abs) \ - V(F64x2Neg, kMipsF64x2Neg) \ - V(F64x2Sqrt, kMipsF64x2Sqrt) \ - V(F64x2Ceil, kMipsF64x2Ceil) \ - V(F64x2Floor, kMipsF64x2Floor) \ - V(F64x2Trunc, kMipsF64x2Trunc) \ - V(F64x2NearestInt, kMipsF64x2NearestInt) \ - V(F64x2ConvertLowI32x4S, kMipsF64x2ConvertLowI32x4S) \ - V(F64x2ConvertLowI32x4U, kMipsF64x2ConvertLowI32x4U) \ - V(F64x2PromoteLowF32x4, kMipsF64x2PromoteLowF32x4) \ - V(I64x2Neg, kMipsI64x2Neg) \ - V(I64x2BitMask, kMipsI64x2BitMask) \ - V(I64x2Abs, kMipsI64x2Abs) \ - V(I64x2SConvertI32x4Low, kMipsI64x2SConvertI32x4Low) \ - V(I64x2SConvertI32x4High, kMipsI64x2SConvertI32x4High) \ - V(I64x2UConvertI32x4Low, kMipsI64x2UConvertI32x4Low) \ - V(I64x2UConvertI32x4High, kMipsI64x2UConvertI32x4High) \ - V(F32x4SConvertI32x4, kMipsF32x4SConvertI32x4) \ - V(F32x4UConvertI32x4, kMipsF32x4UConvertI32x4) \ - V(F32x4Abs, kMipsF32x4Abs) \ - V(F32x4Neg, kMipsF32x4Neg) \ - V(F32x4Sqrt, kMipsF32x4Sqrt) \ - V(F32x4Ceil, kMipsF32x4Ceil) \ - V(F32x4Floor, kMipsF32x4Floor) \ - V(F32x4Trunc, kMipsF32x4Trunc) \ - V(F32x4NearestInt, kMipsF32x4NearestInt) \ - V(F32x4DemoteF64x2Zero, kMipsF32x4DemoteF64x2Zero) \ - V(I32x4SConvertF32x4, kMipsI32x4SConvertF32x4) \ - V(I32x4UConvertF32x4, kMipsI32x4UConvertF32x4) \ - V(I32x4Neg, kMipsI32x4Neg) \ - V(I32x4BitMask, kMipsI32x4BitMask) \ - V(I32x4SConvertI16x8Low, kMipsI32x4SConvertI16x8Low) \ - V(I32x4SConvertI16x8High, kMipsI32x4SConvertI16x8High) \ - 
V(I32x4UConvertI16x8Low, kMipsI32x4UConvertI16x8Low) \ - V(I32x4UConvertI16x8High, kMipsI32x4UConvertI16x8High) \ - V(I32x4ExtAddPairwiseI16x8S, kMipsI32x4ExtAddPairwiseI16x8S) \ - V(I32x4ExtAddPairwiseI16x8U, kMipsI32x4ExtAddPairwiseI16x8U) \ - V(I32x4TruncSatF64x2SZero, kMipsI32x4TruncSatF64x2SZero) \ - V(I32x4TruncSatF64x2UZero, kMipsI32x4TruncSatF64x2UZero) \ - V(I16x8Neg, kMipsI16x8Neg) \ - V(I16x8BitMask, kMipsI16x8BitMask) \ - V(I16x8SConvertI8x16Low, kMipsI16x8SConvertI8x16Low) \ - V(I16x8SConvertI8x16High, kMipsI16x8SConvertI8x16High) \ - V(I16x8UConvertI8x16Low, kMipsI16x8UConvertI8x16Low) \ - V(I16x8UConvertI8x16High, kMipsI16x8UConvertI8x16High) \ - V(I16x8ExtAddPairwiseI8x16S, kMipsI16x8ExtAddPairwiseI8x16S) \ - V(I16x8ExtAddPairwiseI8x16U, kMipsI16x8ExtAddPairwiseI8x16U) \ - V(I8x16Neg, kMipsI8x16Neg) \ - V(I8x16Popcnt, kMipsI8x16Popcnt) \ - V(I8x16BitMask, kMipsI8x16BitMask) \ - V(S128Not, kMipsS128Not) \ - V(I64x2AllTrue, kMipsI64x2AllTrue) \ - V(I32x4AllTrue, kMipsI32x4AllTrue) \ - V(I16x8AllTrue, kMipsI16x8AllTrue) \ - V(I8x16AllTrue, kMipsI8x16AllTrue) \ - V(V128AnyTrue, kMipsV128AnyTrue) - -#define SIMD_SHIFT_OP_LIST(V) \ - V(I64x2Shl) \ - V(I64x2ShrS) \ - V(I64x2ShrU) \ - V(I32x4Shl) \ - V(I32x4ShrS) \ - V(I32x4ShrU) \ - V(I16x8Shl) \ - V(I16x8ShrS) \ - V(I16x8ShrU) \ - V(I8x16Shl) \ - V(I8x16ShrS) \ - V(I8x16ShrU) - -#define SIMD_BINOP_LIST(V) \ - V(F64x2Add, kMipsF64x2Add) \ - V(F64x2Sub, kMipsF64x2Sub) \ - V(F64x2Mul, kMipsF64x2Mul) \ - V(F64x2Div, kMipsF64x2Div) \ - V(F64x2Min, kMipsF64x2Min) \ - V(F64x2Max, kMipsF64x2Max) \ - V(F64x2Eq, kMipsF64x2Eq) \ - V(F64x2Ne, kMipsF64x2Ne) \ - V(F64x2Lt, kMipsF64x2Lt) \ - V(F64x2Le, kMipsF64x2Le) \ - V(I64x2Eq, kMipsI64x2Eq) \ - V(I64x2Ne, kMipsI64x2Ne) \ - V(I64x2Add, kMipsI64x2Add) \ - V(I64x2Sub, kMipsI64x2Sub) \ - V(I64x2Mul, kMipsI64x2Mul) \ - V(I64x2GtS, kMipsI64x2GtS) \ - V(I64x2GeS, kMipsI64x2GeS) \ - V(I64x2ExtMulLowI32x4S, kMipsI64x2ExtMulLowI32x4S) \ - V(I64x2ExtMulHighI32x4S, kMipsI64x2ExtMulHighI32x4S) \ - V(I64x2ExtMulLowI32x4U, kMipsI64x2ExtMulLowI32x4U) \ - V(I64x2ExtMulHighI32x4U, kMipsI64x2ExtMulHighI32x4U) \ - V(F32x4Add, kMipsF32x4Add) \ - V(F32x4Sub, kMipsF32x4Sub) \ - V(F32x4Mul, kMipsF32x4Mul) \ - V(F32x4Div, kMipsF32x4Div) \ - V(F32x4Max, kMipsF32x4Max) \ - V(F32x4Min, kMipsF32x4Min) \ - V(F32x4Eq, kMipsF32x4Eq) \ - V(F32x4Ne, kMipsF32x4Ne) \ - V(F32x4Lt, kMipsF32x4Lt) \ - V(F32x4Le, kMipsF32x4Le) \ - V(I32x4Add, kMipsI32x4Add) \ - V(I32x4Sub, kMipsI32x4Sub) \ - V(I32x4Mul, kMipsI32x4Mul) \ - V(I32x4MaxS, kMipsI32x4MaxS) \ - V(I32x4MinS, kMipsI32x4MinS) \ - V(I32x4MaxU, kMipsI32x4MaxU) \ - V(I32x4MinU, kMipsI32x4MinU) \ - V(I32x4Eq, kMipsI32x4Eq) \ - V(I32x4Ne, kMipsI32x4Ne) \ - V(I32x4GtS, kMipsI32x4GtS) \ - V(I32x4GeS, kMipsI32x4GeS) \ - V(I32x4GtU, kMipsI32x4GtU) \ - V(I32x4GeU, kMipsI32x4GeU) \ - V(I32x4Abs, kMipsI32x4Abs) \ - V(I32x4DotI16x8S, kMipsI32x4DotI16x8S) \ - V(I32x4ExtMulLowI16x8S, kMipsI32x4ExtMulLowI16x8S) \ - V(I32x4ExtMulHighI16x8S, kMipsI32x4ExtMulHighI16x8S) \ - V(I32x4ExtMulLowI16x8U, kMipsI32x4ExtMulLowI16x8U) \ - V(I32x4ExtMulHighI16x8U, kMipsI32x4ExtMulHighI16x8U) \ - V(I16x8Add, kMipsI16x8Add) \ - V(I16x8AddSatS, kMipsI16x8AddSatS) \ - V(I16x8AddSatU, kMipsI16x8AddSatU) \ - V(I16x8Sub, kMipsI16x8Sub) \ - V(I16x8SubSatS, kMipsI16x8SubSatS) \ - V(I16x8SubSatU, kMipsI16x8SubSatU) \ - V(I16x8Mul, kMipsI16x8Mul) \ - V(I16x8MaxS, kMipsI16x8MaxS) \ - V(I16x8MinS, kMipsI16x8MinS) \ - V(I16x8MaxU, kMipsI16x8MaxU) \ - V(I16x8MinU, kMipsI16x8MinU) \ - V(I16x8Eq, kMipsI16x8Eq) \ - 
V(I16x8Ne, kMipsI16x8Ne) \ - V(I16x8GtS, kMipsI16x8GtS) \ - V(I16x8GeS, kMipsI16x8GeS) \ - V(I16x8GtU, kMipsI16x8GtU) \ - V(I16x8GeU, kMipsI16x8GeU) \ - V(I16x8SConvertI32x4, kMipsI16x8SConvertI32x4) \ - V(I16x8UConvertI32x4, kMipsI16x8UConvertI32x4) \ - V(I16x8Q15MulRSatS, kMipsI16x8Q15MulRSatS) \ - V(I16x8ExtMulLowI8x16S, kMipsI16x8ExtMulLowI8x16S) \ - V(I16x8ExtMulHighI8x16S, kMipsI16x8ExtMulHighI8x16S) \ - V(I16x8ExtMulLowI8x16U, kMipsI16x8ExtMulLowI8x16U) \ - V(I16x8ExtMulHighI8x16U, kMipsI16x8ExtMulHighI8x16U) \ - V(I16x8RoundingAverageU, kMipsI16x8RoundingAverageU) \ - V(I16x8Abs, kMipsI16x8Abs) \ - V(I8x16Add, kMipsI8x16Add) \ - V(I8x16AddSatS, kMipsI8x16AddSatS) \ - V(I8x16AddSatU, kMipsI8x16AddSatU) \ - V(I8x16Sub, kMipsI8x16Sub) \ - V(I8x16SubSatS, kMipsI8x16SubSatS) \ - V(I8x16SubSatU, kMipsI8x16SubSatU) \ - V(I8x16MaxS, kMipsI8x16MaxS) \ - V(I8x16MinS, kMipsI8x16MinS) \ - V(I8x16MaxU, kMipsI8x16MaxU) \ - V(I8x16MinU, kMipsI8x16MinU) \ - V(I8x16Eq, kMipsI8x16Eq) \ - V(I8x16Ne, kMipsI8x16Ne) \ - V(I8x16GtS, kMipsI8x16GtS) \ - V(I8x16GeS, kMipsI8x16GeS) \ - V(I8x16GtU, kMipsI8x16GtU) \ - V(I8x16GeU, kMipsI8x16GeU) \ - V(I8x16RoundingAverageU, kMipsI8x16RoundingAverageU) \ - V(I8x16SConvertI16x8, kMipsI8x16SConvertI16x8) \ - V(I8x16UConvertI16x8, kMipsI8x16UConvertI16x8) \ - V(I8x16Abs, kMipsI8x16Abs) \ - V(S128And, kMipsS128And) \ - V(S128Or, kMipsS128Or) \ - V(S128Xor, kMipsS128Xor) \ - V(S128AndNot, kMipsS128AndNot) - -void InstructionSelector::VisitS128Const(Node* node) { UNIMPLEMENTED(); } - -void InstructionSelector::VisitS128Zero(Node* node) { - MipsOperandGenerator g(this); - Emit(kMipsS128Zero, g.DefineSameAsFirst(node)); -} - -#define SIMD_VISIT_SPLAT(Type) \ - void InstructionSelector::Visit##Type##Splat(Node* node) { \ - VisitRR(this, kMips##Type##Splat, node); \ - } -SIMD_TYPE_LIST(SIMD_VISIT_SPLAT) -SIMD_VISIT_SPLAT(F64x2) -#undef SIMD_VISIT_SPLAT - -#define SIMD_VISIT_EXTRACT_LANE(Type, Sign) \ - void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \ - VisitRRI(this, kMips##Type##ExtractLane##Sign, node); \ - } -SIMD_VISIT_EXTRACT_LANE(F64x2, ) -SIMD_VISIT_EXTRACT_LANE(F32x4, ) -SIMD_VISIT_EXTRACT_LANE(I32x4, ) -SIMD_VISIT_EXTRACT_LANE(I16x8, U) -SIMD_VISIT_EXTRACT_LANE(I16x8, S) -SIMD_VISIT_EXTRACT_LANE(I8x16, U) -SIMD_VISIT_EXTRACT_LANE(I8x16, S) -#undef SIMD_VISIT_EXTRACT_LANE - -#define SIMD_VISIT_REPLACE_LANE(Type) \ - void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \ - VisitRRIR(this, kMips##Type##ReplaceLane, node); \ - } -SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE) -SIMD_VISIT_REPLACE_LANE(F64x2) -#undef SIMD_VISIT_REPLACE_LANE - -#define SIMD_VISIT_UNOP(Name, instruction) \ - void InstructionSelector::Visit##Name(Node* node) { \ - VisitRR(this, instruction, node); \ - } -SIMD_UNOP_LIST(SIMD_VISIT_UNOP) -#undef SIMD_VISIT_UNOP - -#define SIMD_VISIT_SHIFT_OP(Name) \ - void InstructionSelector::Visit##Name(Node* node) { \ - VisitRRI(this, kMips##Name, node); \ - } -SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP) -#undef SIMD_VISIT_SHIFT_OP - -#define SIMD_VISIT_BINOP(Name, instruction) \ - void InstructionSelector::Visit##Name(Node* node) { \ - VisitRRR(this, instruction, node); \ - } -SIMD_BINOP_LIST(SIMD_VISIT_BINOP) -#undef SIMD_VISIT_BINOP - -void InstructionSelector::VisitS128Select(Node* node) { - VisitRRRR(this, kMipsS128Select, node); -} - -#if V8_ENABLE_WEBASSEMBLY -namespace { - -struct ShuffleEntry { - uint8_t shuffle[kSimd128Size]; - ArchOpcode opcode; -}; - -static const ShuffleEntry arch_shuffles[] = { - {{0, 1, 2, 3, 16, 
17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23}, - kMipsS32x4InterleaveRight}, - {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31}, - kMipsS32x4InterleaveLeft}, - {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27}, - kMipsS32x4PackEven}, - {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31}, - kMipsS32x4PackOdd}, - {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27}, - kMipsS32x4InterleaveEven}, - {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31}, - kMipsS32x4InterleaveOdd}, - - {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23}, - kMipsS16x8InterleaveRight}, - {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31}, - kMipsS16x8InterleaveLeft}, - {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29}, - kMipsS16x8PackEven}, - {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31}, - kMipsS16x8PackOdd}, - {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29}, - kMipsS16x8InterleaveEven}, - {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31}, - kMipsS16x8InterleaveOdd}, - {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9}, kMipsS16x4Reverse}, - {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13}, kMipsS16x2Reverse}, - - {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23}, - kMipsS8x16InterleaveRight}, - {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31}, - kMipsS8x16InterleaveLeft}, - {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30}, - kMipsS8x16PackEven}, - {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31}, - kMipsS8x16PackOdd}, - {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30}, - kMipsS8x16InterleaveEven}, - {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31}, - kMipsS8x16InterleaveOdd}, - {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}, kMipsS8x8Reverse}, - {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}, kMipsS8x4Reverse}, - {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14}, kMipsS8x2Reverse}}; - -bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table, - size_t num_entries, bool is_swizzle, - ArchOpcode* opcode) { - uint8_t mask = is_swizzle ? 
kSimd128Size - 1 : 2 * kSimd128Size - 1; - for (size_t i = 0; i < num_entries; ++i) { - const ShuffleEntry& entry = table[i]; - int j = 0; - for (; j < kSimd128Size; ++j) { - if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) { - break; - } - } - if (j == kSimd128Size) { - *opcode = entry.opcode; - return true; - } - } - return false; -} - -} // namespace - -void InstructionSelector::VisitI8x16Shuffle(Node* node) { - uint8_t shuffle[kSimd128Size]; - bool is_swizzle; - CanonicalizeShuffle(node, shuffle, &is_swizzle); - uint8_t shuffle32x4[4]; - ArchOpcode opcode; - if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles), - is_swizzle, &opcode)) { - VisitRRR(this, opcode, node); - return; - } - Node* input0 = node->InputAt(0); - Node* input1 = node->InputAt(1); - uint8_t offset; - MipsOperandGenerator g(this); - if (wasm::SimdShuffle::TryMatchConcat(shuffle, &offset)) { - Emit(kMipsS8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1), - g.UseRegister(input0), g.UseImmediate(offset)); - return; - } - if (wasm::SimdShuffle::TryMatch32x4Shuffle(shuffle, shuffle32x4)) { - Emit(kMipsS32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0), - g.UseRegister(input1), - g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle32x4))); - return; - } - Emit(kMipsI8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0), - g.UseRegister(input1), - g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle)), - g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 4)), - g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 8)), - g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 12))); -} -#else -void InstructionSelector::VisitI8x16Shuffle(Node* node) { UNREACHABLE(); } -#endif // V8_ENABLE_WEBASSEMBLY - -void InstructionSelector::VisitI8x16Swizzle(Node* node) { - MipsOperandGenerator g(this); - InstructionOperand temps[] = {g.TempSimd128Register()}; - // We don't want input 0 or input 1 to be the same as output, since we will - // modify output before doing the calculation.
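// Concretely, UseUniqueRegister below keeps the register allocator from
// assigning either input the same register as the result: the emitted
// sequence writes the destination before it has finished reading both
// inputs, so an aliased input would be clobbered while still live. The
// TempSimd128Register in `temps` serves as the scratch for that sequence.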
- Emit(kMipsI8x16Swizzle, g.DefineAsRegister(node), - g.UseUniqueRegister(node->InputAt(0)), - g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); -} - -void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) { - MipsOperandGenerator g(this); - Emit(kMipsSeb, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); -} - -void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) { - MipsOperandGenerator g(this); - Emit(kMipsSeh, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0))); -} - -void InstructionSelector::VisitF32x4Pmin(Node* node) { - VisitUniqueRRR(this, kMipsF32x4Pmin, node); -} - -void InstructionSelector::VisitF32x4Pmax(Node* node) { - VisitUniqueRRR(this, kMipsF32x4Pmax, node); -} - -void InstructionSelector::VisitF64x2Pmin(Node* node) { - VisitUniqueRRR(this, kMipsF64x2Pmin, node); -} - -void InstructionSelector::VisitF64x2Pmax(Node* node) { - VisitUniqueRRR(this, kMipsF64x2Pmax, node); -} - -void InstructionSelector::AddOutputToSelectContinuation(OperandGenerator* g, - int first_input_index, - Node* node) { - UNREACHABLE(); -} - -// static -MachineOperatorBuilder::Flags -InstructionSelector::SupportedMachineOperatorFlags() { - MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags; - if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && - IsFp64Mode()) { - flags |= MachineOperatorBuilder::kFloat64RoundDown | - MachineOperatorBuilder::kFloat64RoundUp | - MachineOperatorBuilder::kFloat64RoundTruncate | - MachineOperatorBuilder::kFloat64RoundTiesEven; - } - - return flags | MachineOperatorBuilder::kWord32Ctz | - MachineOperatorBuilder::kWord32Popcnt | - MachineOperatorBuilder::kInt32DivIsSafe | - MachineOperatorBuilder::kUint32DivIsSafe | - MachineOperatorBuilder::kWord32ShiftIsSafe | - MachineOperatorBuilder::kFloat32RoundDown | - MachineOperatorBuilder::kFloat32RoundUp | - MachineOperatorBuilder::kFloat32RoundTruncate | - MachineOperatorBuilder::kFloat32RoundTiesEven; -} - -// static -MachineOperatorBuilder::AlignmentRequirements -InstructionSelector::AlignmentRequirements() { - if (IsMipsArchVariant(kMips32r6)) { - return MachineOperatorBuilder::AlignmentRequirements:: - FullUnalignedAccessSupport(); - } else { - DCHECK(IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r1) || - IsMipsArchVariant(kMips32r2)); - return MachineOperatorBuilder::AlignmentRequirements:: - NoUnalignedAccessSupport(); - } -} - -#undef SIMD_BINOP_LIST -#undef SIMD_SHIFT_OP_LIST -#undef SIMD_UNOP_LIST -#undef SIMD_TYPE_LIST -#undef TRACE_UNIMPL -#undef TRACE - -} // namespace compiler -} // namespace internal -} // namespace v8 diff --git a/src/compiler/c-linkage.cc b/src/compiler/c-linkage.cc index f55155cb46..9742e41b0c 100644 --- a/src/compiler/c-linkage.cc +++ b/src/compiler/c-linkage.cc @@ -65,15 +65,6 @@ namespace { #define CALLEE_SAVE_FP_REGISTERS d8, d9, d10, d11, d12, d13, d14, d15 -#elif V8_TARGET_ARCH_MIPS -// =========================================================================== -// == mips =================================================================== -// =========================================================================== -#define STACK_SHADOW_WORDS 4 -#define PARAM_REGISTERS a0, a1, a2, a3 -#define CALLEE_SAVE_REGISTERS s0, s1, s2, s3, s4, s5, s6, s7 -#define CALLEE_SAVE_FP_REGISTERS f20, f22, f24, f26, f28, f30 - #elif V8_TARGET_ARCH_MIPS64 // =========================================================================== // == mips64 
================================================================= diff --git a/src/deoptimizer/mips/deoptimizer-mips.cc b/src/deoptimizer/mips/deoptimizer-mips.cc deleted file mode 100644 index 62a7f45788..0000000000 --- a/src/deoptimizer/mips/deoptimizer-mips.cc +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2011 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/deoptimizer/deoptimizer.h" - -namespace v8 { -namespace internal { - -const int Deoptimizer::kEagerDeoptExitSize = 3 * kInstrSize; -const int Deoptimizer::kLazyDeoptExitSize = 3 * kInstrSize; - -Float32 RegisterValues::GetFloatRegister(unsigned n) const { - return Float32::FromBits( - static_cast<uint32_t>(double_registers_[n].get_bits())); -} - -void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) { - SetFrameSlot(offset, value); -} - -void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) { - SetFrameSlot(offset, value); -} - -void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) { - // No embedded constant pool support. - UNREACHABLE(); -} - -void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; } - -} // namespace internal -} // namespace v8 diff --git a/src/diagnostics/mips/disasm-mips.cc b/src/diagnostics/mips/disasm-mips.cc deleted file mode 100644 index 32a0bdb048..0000000000 --- a/src/diagnostics/mips/disasm-mips.cc +++ /dev/null @@ -1,2736 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// A Disassembler object is used to disassemble a block of code instruction by -// instruction. The default implementation of the NameConverter object can be -// overriden to modify register names or to do symbol lookup on addresses. -// -// The example below will disassemble a block of code and print it to stdout. -// -// NameConverter converter; -// Disassembler d(converter); -// for (byte* pc = begin; pc < end;) { -// v8::base::EmbeddedVector<char, 256> buffer; -// byte* prev_pc = pc; -// pc += d.InstructionDecode(buffer, pc); -// printf("%p %08x %s\n", -// prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer); -// } -// -// The Disassembler class also has a convenience method to disassemble a block -// of code into a FILE*, meaning that the above functionality could also be -// achieved by just calling Disassembler::Disassemble(stdout, begin, end); - -#include <assert.h> -#include <stdarg.h> -#include <stdio.h> -#include <string.h> - -#if V8_TARGET_ARCH_MIPS - -#include "src/base/platform/platform.h" -#include "src/base/strings.h" -#include "src/base/vector.h" -#include "src/codegen/macro-assembler.h" -#include "src/codegen/mips/constants-mips.h" -#include "src/diagnostics/disasm.h" - -namespace v8 { -namespace internal { - -//------------------------------------------------------------------------------ - -// Decoder decodes and disassembles instructions into an output buffer. -// It uses the converter to convert register names and call destinations into -// more informative description. -class Decoder { - public: - Decoder(const disasm::NameConverter& converter, - v8::base::Vector<char> out_buffer) - : converter_(converter), out_buffer_(out_buffer), out_buffer_pos_(0) { - out_buffer_[out_buffer_pos_] = '\0'; - } - - ~Decoder() {} - - Decoder(const Decoder&) = delete; - Decoder& operator=(const Decoder&) = delete; - - // Writes one disassembled instruction into 'buffer' (0-terminated).
- // Returns the length of the disassembled machine instruction in bytes. - int InstructionDecode(byte* instruction); - - private: - // Bottleneck functions to print into the out_buffer. - void PrintChar(const char ch); - void Print(const char* str); - - // Printing of common values. - void PrintRegister(int reg); - void PrintFPURegister(int freg); - void PrintMSARegister(int wreg); - void PrintFPUStatusRegister(int freg); - void PrintMSAControlRegister(int creg); - void PrintRs(Instruction* instr); - void PrintRt(Instruction* instr); - void PrintRd(Instruction* instr); - void PrintFs(Instruction* instr); - void PrintFt(Instruction* instr); - void PrintFd(Instruction* instr); - void PrintSa(Instruction* instr); - void PrintLsaSa(Instruction* instr); - void PrintSd(Instruction* instr); - void PrintSs1(Instruction* instr); - void PrintSs2(Instruction* instr); - void PrintBc(Instruction* instr); - void PrintCc(Instruction* instr); - void PrintBp2(Instruction* instr); - void PrintFunction(Instruction* instr); - void PrintSecondaryField(Instruction* instr); - void PrintUImm9(Instruction* instr); - void PrintSImm9(Instruction* instr); - void PrintUImm16(Instruction* instr); - void PrintSImm16(Instruction* instr); - void PrintXImm16(Instruction* instr); - void PrintPCImm16(Instruction* instr, int delta_pc, int n_bits); - void PrintXImm18(Instruction* instr); - void PrintSImm18(Instruction* instr); - void PrintXImm19(Instruction* instr); - void PrintSImm19(Instruction* instr); - void PrintXImm21(Instruction* instr); - void PrintSImm21(Instruction* instr); - void PrintPCImm21(Instruction* instr, int delta_pc, int n_bits); - void PrintXImm26(Instruction* instr); - void PrintSImm26(Instruction* instr); - void PrintPCImm26(Instruction* instr, int delta_pc, int n_bits); - void PrintPCImm26(Instruction* instr); - void PrintCode(Instruction* instr); // For break and trap instructions. - void PrintFormat(Instruction* instr); // For floating format postfix. - void PrintMsaDataFormat(Instruction* instr); - void PrintMsaXImm8(Instruction* instr); - void PrintMsaImm8(Instruction* instr); - void PrintMsaImm5(Instruction* instr); - void PrintMsaSImm5(Instruction* instr); - void PrintMsaSImm10(Instruction* instr, bool is_mi10 = false); - void PrintMsaImmBit(Instruction* instr); - void PrintMsaImmElm(Instruction* instr); - void PrintMsaCopy(Instruction* instr); - // Printing of instruction name. - void PrintInstructionName(Instruction* instr); - - // Handle formatting of instructions and their options. - int FormatRegister(Instruction* instr, const char* option); - int FormatFPURegister(Instruction* instr, const char* option); - int FormatMSARegister(Instruction* instr, const char* option); - int FormatOption(Instruction* instr, const char* option); - void Format(Instruction* instr, const char* format); - void Unknown(Instruction* instr); - - // Each of these functions decodes one particular instruction type. 
- bool DecodeTypeRegisterRsType(Instruction* instr); - void DecodeTypeRegisterSRsType(Instruction* instr); - void DecodeTypeRegisterDRsType(Instruction* instr); - void DecodeTypeRegisterLRsType(Instruction* instr); - void DecodeTypeRegisterWRsType(Instruction* instr); - void DecodeTypeRegisterSPECIAL(Instruction* instr); - void DecodeTypeRegisterSPECIAL2(Instruction* instr); - void DecodeTypeRegisterSPECIAL3(Instruction* instr); - void DecodeTypeRegister(Instruction* instr); - void DecodeTypeImmediate(Instruction* instr); - void DecodeTypeImmediateSPECIAL3(Instruction* instr); - void DecodeTypeJump(Instruction* instr); - void DecodeTypeMsaI8(Instruction* instr); - void DecodeTypeMsaI5(Instruction* instr); - void DecodeTypeMsaI10(Instruction* instr); - void DecodeTypeMsaELM(Instruction* instr); - void DecodeTypeMsaBIT(Instruction* instr); - void DecodeTypeMsaMI10(Instruction* instr); - void DecodeTypeMsa3R(Instruction* instr); - void DecodeTypeMsa3RF(Instruction* instr); - void DecodeTypeMsaVec(Instruction* instr); - void DecodeTypeMsa2R(Instruction* instr); - void DecodeTypeMsa2RF(Instruction* instr); - - const disasm::NameConverter& converter_; - v8::base::Vector<char> out_buffer_; - int out_buffer_pos_; -}; - -// Support for assertions in the Decoder formatting functions. -#define STRING_STARTS_WITH(string, compare_string) \ - (strncmp(string, compare_string, strlen(compare_string)) == 0) - -// Append the ch to the output buffer. -void Decoder::PrintChar(const char ch) { out_buffer_[out_buffer_pos_++] = ch; } - -// Append the str to the output buffer. -void Decoder::Print(const char* str) { - char cur = *str++; - while (cur != '\0' && (out_buffer_pos_ < (out_buffer_.length() - 1))) { - PrintChar(cur); - cur = *str++; - } - out_buffer_[out_buffer_pos_] = 0; -} - -// Print the register name according to the active name converter. -void Decoder::PrintRegister(int reg) { - Print(converter_.NameOfCPURegister(reg)); -} - -void Decoder::PrintRs(Instruction* instr) { - int reg = instr->RsValue(); - PrintRegister(reg); -} - -void Decoder::PrintRt(Instruction* instr) { - int reg = instr->RtValue(); - PrintRegister(reg); -} - -void Decoder::PrintRd(Instruction* instr) { - int reg = instr->RdValue(); - PrintRegister(reg); -} - -// Print the FPUregister name according to the active name converter. -void Decoder::PrintFPURegister(int freg) { - Print(converter_.NameOfXMMRegister(freg)); -} - -void Decoder::PrintMSARegister(int wreg) { Print(MSARegisters::Name(wreg)); } - -void Decoder::PrintFPUStatusRegister(int freg) { - switch (freg) { - case kFCSRRegister: - Print("FCSR"); - break; - default: - Print(converter_.NameOfXMMRegister(freg)); - } -} - -void Decoder::PrintMSAControlRegister(int creg) { - switch (creg) { - case kMSAIRRegister: - Print("MSAIR"); - break; - case kMSACSRRegister: - Print("MSACSR"); - break; - default: - Print("no_msacreg"); - } -} - -void Decoder::PrintFs(Instruction* instr) { - int freg = instr->RsValue(); - PrintFPURegister(freg); -} - -void Decoder::PrintFt(Instruction* instr) { - int freg = instr->RtValue(); - PrintFPURegister(freg); -} - -void Decoder::PrintFd(Instruction* instr) { - int freg = instr->RdValue(); - PrintFPURegister(freg); -} - -// Print the integer value of the sa field. -void Decoder::PrintSa(Instruction* instr) { - int sa = instr->SaValue(); - out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa); -} - -// Print the integer value of the sa field of a lsa instruction.
-void Decoder::PrintLsaSa(Instruction* instr) { - int sa = instr->LsaSaValue() + 1; - out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa); -} - -// Print the integer value of the rd field, when it is not used as reg. -void Decoder::PrintSd(Instruction* instr) { - int sd = instr->RdValue(); - out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sd); -} - -// Print the integer value of the rd field, when used as 'ext' size. -void Decoder::PrintSs1(Instruction* instr) { - int ss = instr->RdValue(); - out_buffer_pos_ += - base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss + 1); -} - -// Print the integer value of the rd field, when used as 'ins' size. -void Decoder::PrintSs2(Instruction* instr) { - int ss = instr->RdValue(); - int pos = instr->SaValue(); - out_buffer_pos_ += - base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss - pos + 1); -} - -// Print the integer value of the cc field for the bc1t/f instructions. -void Decoder::PrintBc(Instruction* instr) { - int cc = instr->FBccValue(); - out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", cc); -} - -// Print the integer value of the cc field for the FP compare instructions. -void Decoder::PrintCc(Instruction* instr) { - int cc = instr->FCccValue(); - out_buffer_pos_ += - base::SNPrintF(out_buffer_ + out_buffer_pos_, "cc(%d)", cc); -} - -void Decoder::PrintBp2(Instruction* instr) { - int bp2 = instr->Bp2Value(); - out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", bp2); -} - -// Print 9-bit unsigned immediate value. -void Decoder::PrintUImm9(Instruction* instr) { - int32_t imm = instr->Imm9Value(); - out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", imm); -} - -// Print 9-bit signed immediate value. -void Decoder::PrintSImm9(Instruction* instr) { - int32_t imm = ((instr->Imm9Value()) << 23) >> 23; - out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm); -} - -// Print 16-bit unsigned immediate value. -void Decoder::PrintUImm16(Instruction* instr) { - int32_t imm = instr->Imm16Value(); - out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", imm); -} - -// Print 16-bit signed immediate value. -void Decoder::PrintSImm16(Instruction* instr) { - int32_t imm = ((instr->Imm16Value()) << 16) >> 16; - out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm); -} - -// Print 16-bit hexa immediate value. -void Decoder::PrintXImm16(Instruction* instr) { - int32_t imm = instr->Imm16Value(); - out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm); -} - -// Print absoulte address for 16-bit offset or immediate value. -// The absolute address is calculated according following expression: -// PC + delta_pc + (offset << n_bits) -void Decoder::PrintPCImm16(Instruction* instr, int delta_pc, int n_bits) { - int16_t offset = instr->Imm16Value(); - out_buffer_pos_ += - base::SNPrintF(out_buffer_ + out_buffer_pos_, "%s", - converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + - delta_pc + (offset << n_bits))); -} - -// Print 18-bit signed immediate value. -void Decoder::PrintSImm18(Instruction* instr) { - int32_t imm = - ((instr->Imm18Value()) << (32 - kImm18Bits)) >> (32 - kImm18Bits); - out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm); -} - -// Print 18-bit hexa immediate value.
-void Decoder::PrintXImm18(Instruction* instr) { - int32_t imm = instr->Imm18Value(); - out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm); -} - -// Print 19-bit hexa immediate value. -void Decoder::PrintXImm19(Instruction* instr) { - int32_t imm = instr->Imm19Value(); - out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm); -} - -// Print 19-bit signed immediate value. -void Decoder::PrintSImm19(Instruction* instr) { - int32_t imm19 = instr->Imm19Value(); - // set sign - imm19 <<= (32 - kImm19Bits); - imm19 >>= (32 - kImm19Bits); - out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm19); -} - -// Print 21-bit immediate value. -void Decoder::PrintXImm21(Instruction* instr) { - uint32_t imm = instr->Imm21Value(); - out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm); -} - -// Print 21-bit signed immediate value. -void Decoder::PrintSImm21(Instruction* instr) { - int32_t imm21 = instr->Imm21Value(); - // set sign - imm21 <<= (32 - kImm21Bits); - imm21 >>= (32 - kImm21Bits); - out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm21); -} - -// Print absoulte address for 21-bit offset or immediate value. -// The absolute address is calculated according following expression: -// PC + delta_pc + (offset << n_bits) -void Decoder::PrintPCImm21(Instruction* instr, int delta_pc, int n_bits) { - int32_t imm21 = instr->Imm21Value(); - // set sign - imm21 <<= (32 - kImm21Bits); - imm21 >>= (32 - kImm21Bits); - out_buffer_pos_ += - base::SNPrintF(out_buffer_ + out_buffer_pos_, "%s", - converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + - delta_pc + (imm21 << n_bits))); -} - -// Print 26-bit hex immediate value. -void Decoder::PrintXImm26(Instruction* instr) { - uint32_t target = static_cast<uint32_t>(instr->Imm26Value()) - << kImmFieldShift; - target = (reinterpret_cast<uint32_t>(instr) & ~0xFFFFFFF) | target; - out_buffer_pos_ += - base::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", target); -} - -// Print 26-bit signed immediate value. -void Decoder::PrintSImm26(Instruction* instr) { - int32_t imm26 = instr->Imm26Value(); - // set sign - imm26 <<= (32 - kImm26Bits); - imm26 >>= (32 - kImm26Bits); - out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm26); -} - -// Print absoulte address for 26-bit offset or immediate value. -// The absolute address is calculated according following expression: -// PC + delta_pc + (offset << n_bits) -void Decoder::PrintPCImm26(Instruction* instr, int delta_pc, int n_bits) { - int32_t imm26 = instr->Imm26Value(); - // set sign - imm26 <<= (32 - kImm26Bits); - imm26 >>= (32 - kImm26Bits); - out_buffer_pos_ += - base::SNPrintF(out_buffer_ + out_buffer_pos_, "%s", - converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + - delta_pc + (imm26 << n_bits))); -} - -// Print absoulte address for 26-bit offset or immediate value. -// The absolute address is calculated according following expression: -// PC[GPRLEN-1 .. 28] || instr_index26 || 00 -void Decoder::PrintPCImm26(Instruction* instr) { - int32_t imm26 = instr->Imm26Value(); - uint32_t pc_mask = ~0xFFFFFFF; - uint32_t pc = ((uint32_t)(instr + 1) & pc_mask) | (imm26 << 2); - out_buffer_pos_ += - base::SNPrintF(out_buffer_ + out_buffer_pos_, "%s", - converter_.NameOfAddress((reinterpret_cast<byte*>(pc)))); -} - -// Print 26-bit immediate value. -void Decoder::PrintCode(Instruction* instr) { - if (instr->OpcodeFieldRaw() != SPECIAL) - return; // Not a break or trap instruction.
- switch (instr->FunctionFieldRaw()) { - case BREAK: { - int32_t code = instr->Bits(25, 6); - out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, - "0x%05x (%d)", code, code); - break; - } - case TGE: - case TGEU: - case TLT: - case TLTU: - case TEQ: - case TNE: { - int32_t code = instr->Bits(15, 6); - out_buffer_pos_ += - base::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%03x", code); - break; - } - default: // Not a break or trap instruction. - break; - } -} - -void Decoder::PrintMsaXImm8(Instruction* instr) { - int32_t imm = instr->MsaImm8Value(); - out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm); -} - -void Decoder::PrintMsaImm8(Instruction* instr) { - int32_t imm = instr->MsaImm8Value(); - out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", imm); -} - -void Decoder::PrintMsaImm5(Instruction* instr) { - int32_t imm = instr->MsaImm5Value(); - out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", imm); -} - -void Decoder::PrintMsaSImm5(Instruction* instr) { - int32_t imm = instr->MsaImm5Value(); - imm <<= (32 - kMsaImm5Bits); - imm >>= (32 - kMsaImm5Bits); - out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm); -} - -void Decoder::PrintMsaSImm10(Instruction* instr, bool is_mi10) { - int32_t imm = is_mi10 ? instr->MsaImmMI10Value() : instr->MsaImm10Value(); - imm <<= (32 - kMsaImm10Bits); - imm >>= (32 - kMsaImm10Bits); - out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm); -} - -void Decoder::PrintMsaImmBit(Instruction* instr) { - int32_t m = instr->MsaBitMValue(); - out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", m); -} - -void Decoder::PrintMsaImmElm(Instruction* instr) { - int32_t n = instr->MsaElmNValue(); - out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", n); -} - -void Decoder::PrintMsaCopy(Instruction* instr) { - int32_t rd = instr->WdValue(); - int32_t ws = instr->WsValue(); - int32_t n = instr->MsaElmNValue(); - out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%s, %s[%u]", - converter_.NameOfCPURegister(rd), - MSARegisters::Name(ws), n); -} - -void Decoder::PrintFormat(Instruction* instr) { - char formatLetter = ' '; - switch (instr->RsFieldRaw()) { - case S: - formatLetter = 's'; - break; - case D: - formatLetter = 'd'; - break; - case W: - formatLetter = 'w'; - break; - case L: - formatLetter = 'l'; - break; - default: - UNREACHABLE(); - } - PrintChar(formatLetter); -} - -void Decoder::PrintMsaDataFormat(Instruction* instr) { - DCHECK(instr->IsMSAInstr()); - char df = ' '; - if (instr->IsMSABranchInstr()) { - switch (instr->RsFieldRaw()) { - case BZ_V: - case BNZ_V: - df = 'v'; - break; - case BZ_B: - case BNZ_B: - df = 'b'; - break; - case BZ_H: - case BNZ_H: - df = 'h'; - break; - case BZ_W: - case BNZ_W: - df = 'w'; - break; - case BZ_D: - case BNZ_D: - df = 'd'; - break; - default: - UNREACHABLE(); - } - } else { - char DF[] = {'b', 'h', 'w', 'd'}; - switch (instr->MSAMinorOpcodeField()) { - case kMsaMinorI5: - case kMsaMinorI10: - case kMsaMinor3R: - df = DF[instr->Bits(22, 21)]; - break; - case kMsaMinorMI10: - df = DF[instr->Bits(1, 0)]; - break; - case kMsaMinorBIT: - df = DF[instr->MsaBitDf()]; - break; - case kMsaMinorELM: - df = DF[instr->MsaElmDf()]; - break; - case kMsaMinor3RF: { - uint32_t opcode = instr->InstructionBits() & kMsa3RFMask; - switch (opcode) { - case FEXDO: - case FTQ: - case MUL_Q: - case MADD_Q: - case MSUB_Q: - case MULR_Q: - case MADDR_Q: - case MSUBR_Q: - df = 
DF[1 + instr->Bit(21)]; - break; - default: - df = DF[2 + instr->Bit(21)]; - break; - } - } break; - case kMsaMinor2R: - df = DF[instr->Bits(17, 16)]; - break; - case kMsaMinor2RF: - df = DF[2 + instr->Bit(16)]; - break; - default: - UNREACHABLE(); - } - } - - PrintChar(df); -} - -// Printing of instruction name. -void Decoder::PrintInstructionName(Instruction* instr) {} - -// Handle all register based formatting in this function to reduce the -// complexity of FormatOption. -int Decoder::FormatRegister(Instruction* instr, const char* format) { - DCHECK_EQ(format[0], 'r'); - if (format[1] == 's') { // 'rs: Rs register. - int reg = instr->RsValue(); - PrintRegister(reg); - return 2; - } else if (format[1] == 't') { // 'rt: rt register. - int reg = instr->RtValue(); - PrintRegister(reg); - return 2; - } else if (format[1] == 'd') { // 'rd: rd register. - int reg = instr->RdValue(); - PrintRegister(reg); - return 2; - } - UNREACHABLE(); -} - -// Handle all FPUregister based formatting in this function to reduce the -// complexity of FormatOption. -int Decoder::FormatFPURegister(Instruction* instr, const char* format) { - DCHECK_EQ(format[0], 'f'); - if ((CTC1 == instr->RsFieldRaw()) || (CFC1 == instr->RsFieldRaw())) { - if (format[1] == 's') { // 'fs: fs register. - int reg = instr->FsValue(); - PrintFPUStatusRegister(reg); - return 2; - } else if (format[1] == 't') { // 'ft: ft register. - int reg = instr->FtValue(); - PrintFPUStatusRegister(reg); - return 2; - } else if (format[1] == 'd') { // 'fd: fd register. - int reg = instr->FdValue(); - PrintFPUStatusRegister(reg); - return 2; - } else if (format[1] == 'r') { // 'fr: fr register. - int reg = instr->FrValue(); - PrintFPUStatusRegister(reg); - return 2; - } - } else { - if (format[1] == 's') { // 'fs: fs register. - int reg = instr->FsValue(); - PrintFPURegister(reg); - return 2; - } else if (format[1] == 't') { // 'ft: ft register. - int reg = instr->FtValue(); - PrintFPURegister(reg); - return 2; - } else if (format[1] == 'd') { // 'fd: fd register. - int reg = instr->FdValue(); - PrintFPURegister(reg); - return 2; - } else if (format[1] == 'r') { // 'fr: fr register. - int reg = instr->FrValue(); - PrintFPURegister(reg); - return 2; - } - } - UNREACHABLE(); -} - -// Handle all MSARegister based formatting in this function to reduce the -// complexity of FormatOption. -int Decoder::FormatMSARegister(Instruction* instr, const char* format) { - DCHECK_EQ(format[0], 'w'); - if (format[1] == 's') { - int reg = instr->WsValue(); - PrintMSARegister(reg); - return 2; - } else if (format[1] == 't') { - int reg = instr->WtValue(); - PrintMSARegister(reg); - return 2; - } else if (format[1] == 'd') { - int reg = instr->WdValue(); - PrintMSARegister(reg); - return 2; - } - - UNREACHABLE(); -} - -// FormatOption takes a formatting string and interprets it based on -// the current instructions. The format string points to the first -// character of the option string (the option escape has already been -// consumed by the caller.) FormatOption returns the number of -// characters that were consumed from the formatting string. -int Decoder::FormatOption(Instruction* instr, const char* format) { - switch (format[0]) { - case 'c': { // 'code for break or trap instructions. - DCHECK(STRING_STARTS_WITH(format, "code")); - PrintCode(instr); - return 4; - } - case 'i': { // 'imm16u or 'imm26. 
- if (format[3] == '1') { - if (format[4] == '6') { - DCHECK(STRING_STARTS_WITH(format, "imm16")); - switch (format[5]) { - case 's': - DCHECK(STRING_STARTS_WITH(format, "imm16s")); - PrintSImm16(instr); - break; - case 'u': - DCHECK(STRING_STARTS_WITH(format, "imm16u")); - PrintSImm16(instr); - break; - case 'x': - DCHECK(STRING_STARTS_WITH(format, "imm16x")); - PrintXImm16(instr); - break; - case 'p': { // The PC relative address. - DCHECK(STRING_STARTS_WITH(format, "imm16p")); - int delta_pc = 0; - int n_bits = 0; - switch (format[6]) { - case '4': { - DCHECK(STRING_STARTS_WITH(format, "imm16p4")); - delta_pc = 4; - switch (format[8]) { - case '2': - DCHECK(STRING_STARTS_WITH(format, "imm16p4s2")); - n_bits = 2; - PrintPCImm16(instr, delta_pc, n_bits); - return 9; - } - } - } - } - } - return 6; - } else if (format[4] == '8') { - DCHECK(STRING_STARTS_WITH(format, "imm18")); - switch (format[5]) { - case 's': - DCHECK(STRING_STARTS_WITH(format, "imm18s")); - PrintSImm18(instr); - break; - case 'x': - DCHECK(STRING_STARTS_WITH(format, "imm18x")); - PrintXImm18(instr); - break; - } - return 6; - } else if (format[4] == '9') { - DCHECK(STRING_STARTS_WITH(format, "imm19")); - switch (format[5]) { - case 's': - DCHECK(STRING_STARTS_WITH(format, "imm19s")); - PrintSImm19(instr); - break; - case 'x': - DCHECK(STRING_STARTS_WITH(format, "imm19x")); - PrintXImm19(instr); - break; - } - return 6; - } else if (format[4] == '0' && format[5] == 's') { - DCHECK(STRING_STARTS_WITH(format, "imm10s")); - if (format[6] == '1') { - DCHECK(STRING_STARTS_WITH(format, "imm10s1")); - PrintMsaSImm10(instr, false); - } else if (format[6] == '2') { - DCHECK(STRING_STARTS_WITH(format, "imm10s2")); - PrintMsaSImm10(instr, true); - } - return 7; - } - } else if (format[3] == '2' && format[4] == '1') { - DCHECK(STRING_STARTS_WITH(format, "imm21")); - switch (format[5]) { - case 's': - DCHECK(STRING_STARTS_WITH(format, "imm21s")); - PrintSImm21(instr); - break; - case 'x': - DCHECK(STRING_STARTS_WITH(format, "imm21x")); - PrintXImm21(instr); - break; - case 'p': { // The PC relative address. - DCHECK(STRING_STARTS_WITH(format, "imm21p")); - int delta_pc = 0; - int n_bits = 0; - switch (format[6]) { - case '4': { - DCHECK(STRING_STARTS_WITH(format, "imm21p4")); - delta_pc = 4; - switch (format[8]) { - case '2': - DCHECK(STRING_STARTS_WITH(format, "imm21p4s2")); - n_bits = 2; - PrintPCImm21(instr, delta_pc, n_bits); - return 9; - } - } - } - } - } - return 6; - } else if (format[3] == '2' && format[4] == '6') { - DCHECK(STRING_STARTS_WITH(format, "imm26")); - switch (format[5]) { - case 's': - DCHECK(STRING_STARTS_WITH(format, "imm26s")); - PrintSImm26(instr); - break; - case 'x': - DCHECK(STRING_STARTS_WITH(format, "imm26x")); - PrintXImm26(instr); - break; - case 'p': { // The PC relative address. - DCHECK(STRING_STARTS_WITH(format, "imm26p")); - int delta_pc = 0; - int n_bits = 0; - switch (format[6]) { - case '4': { - DCHECK(STRING_STARTS_WITH(format, "imm26p4")); - delta_pc = 4; - switch (format[8]) { - case '2': - DCHECK(STRING_STARTS_WITH(format, "imm26p4s2")); - n_bits = 2; - PrintPCImm26(instr, delta_pc, n_bits); - return 9; - } - } - } - } - case 'j': { // Absolute address for jump instructions. 
- DCHECK(STRING_STARTS_WITH(format, "imm26j")); - PrintPCImm26(instr); - break; - } - } - return 6; - } else if (format[3] == '5') { - DCHECK(STRING_STARTS_WITH(format, "imm5")); - if (format[4] == 'u') { - DCHECK(STRING_STARTS_WITH(format, "imm5u")); - PrintMsaImm5(instr); - } else if (format[4] == 's') { - DCHECK(STRING_STARTS_WITH(format, "imm5s")); - PrintMsaSImm5(instr); - } - return 5; - } else if (format[3] == '8') { - DCHECK(STRING_STARTS_WITH(format, "imm8")); - PrintMsaImm8(instr); - return 4; - } else if (format[3] == '9') { - DCHECK(STRING_STARTS_WITH(format, "imm9")); - if (format[4] == 'u') { - DCHECK(STRING_STARTS_WITH(format, "imm9u")); - PrintUImm9(instr); - } else if (format[4] == 's') { - DCHECK(STRING_STARTS_WITH(format, "imm9s")); - PrintSImm9(instr); - } - return 5; - } else if (format[3] == 'b') { - DCHECK(STRING_STARTS_WITH(format, "immb")); - PrintMsaImmBit(instr); - return 4; - } else if (format[3] == 'e') { - DCHECK(STRING_STARTS_WITH(format, "imme")); - PrintMsaImmElm(instr); - return 4; - } - UNREACHABLE(); - } - case 'r': { // 'r: registers. - return FormatRegister(instr, format); - } - case 'f': { // 'f: FPUregisters. - return FormatFPURegister(instr, format); - } - case 'w': { // 'w: MSA Register - return FormatMSARegister(instr, format); - } - case 's': { // 'sa. - switch (format[1]) { - case 'a': - if (format[2] == '2') { - DCHECK(STRING_STARTS_WITH(format, "sa2")); // 'sa2 - PrintLsaSa(instr); - return 3; - } else { - DCHECK(STRING_STARTS_WITH(format, "sa")); - PrintSa(instr); - return 2; - } - case 'd': { - DCHECK(STRING_STARTS_WITH(format, "sd")); - PrintSd(instr); - return 2; - } - case 's': { - if (format[2] == '1') { - DCHECK(STRING_STARTS_WITH(format, "ss1")); /* ext size */ - PrintSs1(instr); - return 3; - } else { - DCHECK(STRING_STARTS_WITH(format, "ss2")); /* ins size */ - PrintSs2(instr); - return 3; - } - } - } - } - case 'b': { - switch (format[1]) { - case 'c': { // 'bc - Special for bc1 cc field. - DCHECK(STRING_STARTS_WITH(format, "bc")); - PrintBc(instr); - return 2; - } - case 'p': { - switch (format[2]) { - case '2': { // 'bp2 - DCHECK(STRING_STARTS_WITH(format, "bp2")); - PrintBp2(instr); - return 3; - } - } - } - } - } - case 'C': { // 'Cc - Special for c.xx.d cc field. - DCHECK(STRING_STARTS_WITH(format, "Cc")); - PrintCc(instr); - return 2; - } - case 't': - if (instr->IsMSAInstr()) { - PrintMsaDataFormat(instr); - } else { - PrintFormat(instr); - } - return 1; - } - UNREACHABLE(); -} - -// Format takes a formatting string for a whole instruction and prints it into -// the output buffer. All escaped options are handed to FormatOption to be -// parsed further. -void Decoder::Format(Instruction* instr, const char* format) { - char cur = *format++; - while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) { - if (cur == '\'') { // Single quote is used as the formatting escape. - format += FormatOption(instr, format); - } else { - out_buffer_[out_buffer_pos_++] = cur; - } - cur = *format++; - } - out_buffer_[out_buffer_pos_] = '\0'; -} - -// For currently unimplemented decodings the disassembler calls Unknown(instr) -// which will just print "unknown" of the instruction bits. 
-void Decoder::Unknown(Instruction* instr) { Format(instr, "unknown"); } - -bool Decoder::DecodeTypeRegisterRsType(Instruction* instr) { - switch (instr->FunctionFieldRaw()) { - case RINT: - Format(instr, "rint.'t 'fd, 'fs"); - break; - case MIN: - Format(instr, "min.'t 'fd, 'fs, 'ft"); - break; - case MAX: - Format(instr, "max.'t 'fd, 'fs, 'ft"); - break; - case MINA: - Format(instr, "mina.'t 'fd, 'fs, 'ft"); - break; - case MAXA: - Format(instr, "maxa.'t 'fd, 'fs, 'ft"); - break; - case SEL: - Format(instr, "sel.'t 'fd, 'fs, 'ft"); - break; - case SELEQZ_C: - Format(instr, "seleqz.'t 'fd, 'fs, 'ft"); - break; - case SELNEZ_C: - Format(instr, "selnez.'t 'fd, 'fs, 'ft"); - break; - case MOVZ_C: - Format(instr, "movz.'t 'fd, 'fs, 'rt"); - break; - case MOVN_C: - Format(instr, "movn.'t 'fd, 'fs, 'rt"); - break; - case MOVF: - if (instr->Bit(16)) { - Format(instr, "movt.'t 'fd, 'fs, 'Cc"); - } else { - Format(instr, "movf.'t 'fd, 'fs, 'Cc"); - } - break; - case ADD_D: - Format(instr, "add.'t 'fd, 'fs, 'ft"); - break; - case SUB_D: - Format(instr, "sub.'t 'fd, 'fs, 'ft"); - break; - case MUL_D: - Format(instr, "mul.'t 'fd, 'fs, 'ft"); - break; - case DIV_D: - Format(instr, "div.'t 'fd, 'fs, 'ft"); - break; - case ABS_D: - Format(instr, "abs.'t 'fd, 'fs"); - break; - case MOV_D: - Format(instr, "mov.'t 'fd, 'fs"); - break; - case NEG_D: - Format(instr, "neg.'t 'fd, 'fs"); - break; - case SQRT_D: - Format(instr, "sqrt.'t 'fd, 'fs"); - break; - case RECIP_D: - Format(instr, "recip.'t 'fd, 'fs"); - break; - case RSQRT_D: - Format(instr, "rsqrt.'t 'fd, 'fs"); - break; - case CVT_W_D: - Format(instr, "cvt.w.'t 'fd, 'fs"); - break; - case CVT_L_D: - Format(instr, "cvt.l.'t 'fd, 'fs"); - break; - case TRUNC_W_D: - Format(instr, "trunc.w.'t 'fd, 'fs"); - break; - case TRUNC_L_D: - Format(instr, "trunc.l.'t 'fd, 'fs"); - break; - case ROUND_W_D: - Format(instr, "round.w.'t 'fd, 'fs"); - break; - case ROUND_L_D: - Format(instr, "round.l.'t 'fd, 'fs"); - break; - case FLOOR_W_D: - Format(instr, "floor.w.'t 'fd, 'fs"); - break; - case FLOOR_L_D: - Format(instr, "floor.l.'t 'fd, 'fs"); - break; - case CEIL_W_D: - Format(instr, "ceil.w.'t 'fd, 'fs"); - break; - case CLASS_D: - Format(instr, "class.'t 'fd, 'fs"); - break; - case CEIL_L_D: - Format(instr, "ceil.l.'t 'fd, 'fs"); - break; - case CVT_S_D: - Format(instr, "cvt.s.'t 'fd, 'fs"); - break; - case C_F_D: - Format(instr, "c.f.'t 'fs, 'ft, 'Cc"); - break; - case C_UN_D: - Format(instr, "c.un.'t 'fs, 'ft, 'Cc"); - break; - case C_EQ_D: - Format(instr, "c.eq.'t 'fs, 'ft, 'Cc"); - break; - case C_UEQ_D: - Format(instr, "c.ueq.'t 'fs, 'ft, 'Cc"); - break; - case C_OLT_D: - Format(instr, "c.olt.'t 'fs, 'ft, 'Cc"); - break; - case C_ULT_D: - Format(instr, "c.ult.'t 'fs, 'ft, 'Cc"); - break; - case C_OLE_D: - Format(instr, "c.ole.'t 'fs, 'ft, 'Cc"); - break; - case C_ULE_D: - Format(instr, "c.ule.'t 'fs, 'ft, 'Cc"); - break; - default: - return false; - } - return true; -} - -void Decoder::DecodeTypeRegisterSRsType(Instruction* instr) { - if (!DecodeTypeRegisterRsType(instr)) { - switch (instr->FunctionFieldRaw()) { - case CVT_D_S: - Format(instr, "cvt.d.'t 'fd, 'fs"); - break; - case MADDF_S: - Format(instr, "maddf.s 'fd, 'fs, 'ft"); - break; - case MSUBF_S: - Format(instr, "msubf.s 'fd, 'fs, 'ft"); - break; - default: - Format(instr, "unknown.cop1.'t"); - break; - } - } -} - -void Decoder::DecodeTypeRegisterDRsType(Instruction* instr) { - if (!DecodeTypeRegisterRsType(instr)) { - switch (instr->FunctionFieldRaw()) { - case MADDF_D: - Format(instr, "maddf.d 
'fd, 'fs, 'ft"); - break; - case MSUBF_D: - Format(instr, "msubf.d 'fd, 'fs, 'ft"); - break; - default: - Format(instr, "unknown.cop1.'t"); - break; - } - } -} - -void Decoder::DecodeTypeRegisterLRsType(Instruction* instr) { - switch (instr->FunctionFieldRaw()) { - case CVT_D_L: - Format(instr, "cvt.d.l 'fd, 'fs"); - break; - case CVT_S_L: - Format(instr, "cvt.s.l 'fd, 'fs"); - break; - case CMP_AF: - Format(instr, "cmp.af.d 'fd, 'fs, 'ft"); - break; - case CMP_UN: - Format(instr, "cmp.un.d 'fd, 'fs, 'ft"); - break; - case CMP_EQ: - Format(instr, "cmp.eq.d 'fd, 'fs, 'ft"); - break; - case CMP_UEQ: - Format(instr, "cmp.ueq.d 'fd, 'fs, 'ft"); - break; - case CMP_LT: - Format(instr, "cmp.lt.d 'fd, 'fs, 'ft"); - break; - case CMP_ULT: - Format(instr, "cmp.ult.d 'fd, 'fs, 'ft"); - break; - case CMP_LE: - Format(instr, "cmp.le.d 'fd, 'fs, 'ft"); - break; - case CMP_ULE: - Format(instr, "cmp.ule.d 'fd, 'fs, 'ft"); - break; - case CMP_OR: - Format(instr, "cmp.or.d 'fd, 'fs, 'ft"); - break; - case CMP_UNE: - Format(instr, "cmp.une.d 'fd, 'fs, 'ft"); - break; - case CMP_NE: - Format(instr, "cmp.ne.d 'fd, 'fs, 'ft"); - break; - default: - UNREACHABLE(); - } -} - -void Decoder::DecodeTypeRegisterWRsType(Instruction* instr) { - switch (instr->FunctionValue()) { - case CVT_S_W: // Convert word to float (single). - Format(instr, "cvt.s.w 'fd, 'fs"); - break; - case CVT_D_W: // Convert word to double. - Format(instr, "cvt.d.w 'fd, 'fs"); - break; - case CMP_AF: - Format(instr, "cmp.af.s 'fd, 'fs, 'ft"); - break; - case CMP_UN: - Format(instr, "cmp.un.s 'fd, 'fs, 'ft"); - break; - case CMP_EQ: - Format(instr, "cmp.eq.s 'fd, 'fs, 'ft"); - break; - case CMP_UEQ: - Format(instr, "cmp.ueq.s 'fd, 'fs, 'ft"); - break; - case CMP_LT: - Format(instr, "cmp.lt.s 'fd, 'fs, 'ft"); - break; - case CMP_ULT: - Format(instr, "cmp.ult.s 'fd, 'fs, 'ft"); - break; - case CMP_LE: - Format(instr, "cmp.le.s 'fd, 'fs, 'ft"); - break; - case CMP_ULE: - Format(instr, "cmp.ule.s 'fd, 'fs, 'ft"); - break; - case CMP_OR: - Format(instr, "cmp.or.s 'fd, 'fs, 'ft"); - break; - case CMP_UNE: - Format(instr, "cmp.une.s 'fd, 'fs, 'ft"); - break; - case CMP_NE: - Format(instr, "cmp.ne.s 'fd, 'fs, 'ft"); - break; - default: - UNREACHABLE(); - } -} - -void Decoder::DecodeTypeRegisterSPECIAL(Instruction* instr) { - switch (instr->FunctionFieldRaw()) { - case JR: - Format(instr, "jr 'rs"); - break; - case JALR: - Format(instr, "jalr 'rs, 'rd"); - break; - case SLL: - if (0x0 == static_cast<int>(instr->InstructionBits())) - Format(instr, "nop"); - else - Format(instr, "sll 'rd, 'rt, 'sa"); - break; - case SRL: - if (instr->RsValue() == 0) { - Format(instr, "srl 'rd, 'rt, 'sa"); - } else { - if (IsMipsArchVariant(kMips32r2)) { - Format(instr, "rotr 'rd, 'rt, 'sa"); - } else { - Unknown(instr); - } - } - break; - case SRA: - Format(instr, "sra 'rd, 'rt, 'sa"); - break; - case SLLV: - Format(instr, "sllv 'rd, 'rt, 'rs"); - break; - case SRLV: - if (instr->SaValue() == 0) { - Format(instr, "srlv 'rd, 'rt, 'rs"); - } else { - if (IsMipsArchVariant(kMips32r2)) { - Format(instr, "rotrv 'rd, 'rt, 'rs"); - } else { - Unknown(instr); - } - } - break; - case SRAV: - Format(instr, "srav 'rd, 'rt, 'rs"); - break; - case LSA: - Format(instr, "lsa 'rd, 'rt, 'rs, 'sa2"); - break; - case MFHI: - if (instr->Bits(25, 16) == 0) { - Format(instr, "mfhi 'rd"); - } else { - if ((instr->FunctionFieldRaw() == CLZ_R6) && (instr->FdValue() == 1)) { - Format(instr, "clz 'rd, 'rs"); - } else if ((instr->FunctionFieldRaw() == CLO_R6) && - (instr->FdValue() == 1)) { -
Format(instr, "clo 'rd, 'rs"); - } - } - break; - case MFLO: - Format(instr, "mflo 'rd"); - break; - case MULT: // @Mips32r6 == MUL_MUH. - if (!IsMipsArchVariant(kMips32r6)) { - Format(instr, "mult 'rs, 'rt"); - } else { - if (instr->SaValue() == MUL_OP) { - Format(instr, "mul 'rd, 'rs, 'rt"); - } else { - Format(instr, "muh 'rd, 'rs, 'rt"); - } - } - break; - case MULTU: // @Mips32r6 == MUL_MUH_U. - if (!IsMipsArchVariant(kMips32r6)) { - Format(instr, "multu 'rs, 'rt"); - } else { - if (instr->SaValue() == MUL_OP) { - Format(instr, "mulu 'rd, 'rs, 'rt"); - } else { - Format(instr, "muhu 'rd, 'rs, 'rt"); - } - } - break; - case DIV: // @Mips32r6 == DIV_MOD. - if (!IsMipsArchVariant(kMips32r6)) { - Format(instr, "div 'rs, 'rt"); - } else { - if (instr->SaValue() == DIV_OP) { - Format(instr, "div 'rd, 'rs, 'rt"); - } else { - Format(instr, "mod 'rd, 'rs, 'rt"); - } - } - break; - case DIVU: // @Mips32r6 == DIV_MOD_U. - if (!IsMipsArchVariant(kMips32r6)) { - Format(instr, "divu 'rs, 'rt"); - } else { - if (instr->SaValue() == DIV_OP) { - Format(instr, "divu 'rd, 'rs, 'rt"); - } else { - Format(instr, "modu 'rd, 'rs, 'rt"); - } - } - break; - case ADD: - Format(instr, "add 'rd, 'rs, 'rt"); - break; - case ADDU: - Format(instr, "addu 'rd, 'rs, 'rt"); - break; - case SUB: - Format(instr, "sub 'rd, 'rs, 'rt"); - break; - case SUBU: - Format(instr, "subu 'rd, 'rs, 'rt"); - break; - case AND: - Format(instr, "and 'rd, 'rs, 'rt"); - break; - case OR: - if (0 == instr->RsValue()) { - Format(instr, "mov 'rd, 'rt"); - } else if (0 == instr->RtValue()) { - Format(instr, "mov 'rd, 'rs"); - } else { - Format(instr, "or 'rd, 'rs, 'rt"); - } - break; - case XOR: - Format(instr, "xor 'rd, 'rs, 'rt"); - break; - case NOR: - Format(instr, "nor 'rd, 'rs, 'rt"); - break; - case SLT: - Format(instr, "slt 'rd, 'rs, 'rt"); - break; - case SLTU: - Format(instr, "sltu 'rd, 'rs, 'rt"); - break; - case BREAK: - Format(instr, "break, code: 'code"); - break; - case TGE: - Format(instr, "tge 'rs, 'rt, code: 'code"); - break; - case TGEU: - Format(instr, "tgeu 'rs, 'rt, code: 'code"); - break; - case TLT: - Format(instr, "tlt 'rs, 'rt, code: 'code"); - break; - case TLTU: - Format(instr, "tltu 'rs, 'rt, code: 'code"); - break; - case TEQ: - Format(instr, "teq 'rs, 'rt, code: 'code"); - break; - case TNE: - Format(instr, "tne 'rs, 'rt, code: 'code"); - break; - case SYNC: - Format(instr, "sync"); - break; - case MOVZ: - Format(instr, "movz 'rd, 'rs, 'rt"); - break; - case MOVN: - Format(instr, "movn 'rd, 'rs, 'rt"); - break; - case MOVCI: - if (instr->Bit(16)) { - Format(instr, "movt 'rd, 'rs, 'bc"); - } else { - Format(instr, "movf 'rd, 'rs, 'bc"); - } - break; - case SELEQZ_S: - Format(instr, "seleqz 'rd, 'rs, 'rt"); - break; - case SELNEZ_S: - Format(instr, "selnez 'rd, 'rs, 'rt"); - break; - default: - UNREACHABLE(); - } -} - -void Decoder::DecodeTypeRegisterSPECIAL2(Instruction* instr) { - switch (instr->FunctionFieldRaw()) { - case MUL: - Format(instr, "mul 'rd, 'rs, 'rt"); - break; - case CLZ: - if (!IsMipsArchVariant(kMips32r6)) { - Format(instr, "clz 'rd, 'rs"); - } - break; - default: - UNREACHABLE(); - } -} - -void Decoder::DecodeTypeRegisterSPECIAL3(Instruction* instr) { - switch (instr->FunctionFieldRaw()) { - case INS: { - if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { - Format(instr, "ins 'rt, 'rs, 'sa, 'ss2"); - } else { - Unknown(instr); - } - break; - } - case EXT: { - if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { - Format(instr, "ext 'rt, 'rs, 'sa, 'ss1"); - } 
else { - Unknown(instr); - } - break; - } - case BSHFL: { - int sa = instr->SaFieldRaw() >> kSaShift; - switch (sa) { - case BITSWAP: { - if (IsMipsArchVariant(kMips32r6)) { - Format(instr, "bitswap 'rd, 'rt"); - } else { - Unknown(instr); - } - break; - } - case SEB: { - if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { - Format(instr, "seb 'rd, 'rt"); - } else { - Unknown(instr); - } - break; - } - case SEH: { - if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { - Format(instr, "seh 'rd, 'rt"); - } else { - Unknown(instr); - } - break; - } - case WSBH: { - if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { - Format(instr, "wsbh 'rd, 'rt"); - } else { - Unknown(instr); - } - break; - } - case LL_R6: { - DCHECK(IsMipsArchVariant(kMips32r6)); - Format(instr, "llwp 'rd, 'rt, 0('rs)"); - break; - } - case SC_R6: { - DCHECK(IsMipsArchVariant(kMips32r6)); - Format(instr, "scwp 'rd, 'rt, 0('rs)"); - break; - } - default: { - sa >>= kBp2Bits; - switch (sa) { - case ALIGN: { - if (IsMipsArchVariant(kMips32r6)) { - Format(instr, "align 'rd, 'rs, 'rt, 'bp2"); - } else { - Unknown(instr); - } - break; - } - default: - UNREACHABLE(); - } - } - } - break; - } - default: - UNREACHABLE(); - } -} - -void Decoder::DecodeTypeRegister(Instruction* instr) { - switch (instr->OpcodeFieldRaw()) { - case COP1: // Coprocessor instructions. - switch (instr->RsFieldRaw()) { - case BC1: // bc1 handled in DecodeTypeImmediate. - UNREACHABLE(); - case MFC1: - Format(instr, "mfc1 'rt, 'fs"); - break; - case MFHC1: - Format(instr, "mfhc1 'rt, 'fs"); - break; - case MTC1: - Format(instr, "mtc1 'rt, 'fs"); - break; - // These are called "fs" too, although they are not FPU registers. - case CTC1: - Format(instr, "ctc1 'rt, 'fs"); - break; - case CFC1: - Format(instr, "cfc1 'rt, 'fs"); - break; - case MTHC1: - Format(instr, "mthc1 'rt, 'fs"); - break; - case S: - DecodeTypeRegisterSRsType(instr); - break; - case D: - DecodeTypeRegisterDRsType(instr); - break; - case L: - DecodeTypeRegisterLRsType(instr); - break; - case W: - DecodeTypeRegisterWRsType(instr); - break; - case PS: - UNIMPLEMENTED_MIPS(); - break; - default: - UNREACHABLE(); - } - break; - case COP1X: - switch (instr->FunctionFieldRaw()) { - case MADD_S: - Format(instr, "madd.s 'fd, 'fr, 'fs, 'ft"); - break; - case MADD_D: - Format(instr, "madd.d 'fd, 'fr, 'fs, 'ft"); - break; - case MSUB_S: - Format(instr, "msub.s 'fd, 'fr, 'fs, 'ft"); - break; - case MSUB_D: - Format(instr, "msub.d 'fd, 'fr, 'fs, 'ft"); - break; - default: - UNREACHABLE(); - } - break; - case SPECIAL: - DecodeTypeRegisterSPECIAL(instr); - break; - case SPECIAL2: - DecodeTypeRegisterSPECIAL2(instr); - break; - case SPECIAL3: - DecodeTypeRegisterSPECIAL3(instr); - break; - case MSA: - switch (instr->MSAMinorOpcodeField()) { - case kMsaMinor3R: - DecodeTypeMsa3R(instr); - break; - case kMsaMinor3RF: - DecodeTypeMsa3RF(instr); - break; - case kMsaMinorVEC: - DecodeTypeMsaVec(instr); - break; - case kMsaMinor2R: - DecodeTypeMsa2R(instr); - break; - case kMsaMinor2RF: - DecodeTypeMsa2RF(instr); - break; - case kMsaMinorELM: - DecodeTypeMsaELM(instr); - break; - default: - UNREACHABLE(); - } - break; - default: - UNREACHABLE(); - } -} - -void Decoder::DecodeTypeImmediateSPECIAL3(Instruction* instr) { - switch (instr->FunctionFieldRaw()) { - case LL_R6: { - if (IsMipsArchVariant(kMips32r6)) { - if (instr->Bit(6)) { - Format(instr, "llx 'rt, 'imm9s('rs)"); - } else { - Format(instr, "ll 'rt, 'imm9s('rs)"); - } - } else { - Unknown(instr); - } - break; 
- } - case SC_R6: { - if (IsMipsArchVariant(kMips32r6)) { - if (instr->Bit(6)) { - Format(instr, "scx 'rt, 'imm9s('rs)"); - } else { - Format(instr, "sc 'rt, 'imm9s('rs)"); - } - } else { - Unknown(instr); - } - break; - } - default: - UNREACHABLE(); - } -} - -void Decoder::DecodeTypeImmediate(Instruction* instr) { - switch (instr->OpcodeFieldRaw()) { - case COP1: - switch (instr->RsFieldRaw()) { - case BC1: - if (instr->FBtrueValue()) { - Format(instr, "bc1t 'bc, 'imm16u -> 'imm16p4s2"); - } else { - Format(instr, "bc1f 'bc, 'imm16u -> 'imm16p4s2"); - } - break; - case BC1EQZ: - Format(instr, "bc1eqz 'ft, 'imm16u -> 'imm16p4s2"); - break; - case BC1NEZ: - Format(instr, "bc1nez 'ft, 'imm16u -> 'imm16p4s2"); - break; - case BZ_V: - case BZ_B: - case BZ_H: - case BZ_W: - case BZ_D: - Format(instr, "bz.'t 'wt, 'imm16s -> 'imm16p4s2"); - break; - case BNZ_V: - case BNZ_B: - case BNZ_H: - case BNZ_W: - case BNZ_D: - Format(instr, "bnz.'t 'wt, 'imm16s -> 'imm16p4s2"); - break; - default: - UNREACHABLE(); - } - - break; // Case COP1. - // ------------- REGIMM class. - case REGIMM: - switch (instr->RtFieldRaw()) { - case BLTZ: - Format(instr, "bltz 'rs, 'imm16u -> 'imm16p4s2"); - break; - case BLTZAL: - if (instr->RsValue() == 0) { - Format(instr, "nal"); - } else { - Format(instr, "bltzal 'rs, 'imm16u -> 'imm16p4s2"); - } - break; - case BGEZ: - Format(instr, "bgez 'rs, 'imm16u -> 'imm16p4s2"); - break; - case BGEZAL: { - if (instr->RsValue() == 0) - Format(instr, "bal 'imm16s -> 'imm16p4s2"); - else - Format(instr, "bgezal 'rs, 'imm16u -> 'imm16p4s2"); - break; - } - case BGEZALL: - Format(instr, "bgezall 'rs, 'imm16u -> 'imm16p4s2"); - break; - default: - UNREACHABLE(); - } - break; // Case REGIMM. - // ------------- Branch instructions. - case BEQ: - Format(instr, "beq 'rs, 'rt, 'imm16u -> 'imm16p4s2"); - break; - case BC: - Format(instr, "bc 'imm26s -> 'imm26p4s2"); - break; - case BALC: - Format(instr, "balc 'imm26s -> 'imm26p4s2"); - break; - case BNE: - Format(instr, "bne 'rs, 'rt, 'imm16u -> 'imm16p4s2"); - break; - case BLEZ: - if ((instr->RtValue() == 0) && (instr->RsValue() != 0)) { - Format(instr, "blez 'rs, 'imm16u -> 'imm16p4s2"); - } else if ((instr->RtValue() != instr->RsValue()) && - (instr->RsValue() != 0) && (instr->RtValue() != 0)) { - Format(instr, "bgeuc 'rs, 'rt, 'imm16u -> 'imm16p4s2"); - } else if ((instr->RtValue() == instr->RsValue()) && - (instr->RtValue() != 0)) { - Format(instr, "bgezalc 'rs, 'imm16u -> 'imm16p4s2"); - } else if ((instr->RsValue() == 0) && (instr->RtValue() != 0)) { - Format(instr, "blezalc 'rt, 'imm16u -> 'imm16p4s2"); - } else { - UNREACHABLE(); - } - break; - case BGTZ: - if ((instr->RtValue() == 0) && (instr->RsValue() != 0)) { - Format(instr, "bgtz 'rs, 'imm16u -> 'imm16p4s2"); - } else if ((instr->RtValue() != instr->RsValue()) && - (instr->RsValue() != 0) && (instr->RtValue() != 0)) { - Format(instr, "bltuc 'rs, 'rt, 'imm16u -> 'imm16p4s2"); - } else if ((instr->RtValue() == instr->RsValue()) && - (instr->RtValue() != 0)) { - Format(instr, "bltzalc 'rt, 'imm16u -> 'imm16p4s2"); - } else if ((instr->RsValue() == 0) && (instr->RtValue() != 0)) { - Format(instr, "bgtzalc 'rt, 'imm16u -> 'imm16p4s2"); - } else { - UNREACHABLE(); - } - break; - case BLEZL: - if ((instr->RtValue() == instr->RsValue()) && (instr->RtValue() != 0)) { - Format(instr, "bgezc 'rt, 'imm16u -> 'imm16p4s2"); - } else if ((instr->RtValue() != instr->RsValue()) && - (instr->RsValue() != 0) && (instr->RtValue() != 0)) { - Format(instr, "bgec 'rs, 'rt, 'imm16u -> 'imm16p4s2"); - 
} else if ((instr->RsValue() == 0) && (instr->RtValue() != 0)) { - Format(instr, "blezc 'rt, 'imm16u -> 'imm16p4s2"); - } else { - UNREACHABLE(); - } - break; - case BGTZL: - if ((instr->RtValue() == instr->RsValue()) && (instr->RtValue() != 0)) { - Format(instr, "bltzc 'rt, 'imm16u -> 'imm16p4s2"); - } else if ((instr->RtValue() != instr->RsValue()) && - (instr->RsValue() != 0) && (instr->RtValue() != 0)) { - Format(instr, "bltc 'rs, 'rt, 'imm16u -> 'imm16p4s2"); - } else if ((instr->RsValue() == 0) && (instr->RtValue() != 0)) { - Format(instr, "bgtzc 'rt, 'imm16u -> 'imm16p4s2"); - } else { - UNREACHABLE(); - } - break; - case POP66: - if (instr->RsValue() == JIC) { - Format(instr, "jic 'rt, 'imm16s"); - } else { - Format(instr, "beqzc 'rs, 'imm21s -> 'imm21p4s2"); - } - break; - case POP76: - if (instr->RsValue() == JIALC) { - Format(instr, "jialc 'rt, 'imm16s"); - } else { - Format(instr, "bnezc 'rs, 'imm21s -> 'imm21p4s2"); - } - break; - // ------------- Arithmetic instructions. - case ADDI: - if (!IsMipsArchVariant(kMips32r6)) { - Format(instr, "addi 'rt, 'rs, 'imm16s"); - } else { - int rs_reg = instr->RsValue(); - int rt_reg = instr->RtValue(); - // Check if BOVC, BEQZALC or BEQC instruction. - if (rs_reg >= rt_reg) { - Format(instr, "bovc 'rs, 'rt, 'imm16s -> 'imm16p4s2"); - } else { - DCHECK_GT(rt_reg, 0); - if (rs_reg == 0) { - Format(instr, "beqzalc 'rt, 'imm16s -> 'imm16p4s2"); - } else { - Format(instr, "beqc 'rs, 'rt, 'imm16s -> 'imm16p4s2"); - } - } - } - break; - case DADDI: - if (IsMipsArchVariant(kMips32r6)) { - int rs_reg = instr->RsValue(); - int rt_reg = instr->RtValue(); - // Check if BNVC, BNEZALC or BNEC instruction. - if (rs_reg >= rt_reg) { - Format(instr, "bnvc 'rs, 'rt, 'imm16s -> 'imm16p4s2"); - } else { - DCHECK_GT(rt_reg, 0); - if (rs_reg == 0) { - Format(instr, "bnezalc 'rt, 'imm16s -> 'imm16p4s2"); - } else { - Format(instr, "bnec 'rs, 'rt, 'imm16s -> 'imm16p4s2"); - } - } - } - break; - case ADDIU: - Format(instr, "addiu 'rt, 'rs, 'imm16s"); - break; - case SLTI: - Format(instr, "slti 'rt, 'rs, 'imm16s"); - break; - case SLTIU: - Format(instr, "sltiu 'rt, 'rs, 'imm16u"); - break; - case ANDI: - Format(instr, "andi 'rt, 'rs, 'imm16x"); - break; - case ORI: - Format(instr, "ori 'rt, 'rs, 'imm16x"); - break; - case XORI: - Format(instr, "xori 'rt, 'rs, 'imm16x"); - break; - case LUI: - if (!IsMipsArchVariant(kMips32r6)) { - Format(instr, "lui 'rt, 'imm16x"); - } else { - if (instr->RsValue() != 0) { - Format(instr, "aui 'rt, 'rs, 'imm16x"); - } else { - Format(instr, "lui 'rt, 'imm16x"); - } - } - break; - // ------------- Memory instructions. 
- case LB: - Format(instr, "lb 'rt, 'imm16s('rs)"); - break; - case LH: - Format(instr, "lh 'rt, 'imm16s('rs)"); - break; - case LWL: - Format(instr, "lwl 'rt, 'imm16s('rs)"); - break; - case LW: - Format(instr, "lw 'rt, 'imm16s('rs)"); - break; - case LBU: - Format(instr, "lbu 'rt, 'imm16s('rs)"); - break; - case LHU: - Format(instr, "lhu 'rt, 'imm16s('rs)"); - break; - case LWR: - Format(instr, "lwr 'rt, 'imm16s('rs)"); - break; - case PREF: - Format(instr, "pref 'rt, 'imm16s('rs)"); - break; - case SB: - Format(instr, "sb 'rt, 'imm16s('rs)"); - break; - case SH: - Format(instr, "sh 'rt, 'imm16s('rs)"); - break; - case SWL: - Format(instr, "swl 'rt, 'imm16s('rs)"); - break; - case SW: - Format(instr, "sw 'rt, 'imm16s('rs)"); - break; - case SWR: - Format(instr, "swr 'rt, 'imm16s('rs)"); - break; - case LL: - if (IsMipsArchVariant(kMips32r6)) { - Unknown(instr); - } else { - Format(instr, "ll 'rt, 'imm16s('rs)"); - } - break; - case SC: - if (IsMipsArchVariant(kMips32r6)) { - Unknown(instr); - } else { - Format(instr, "sc 'rt, 'imm16s('rs)"); - } - break; - case LWC1: - Format(instr, "lwc1 'ft, 'imm16s('rs)"); - break; - case LDC1: - Format(instr, "ldc1 'ft, 'imm16s('rs)"); - break; - case SWC1: - Format(instr, "swc1 'ft, 'imm16s('rs)"); - break; - case SDC1: - Format(instr, "sdc1 'ft, 'imm16s('rs)"); - break; - case PCREL: { - int32_t imm21 = instr->Imm21Value(); - // rt field: 5-bits checking - uint8_t rt = (imm21 >> kImm16Bits); - switch (rt) { - case ALUIPC: - Format(instr, "aluipc 'rs, 'imm16s"); - break; - case AUIPC: - Format(instr, "auipc 'rs, 'imm16s"); - break; - default: { - // rt field: checking of the most significant 2-bits - rt = (imm21 >> kImm19Bits); - switch (rt) { - case LWPC: - Format(instr, "lwpc 'rs, 'imm19s"); - break; - case ADDIUPC: - Format(instr, "addiupc 'rs, 'imm19s"); - break; - default: - UNREACHABLE(); - } - } - } - break; - } - case SPECIAL3: - DecodeTypeImmediateSPECIAL3(instr); - break; - case MSA: - switch (instr->MSAMinorOpcodeField()) { - case kMsaMinorI8: - DecodeTypeMsaI8(instr); - break; - case kMsaMinorI5: - DecodeTypeMsaI5(instr); - break; - case kMsaMinorI10: - DecodeTypeMsaI10(instr); - break; - case kMsaMinorELM: - DecodeTypeMsaELM(instr); - break; - case kMsaMinorBIT: - DecodeTypeMsaBIT(instr); - break; - case kMsaMinorMI10: - DecodeTypeMsaMI10(instr); - break; - default: - UNREACHABLE(); - } - break; - default: - printf("a 0x%x \n", instr->OpcodeFieldRaw()); - UNREACHABLE(); - } -} - -void Decoder::DecodeTypeJump(Instruction* instr) { - switch (instr->OpcodeFieldRaw()) { - case J: - Format(instr, "j 'imm26x -> 'imm26j"); - break; - case JAL: - Format(instr, "jal 'imm26x -> 'imm26j"); - break; - default: - UNREACHABLE(); - } -} - -void Decoder::DecodeTypeMsaI8(Instruction* instr) { - uint32_t opcode = instr->InstructionBits() & kMsaI8Mask; - - switch (opcode) { - case ANDI_B: - Format(instr, "andi.b 'wd, 'ws, 'imm8"); - break; - case ORI_B: - Format(instr, "ori.b 'wd, 'ws, 'imm8"); - break; - case NORI_B: - Format(instr, "nori.b 'wd, 'ws, 'imm8"); - break; - case XORI_B: - Format(instr, "xori.b 'wd, 'ws, 'imm8"); - break; - case BMNZI_B: - Format(instr, "bmnzi.b 'wd, 'ws, 'imm8"); - break; - case BMZI_B: - Format(instr, "bmzi.b 'wd, 'ws, 'imm8"); - break; - case BSELI_B: - Format(instr, "bseli.b 'wd, 'ws, 'imm8"); - break; - case SHF_B: - Format(instr, "shf.b 'wd, 'ws, 'imm8"); - break; - case SHF_H: - Format(instr, "shf.h 'wd, 'ws, 'imm8"); - break; - case SHF_W: - Format(instr, "shf.w 'wd, 'ws, 'imm8"); - break; - default: - UNREACHABLE(); 
- } -} - -void Decoder::DecodeTypeMsaI5(Instruction* instr) { - uint32_t opcode = instr->InstructionBits() & kMsaI5Mask; - - switch (opcode) { - case ADDVI: - Format(instr, "addvi.'t 'wd, 'ws, 'imm5u"); - break; - case SUBVI: - Format(instr, "subvi.'t 'wd, 'ws, 'imm5u"); - break; - case MAXI_S: - Format(instr, "maxi_s.'t 'wd, 'ws, 'imm5s"); - break; - case MAXI_U: - Format(instr, "maxi_u.'t 'wd, 'ws, 'imm5u"); - break; - case MINI_S: - Format(instr, "mini_s.'t 'wd, 'ws, 'imm5s"); - break; - case MINI_U: - Format(instr, "mini_u.'t 'wd, 'ws, 'imm5u"); - break; - case CEQI: - Format(instr, "ceqi.'t 'wd, 'ws, 'imm5s"); - break; - case CLTI_S: - Format(instr, "clti_s.'t 'wd, 'ws, 'imm5s"); - break; - case CLTI_U: - Format(instr, "clti_u.'t 'wd, 'ws, 'imm5u"); - break; - case CLEI_S: - Format(instr, "clei_s.'t 'wd, 'ws, 'imm5s"); - break; - case CLEI_U: - Format(instr, "clei_u.'t 'wd, 'ws, 'imm5u"); - break; - default: - UNREACHABLE(); - } -} - -void Decoder::DecodeTypeMsaI10(Instruction* instr) { - uint32_t opcode = instr->InstructionBits() & kMsaI5Mask; - if (opcode == LDI) { - Format(instr, "ldi.'t 'wd, 'imm10s1"); - } else { - UNREACHABLE(); - } -} - -void Decoder::DecodeTypeMsaELM(Instruction* instr) { - uint32_t opcode = instr->InstructionBits() & kMsaELMMask; - switch (opcode) { - case SLDI: - if (instr->Bits(21, 16) == 0x3E) { - Format(instr, "ctcmsa "); - PrintMSAControlRegister(instr->WdValue()); - Print(", "); - PrintRegister(instr->WsValue()); - } else { - Format(instr, "sldi.'t 'wd, 'ws['imme]"); - } - break; - case SPLATI: - if (instr->Bits(21, 16) == 0x3E) { - Format(instr, "cfcmsa "); - PrintRegister(instr->WdValue()); - Print(", "); - PrintMSAControlRegister(instr->WsValue()); - } else { - Format(instr, "splati.'t 'wd, 'ws['imme]"); - } - break; - case COPY_S: - if (instr->Bits(21, 16) == 0x3E) { - Format(instr, "move.v 'wd, 'ws"); - } else { - Format(instr, "copy_s.'t "); - PrintMsaCopy(instr); - } - break; - case COPY_U: - Format(instr, "copy_u.'t "); - PrintMsaCopy(instr); - break; - case INSERT: - Format(instr, "insert.'t 'wd['imme], "); - PrintRegister(instr->WsValue()); - break; - case INSVE: - Format(instr, "insve.'t 'wd['imme], 'ws[0]"); - break; - default: - UNREACHABLE(); - } -} - -void Decoder::DecodeTypeMsaBIT(Instruction* instr) { - uint32_t opcode = instr->InstructionBits() & kMsaBITMask; - - switch (opcode) { - case SLLI: - Format(instr, "slli.'t 'wd, 'ws, 'immb"); - break; - case SRAI: - Format(instr, "srai.'t 'wd, 'ws, 'immb"); - break; - case SRLI: - Format(instr, "srli.'t 'wd, 'ws, 'immb"); - break; - case BCLRI: - Format(instr, "bclri.'t 'wd, 'ws, 'immb"); - break; - case BSETI: - Format(instr, "bseti.'t 'wd, 'ws, 'immb"); - break; - case BNEGI: - Format(instr, "bnegi.'t 'wd, 'ws, 'immb"); - break; - case BINSLI: - Format(instr, "binsli.'t 'wd, 'ws, 'immb"); - break; - case BINSRI: - Format(instr, "binsri.'t 'wd, 'ws, 'immb"); - break; - case SAT_S: - Format(instr, "sat_s.'t 'wd, 'ws, 'immb"); - break; - case SAT_U: - Format(instr, "sat_u.'t 'wd, 'ws, 'immb"); - break; - case SRARI: - Format(instr, "srari.'t 'wd, 'ws, 'immb"); - break; - case SRLRI: - Format(instr, "srlri.'t 'wd, 'ws, 'immb"); - break; - default: - UNREACHABLE(); - } -} - -void Decoder::DecodeTypeMsaMI10(Instruction* instr) { - uint32_t opcode = instr->InstructionBits() & kMsaMI10Mask; - if (opcode == MSA_LD) { - Format(instr, "ld.'t 'wd, 'imm10s2("); - PrintRegister(instr->WsValue()); - Print(")"); - } else if (opcode == MSA_ST) { - Format(instr, "st.'t 'wd, 'imm10s2("); - 
PrintRegister(instr->WsValue()); - Print(")"); - } else { - UNREACHABLE(); - } -} - -void Decoder::DecodeTypeMsa3R(Instruction* instr) { - uint32_t opcode = instr->InstructionBits() & kMsa3RMask; - switch (opcode) { - case SLL_MSA: - Format(instr, "sll.'t 'wd, 'ws, 'wt"); - break; - case SRA_MSA: - Format(instr, "sra.'t 'wd, 'ws, 'wt"); - break; - case SRL_MSA: - Format(instr, "srl.'t 'wd, 'ws, 'wt"); - break; - case BCLR: - Format(instr, "bclr.'t 'wd, 'ws, 'wt"); - break; - case BSET: - Format(instr, "bset.'t 'wd, 'ws, 'wt"); - break; - case BNEG: - Format(instr, "bneg.'t 'wd, 'ws, 'wt"); - break; - case BINSL: - Format(instr, "binsl.'t 'wd, 'ws, 'wt"); - break; - case BINSR: - Format(instr, "binsr.'t 'wd, 'ws, 'wt"); - break; - case ADDV: - Format(instr, "addv.'t 'wd, 'ws, 'wt"); - break; - case SUBV: - Format(instr, "subv.'t 'wd, 'ws, 'wt"); - break; - case MAX_S: - Format(instr, "max_s.'t 'wd, 'ws, 'wt"); - break; - case MAX_U: - Format(instr, "max_u.'t 'wd, 'ws, 'wt"); - break; - case MIN_S: - Format(instr, "min_s.'t 'wd, 'ws, 'wt"); - break; - case MIN_U: - Format(instr, "min_u.'t 'wd, 'ws, 'wt"); - break; - case MAX_A: - Format(instr, "max_a.'t 'wd, 'ws, 'wt"); - break; - case MIN_A: - Format(instr, "min_a.'t 'wd, 'ws, 'wt"); - break; - case CEQ: - Format(instr, "ceq.'t 'wd, 'ws, 'wt"); - break; - case CLT_S: - Format(instr, "clt_s.'t 'wd, 'ws, 'wt"); - break; - case CLT_U: - Format(instr, "clt_u.'t 'wd, 'ws, 'wt"); - break; - case CLE_S: - Format(instr, "cle_s.'t 'wd, 'ws, 'wt"); - break; - case CLE_U: - Format(instr, "cle_u.'t 'wd, 'ws, 'wt"); - break; - case ADD_A: - Format(instr, "add_a.'t 'wd, 'ws, 'wt"); - break; - case ADDS_A: - Format(instr, "adds_a.'t 'wd, 'ws, 'wt"); - break; - case ADDS_S: - Format(instr, "adds_s.'t 'wd, 'ws, 'wt"); - break; - case ADDS_U: - Format(instr, "adds_u.'t 'wd, 'ws, 'wt"); - break; - case AVE_S: - Format(instr, "ave_s.'t 'wd, 'ws, 'wt"); - break; - case AVE_U: - Format(instr, "ave_u.'t 'wd, 'ws, 'wt"); - break; - case AVER_S: - Format(instr, "aver_s.'t 'wd, 'ws, 'wt"); - break; - case AVER_U: - Format(instr, "aver_u.'t 'wd, 'ws, 'wt"); - break; - case SUBS_S: - Format(instr, "subs_s.'t 'wd, 'ws, 'wt"); - break; - case SUBS_U: - Format(instr, "subs_u.'t 'wd, 'ws, 'wt"); - break; - case SUBSUS_U: - Format(instr, "subsus_u.'t 'wd, 'ws, 'wt"); - break; - case SUBSUU_S: - Format(instr, "subsuu_s.'t 'wd, 'ws, 'wt"); - break; - case ASUB_S: - Format(instr, "asub_s.'t 'wd, 'ws, 'wt"); - break; - case ASUB_U: - Format(instr, "asub_u.'t 'wd, 'ws, 'wt"); - break; - case MULV: - Format(instr, "mulv.'t 'wd, 'ws, 'wt"); - break; - case MADDV: - Format(instr, "maddv.'t 'wd, 'ws, 'wt"); - break; - case MSUBV: - Format(instr, "msubv.'t 'wd, 'ws, 'wt"); - break; - case DIV_S_MSA: - Format(instr, "div_s.'t 'wd, 'ws, 'wt"); - break; - case DIV_U: - Format(instr, "div_u.'t 'wd, 'ws, 'wt"); - break; - case MOD_S: - Format(instr, "mod_s.'t 'wd, 'ws, 'wt"); - break; - case MOD_U: - Format(instr, "mod_u.'t 'wd, 'ws, 'wt"); - break; - case DOTP_S: - Format(instr, "dotp_s.'t 'wd, 'ws, 'wt"); - break; - case DOTP_U: - Format(instr, "dotp_u.'t 'wd, 'ws, 'wt"); - break; - case DPADD_S: - Format(instr, "dpadd_s.'t 'wd, 'ws, 'wt"); - break; - case DPADD_U: - Format(instr, "dpadd_u.'t 'wd, 'ws, 'wt"); - break; - case DPSUB_S: - Format(instr, "dpsub_s.'t 'wd, 'ws, 'wt"); - break; - case DPSUB_U: - Format(instr, "dpsub_u.'t 'wd, 'ws, 'wt"); - break; - case SLD: - Format(instr, "sld.'t 'wd, 'ws['rt]"); - break; - case SPLAT: - Format(instr, "splat.'t 'wd, 'ws['rt]"); - break; - 
case PCKEV: - Format(instr, "pckev.'t 'wd, 'ws, 'wt"); - break; - case PCKOD: - Format(instr, "pckod.'t 'wd, 'ws, 'wt"); - break; - case ILVL: - Format(instr, "ilvl.'t 'wd, 'ws, 'wt"); - break; - case ILVR: - Format(instr, "ilvr.'t 'wd, 'ws, 'wt"); - break; - case ILVEV: - Format(instr, "ilvev.'t 'wd, 'ws, 'wt"); - break; - case ILVOD: - Format(instr, "ilvod.'t 'wd, 'ws, 'wt"); - break; - case VSHF: - Format(instr, "vshf.'t 'wd, 'ws, 'wt"); - break; - case SRAR: - Format(instr, "srar.'t 'wd, 'ws, 'wt"); - break; - case SRLR: - Format(instr, "srlr.'t 'wd, 'ws, 'wt"); - break; - case HADD_S: - Format(instr, "hadd_s.'t 'wd, 'ws, 'wt"); - break; - case HADD_U: - Format(instr, "hadd_u.'t 'wd, 'ws, 'wt"); - break; - case HSUB_S: - Format(instr, "hsub_s.'t 'wd, 'ws, 'wt"); - break; - case HSUB_U: - Format(instr, "hsub_u.'t 'wd, 'ws, 'wt"); - break; - default: - UNREACHABLE(); - } -} - -void Decoder::DecodeTypeMsa3RF(Instruction* instr) { - uint32_t opcode = instr->InstructionBits() & kMsa3RFMask; - switch (opcode) { - case FCAF: - Format(instr, "fcaf.'t 'wd, 'ws, 'wt"); - break; - case FCUN: - Format(instr, "fcun.'t 'wd, 'ws, 'wt"); - break; - case FCEQ: - Format(instr, "fceq.'t 'wd, 'ws, 'wt"); - break; - case FCUEQ: - Format(instr, "fcueq.'t 'wd, 'ws, 'wt"); - break; - case FCLT: - Format(instr, "fclt.'t 'wd, 'ws, 'wt"); - break; - case FCULT: - Format(instr, "fcult.'t 'wd, 'ws, 'wt"); - break; - case FCLE: - Format(instr, "fcle.'t 'wd, 'ws, 'wt"); - break; - case FCULE: - Format(instr, "fcule.'t 'wd, 'ws, 'wt"); - break; - case FSAF: - Format(instr, "fsaf.'t 'wd, 'ws, 'wt"); - break; - case FSUN: - Format(instr, "fsun.'t 'wd, 'ws, 'wt"); - break; - case FSEQ: - Format(instr, "fseq.'t 'wd, 'ws, 'wt"); - break; - case FSUEQ: - Format(instr, "fsueq.'t 'wd, 'ws, 'wt"); - break; - case FSLT: - Format(instr, "fslt.'t 'wd, 'ws, 'wt"); - break; - case FSULT: - Format(instr, "fsult.'t 'wd, 'ws, 'wt"); - break; - case FSLE: - Format(instr, "fsle.'t 'wd, 'ws, 'wt"); - break; - case FSULE: - Format(instr, "fsule.'t 'wd, 'ws, 'wt"); - break; - case FADD: - Format(instr, "fadd.'t 'wd, 'ws, 'wt"); - break; - case FSUB: - Format(instr, "fsub.'t 'wd, 'ws, 'wt"); - break; - case FMUL: - Format(instr, "fmul.'t 'wd, 'ws, 'wt"); - break; - case FDIV: - Format(instr, "fdiv.'t 'wd, 'ws, 'wt"); - break; - case FMADD: - Format(instr, "fmadd.'t 'wd, 'ws, 'wt"); - break; - case FMSUB: - Format(instr, "fmsub.'t 'wd, 'ws, 'wt"); - break; - case FEXP2: - Format(instr, "fexp2.'t 'wd, 'ws, 'wt"); - break; - case FEXDO: - Format(instr, "fexdo.'t 'wd, 'ws, 'wt"); - break; - case FTQ: - Format(instr, "ftq.'t 'wd, 'ws, 'wt"); - break; - case FMIN: - Format(instr, "fmin.'t 'wd, 'ws, 'wt"); - break; - case FMIN_A: - Format(instr, "fmin_a.'t 'wd, 'ws, 'wt"); - break; - case FMAX: - Format(instr, "fmax.'t 'wd, 'ws, 'wt"); - break; - case FMAX_A: - Format(instr, "fmax_a.'t 'wd, 'ws, 'wt"); - break; - case FCOR: - Format(instr, "fcor.'t 'wd, 'ws, 'wt"); - break; - case FCUNE: - Format(instr, "fcune.'t 'wd, 'ws, 'wt"); - break; - case FCNE: - Format(instr, "fcne.'t 'wd, 'ws, 'wt"); - break; - case MUL_Q: - Format(instr, "mul_q.'t 'wd, 'ws, 'wt"); - break; - case MADD_Q: - Format(instr, "madd_q.'t 'wd, 'ws, 'wt"); - break; - case MSUB_Q: - Format(instr, "msub_q.'t 'wd, 'ws, 'wt"); - break; - case FSOR: - Format(instr, "fsor.'t 'wd, 'ws, 'wt"); - break; - case FSUNE: - Format(instr, "fsune.'t 'wd, 'ws, 'wt"); - break; - case FSNE: - Format(instr, "fsne.'t 'wd, 'ws, 'wt"); - break; - case MULR_Q: - Format(instr, "mulr_q.'t 'wd, 'ws, 
'wt"); - break; - case MADDR_Q: - Format(instr, "maddr_q.'t 'wd, 'ws, 'wt"); - break; - case MSUBR_Q: - Format(instr, "msubr_q.'t 'wd, 'ws, 'wt"); - break; - default: - UNREACHABLE(); - } -} - -void Decoder::DecodeTypeMsaVec(Instruction* instr) { - uint32_t opcode = instr->InstructionBits() & kMsaVECMask; - switch (opcode) { - case AND_V: - Format(instr, "and.v 'wd, 'ws, 'wt"); - break; - case OR_V: - Format(instr, "or.v 'wd, 'ws, 'wt"); - break; - case NOR_V: - Format(instr, "nor.v 'wd, 'ws, 'wt"); - break; - case XOR_V: - Format(instr, "xor.v 'wd, 'ws, 'wt"); - break; - case BMNZ_V: - Format(instr, "bmnz.v 'wd, 'ws, 'wt"); - break; - case BMZ_V: - Format(instr, "bmz.v 'wd, 'ws, 'wt"); - break; - case BSEL_V: - Format(instr, "bsel.v 'wd, 'ws, 'wt"); - break; - default: - UNREACHABLE(); - } -} - -void Decoder::DecodeTypeMsa2R(Instruction* instr) { - uint32_t opcode = instr->InstructionBits() & kMsa2RMask; - switch (opcode) { - case FILL: { - Format(instr, "fill.'t 'wd, "); - PrintRegister(instr->WsValue()); // rs value is in ws field - } break; - case PCNT: - Format(instr, "pcnt.'t 'wd, 'ws"); - break; - case NLOC: - Format(instr, "nloc.'t 'wd, 'ws"); - break; - case NLZC: - Format(instr, "nlzc.'t 'wd, 'ws"); - break; - default: - UNREACHABLE(); - } -} - -void Decoder::DecodeTypeMsa2RF(Instruction* instr) { - uint32_t opcode = instr->InstructionBits() & kMsa2RFMask; - switch (opcode) { - case FCLASS: - Format(instr, "fclass.'t 'wd, 'ws"); - break; - case FTRUNC_S: - Format(instr, "ftrunc_s.'t 'wd, 'ws"); - break; - case FTRUNC_U: - Format(instr, "ftrunc_u.'t 'wd, 'ws"); - break; - case FSQRT: - Format(instr, "fsqrt.'t 'wd, 'ws"); - break; - case FRSQRT: - Format(instr, "frsqrt.'t 'wd, 'ws"); - break; - case FRCP: - Format(instr, "frcp.'t 'wd, 'ws"); - break; - case FRINT: - Format(instr, "frint.'t 'wd, 'ws"); - break; - case FLOG2: - Format(instr, "flog2.'t 'wd, 'ws"); - break; - case FEXUPL: - Format(instr, "fexupl.'t 'wd, 'ws"); - break; - case FEXUPR: - Format(instr, "fexupr.'t 'wd, 'ws"); - break; - case FFQL: - Format(instr, "ffql.'t 'wd, 'ws"); - break; - case FFQR: - Format(instr, "ffqr.'t 'wd, 'ws"); - break; - case FTINT_S: - Format(instr, "ftint_s.'t 'wd, 'ws"); - break; - case FTINT_U: - Format(instr, "ftint_u.'t 'wd, 'ws"); - break; - case FFINT_S: - Format(instr, "ffint_s.'t 'wd, 'ws"); - break; - case FFINT_U: - Format(instr, "ffint_u.'t 'wd, 'ws"); - break; - default: - UNREACHABLE(); - } -} - -// Disassemble the instruction at *instr_ptr into the output buffer. -int Decoder::InstructionDecode(byte* instr_ptr) { - Instruction* instr = Instruction::At(instr_ptr); - // Print raw instruction bytes. 
-  out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_,
-                                    "%08x       ", instr->InstructionBits());
-  switch (instr->InstructionType()) {
-    case Instruction::kRegisterType: {
-      DecodeTypeRegister(instr);
-      break;
-    }
-    case Instruction::kImmediateType: {
-      DecodeTypeImmediate(instr);
-      break;
-    }
-    case Instruction::kJumpType: {
-      DecodeTypeJump(instr);
-      break;
-    }
-    default: {
-      Format(instr, "UNSUPPORTED");
-      UNSUPPORTED_MIPS();
-    }
-  }
-  return kInstrSize;
-}
-
-}  // namespace internal
-}  // namespace v8
-
-//------------------------------------------------------------------------------
-
-namespace disasm {
-
-const char* NameConverter::NameOfAddress(byte* addr) const {
-  v8::base::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
-  return tmp_buffer_.begin();
-}
-
-const char* NameConverter::NameOfConstant(byte* addr) const {
-  return NameOfAddress(addr);
-}
-
-const char* NameConverter::NameOfCPURegister(int reg) const {
-  return v8::internal::Registers::Name(reg);
-}
-
-const char* NameConverter::NameOfXMMRegister(int reg) const {
-  return v8::internal::FPURegisters::Name(reg);
-}
-
-const char* NameConverter::NameOfByteCPURegister(int reg) const {
-  UNREACHABLE();  // MIPS does not have the concept of a byte register.
-}
-
-const char* NameConverter::NameInCode(byte* addr) const {
-  // The default name converter is called for unknown code. So we will not try
-  // to access any memory.
-  return "";
-}
-
-//------------------------------------------------------------------------------
-
-int Disassembler::InstructionDecode(v8::base::Vector<char> buffer,
-                                    byte* instruction) {
-  v8::internal::Decoder d(converter_, buffer);
-  return d.InstructionDecode(instruction);
-}
-
-// The MIPS assembler does not currently use constant pools.
-int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; }
-
-void Disassembler::Disassemble(FILE* f, byte* begin, byte* end,
-                               UnimplementedOpcodeAction unimplemented_action) {
-  NameConverter converter;
-  Disassembler d(converter, unimplemented_action);
-  for (byte* pc = begin; pc < end;) {
-    v8::base::EmbeddedVector<char, 128> buffer;
-    buffer[0] = '\0';
-    byte* prev_pc = pc;
-    pc += d.InstructionDecode(buffer, pc);
-    v8::internal::PrintF(f, "%p    %08x      %s\n", static_cast<void*>(prev_pc),
-                         *reinterpret_cast<int32_t*>(prev_pc), buffer.begin());
-  }
-}
-
-#undef STRING_STARTS_WITH
-
-}  // namespace disasm
-
-#endif  // V8_TARGET_ARCH_MIPS
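The deleted disassembler above follows one shape throughout: a decoder that masks the raw instruction word, switches on the surviving opcode bits to pick a mnemonic, and returns the byte count consumed, driven by a loop that advances pc by that count. A minimal self-contained sketch of the same pattern follows; the opcode mask and values here are invented for illustration and are not V8 constants.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Invented layout: the top byte of the word selects the instruction.
constexpr uint32_t kOpcodeMask = 0xFF000000u;
constexpr uint32_t kOpAdd = 0x01000000u;
constexpr uint32_t kOpSub = 0x02000000u;

// Decode one 4-byte instruction into buf; return bytes consumed.
int InstructionDecode(char* buf, size_t len, const uint8_t* pc) {
  uint32_t bits;
  memcpy(&bits, pc, sizeof(bits));  // avoids unaligned/aliasing issues
  switch (bits & kOpcodeMask) {
    case kOpAdd: snprintf(buf, len, "add     %08x", bits); break;
    case kOpSub: snprintf(buf, len, "sub     %08x", bits); break;
    default:     snprintf(buf, len, "unknown %08x", bits); break;
  }
  return 4;  // fixed-width ISA, like MIPS
}

void Disassemble(const uint8_t* begin, const uint8_t* end) {
  char buffer[64];
  for (const uint8_t* pc = begin; pc < end;) {
    const uint8_t* prev = pc;
    pc += InstructionDecode(buffer, sizeof(buffer), pc);
    printf("%p  %s\n", static_cast<const void*>(prev), buffer);
  }
}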
diff --git a/src/diagnostics/mips/unwinder-mips.cc b/src/diagnostics/mips/unwinder-mips.cc
deleted file mode 100644
index 0314458005..0000000000
--- a/src/diagnostics/mips/unwinder-mips.cc
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/diagnostics/unwinder.h"
-
-namespace v8 {
-
-struct RegisterState;
-
-void GetCalleeSavedRegistersFromEntryFrame(void* fp,
-                                           RegisterState* register_state) {}
-
-}  // namespace v8
diff --git a/src/diagnostics/perf-jit.h b/src/diagnostics/perf-jit.h
index 0ee2a53a22..0211b1baf6 100644
--- a/src/diagnostics/perf-jit.h
+++ b/src/diagnostics/perf-jit.h
@@ -85,7 +85,6 @@ class LinuxPerfJitLogger : public CodeEventLogger {
   static const uint32_t kElfMachIA32 = 3;
   static const uint32_t kElfMachX64 = 62;
   static const uint32_t kElfMachARM = 40;
-  static const uint32_t kElfMachMIPS = 8;
   static const uint32_t kElfMachMIPS64 = 8;
   static const uint32_t kElfMachLOONG64 = 258;
   static const uint32_t kElfMachARM64 = 183;
@@ -100,8 +99,6 @@ class LinuxPerfJitLogger : public CodeEventLogger {
     return kElfMachX64;
 #elif V8_TARGET_ARCH_ARM
     return kElfMachARM;
-#elif V8_TARGET_ARCH_MIPS
-    return kElfMachMIPS;
 #elif V8_TARGET_ARCH_MIPS64
     return kElfMachMIPS64;
 #elif V8_TARGET_ARCH_LOONG64
diff --git a/src/execution/clobber-registers.cc b/src/execution/clobber-registers.cc
index a7f5bf80cf..f6e7e1e5e6 100644
--- a/src/execution/clobber-registers.cc
+++ b/src/execution/clobber-registers.cc
@@ -18,8 +18,6 @@
 #include "src/codegen/x64/register-x64.h"
 #elif V8_HOST_ARCH_LOONG64 && V8_TARGET_ARCH_LOONG64
 #include "src/codegen/loong64/register-loong64.h"
-#elif V8_HOST_ARCH_MIPS && V8_TARGET_ARCH_MIPS
-#include "src/codegen/mips/register-mips.h"
 #elif V8_HOST_ARCH_MIPS64 && V8_TARGET_ARCH_MIPS64
 #include "src/codegen/mips64/register-mips64.h"
 #endif
@@ -52,9 +50,6 @@ namespace internal {
 #elif V8_HOST_ARCH_LOONG64 && V8_TARGET_ARCH_LOONG64
 #define CLOBBER_REGISTER(R) __asm__ volatile("movgr2fr.d $" #R ",$zero" :::);
-#elif V8_HOST_ARCH_MIPS && V8_TARGET_ARCH_MIPS
-#define CLOBBER_USE_REGISTER(R) __asm__ volatile("mtc1 $zero,$" #R :::);
-
 #elif V8_HOST_ARCH_MIPS64 && V8_TARGET_ARCH_MIPS64
 #define CLOBBER_USE_REGISTER(R) __asm__ volatile("dmtc1 $zero,$" #R :::);
diff --git a/src/execution/frame-constants.h b/src/execution/frame-constants.h
index 0f9eaa6f7c..423f41bcdd 100644
--- a/src/execution/frame-constants.h
+++ b/src/execution/frame-constants.h
@@ -416,8 +416,6 @@ inline static int FrameSlotToFPOffset(int slot) {
 #include "src/execution/arm/frame-constants-arm.h"
 #elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
 #include "src/execution/ppc/frame-constants-ppc.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "src/execution/mips/frame-constants-mips.h"
 #elif V8_TARGET_ARCH_MIPS64
 #include "src/execution/mips64/frame-constants-mips64.h"
 #elif V8_TARGET_ARCH_LOONG64
diff --git a/src/execution/mips/frame-constants-mips.cc b/src/execution/mips/frame-constants-mips.cc
deleted file mode 100644
index 1c593c05bc..0000000000
--- a/src/execution/mips/frame-constants-mips.cc
+++ /dev/null
@@ -1,32 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
- -#if V8_TARGET_ARCH_MIPS - -#include "src/execution/mips/frame-constants-mips.h" - -#include "src/codegen/mips/assembler-mips-inl.h" -#include "src/execution/frame-constants.h" -#include "src/execution/frames.h" - -namespace v8 { -namespace internal { - -Register JavaScriptFrame::fp_register() { return v8::internal::fp; } -Register JavaScriptFrame::context_register() { return cp; } -Register JavaScriptFrame::constant_pool_pointer_register() { UNREACHABLE(); } - -int UnoptimizedFrameConstants::RegisterStackSlotCount(int register_count) { - return register_count; -} - -int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) { - USE(register_count); - return 0; -} - -} // namespace internal -} // namespace v8 - -#endif // V8_TARGET_ARCH_MIPS diff --git a/src/execution/mips/frame-constants-mips.h b/src/execution/mips/frame-constants-mips.h deleted file mode 100644 index 81a85eb75b..0000000000 --- a/src/execution/mips/frame-constants-mips.h +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2011 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_EXECUTION_MIPS_FRAME_CONSTANTS_MIPS_H_ -#define V8_EXECUTION_MIPS_FRAME_CONSTANTS_MIPS_H_ - -#include "src/base/bits.h" -#include "src/base/macros.h" -#include "src/codegen/register.h" -#include "src/execution/frame-constants.h" - -namespace v8 { -namespace internal { - -class EntryFrameConstants : public AllStatic { - public: - // This is the offset to where JSEntry pushes the current value of - // Isolate::c_entry_fp onto the stack. - static constexpr int kCallerFPOffset = -3 * kSystemPointerSize; - - // Stack offsets for arguments passed to JSEntry. - static constexpr int kArgcOffset = +0 * kSystemPointerSize; - static constexpr int kArgvOffset = +1 * kSystemPointerSize; -}; - -class WasmCompileLazyFrameConstants : public TypedFrameConstants { - public: - static constexpr int kNumberOfSavedGpParamRegs = 3; - static constexpr int kNumberOfSavedFpParamRegs = 7; - static constexpr int kNumberOfSavedAllParamRegs = 10; - - // FP-relative. - // See Generate_WasmCompileLazy in builtins-mips.cc. - static constexpr int kWasmInstanceOffset = - TYPED_FRAME_PUSHED_VALUE_OFFSET(kNumberOfSavedAllParamRegs); - static constexpr int kFixedFrameSizeFromFp = - TypedFrameConstants::kFixedFrameSizeFromFp + - kNumberOfSavedGpParamRegs * kPointerSize + - kNumberOfSavedFpParamRegs * kDoubleSize; -}; - -// Frame constructed by the {WasmDebugBreak} builtin. -// After pushing the frame type marker, the builtin pushes all Liftoff cache -// registers (see liftoff-assembler-defs.h). -class WasmDebugBreakFrameConstants : public TypedFrameConstants { - public: - // {v0, v1, a0, a1, a2, a3, t0, t1, t2, t3, t4, t5, t6, s7} - static constexpr RegList kPushedGpRegs = {v0, v1, a0, a1, a2, a3, t0, - t1, t2, t3, t4, t5, t6, s7}; - // {f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20, f22, f24} - static constexpr DoubleRegList kPushedFpRegs = { - f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20, f22, f24}; - - static constexpr int kNumPushedGpRegisters = kPushedGpRegs.Count(); - static constexpr int kNumPushedFpRegisters = kPushedFpRegs.Count(); - - static constexpr int kLastPushedGpRegisterOffset = - -kFixedFrameSizeFromFp - kNumPushedGpRegisters * kSystemPointerSize; - static constexpr int kLastPushedFpRegisterOffset = - kLastPushedGpRegisterOffset - kNumPushedFpRegisters * kDoubleSize; - - // Offsets are fp-relative. 
-  static int GetPushedGpRegisterOffset(int reg_code) {
-    DCHECK_NE(0, kPushedGpRegs.bits() & (1 << reg_code));
-    uint32_t lower_regs =
-        kPushedGpRegs.bits() & ((uint32_t{1} << reg_code) - 1);
-    return kLastPushedGpRegisterOffset +
-           base::bits::CountPopulation(lower_regs) * kSystemPointerSize;
-  }
-
-  static int GetPushedFpRegisterOffset(int reg_code) {
-    DCHECK_NE(0, kPushedFpRegs.bits() & (1 << reg_code));
-    uint32_t lower_regs =
-        kPushedFpRegs.bits() & ((uint32_t{1} << reg_code) - 1);
-    return kLastPushedFpRegisterOffset +
-           base::bits::CountPopulation(lower_regs) * kDoubleSize;
-  }
-};
-
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_EXECUTION_MIPS_FRAME_CONSTANTS_MIPS_H_
diff --git a/src/execution/mips/simulator-mips.cc b/src/execution/mips/simulator-mips.cc
deleted file mode 100644
index 22551ef2c7..0000000000
--- a/src/execution/mips/simulator-mips.cc
+++ /dev/null
@@ -1,7304 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/execution/mips/simulator-mips.h"
-
-// Only build the simulator if not compiling for real MIPS hardware.
-#if defined(USE_SIMULATOR)
-
-#include <limits.h>
-#include <stdarg.h>
-#include <stdlib.h>
-
-#include <cmath>
-
-#include "src/base/bits.h"
-#include "src/base/lazy-instance.h"
-#include "src/base/platform/memory.h"
-#include "src/base/platform/platform.h"
-#include "src/base/vector.h"
-#include "src/codegen/assembler-inl.h"
-#include "src/codegen/macro-assembler.h"
-#include "src/codegen/mips/constants-mips.h"
-#include "src/diagnostics/disasm.h"
-#include "src/heap/combined-heap.h"
-#include "src/runtime/runtime-utils.h"
-#include "src/utils/ostreams.h"
-
-namespace v8 {
-namespace internal {
-
-DEFINE_LAZY_LEAKY_OBJECT_GETTER(Simulator::GlobalMonitor,
-                                Simulator::GlobalMonitor::Get)
-
-// Utils functions.
-bool HaveSameSign(int32_t a, int32_t b) { return ((a ^ b) >= 0); }
-
-uint32_t get_fcsr_condition_bit(uint32_t cc) {
-  if (cc == 0) {
-    return 23;
-  } else {
-    return 24 + cc;
-  }
-}
-
-// This macro provides a platform independent use of sscanf. The reason for
-// SScanF not being implemented in a platform independent way through
-// ::v8::internal::OS in the same way as SNPrintF is that the Windows C Run-Time
-// Library does not provide vsscanf.
-#define SScanF sscanf
-
-// The MipsDebugger class is used by the simulator while debugging simulated
-// code.
-class MipsDebugger {
- public:
-  explicit MipsDebugger(Simulator* sim) : sim_(sim) {}
-
-  void Stop(Instruction* instr);
-  void Debug();
-  // Print all registers with a nice formatting.
-  void PrintAllRegs();
-  void PrintAllRegsIncludingFPU();
-
- private:
-  // We set the breakpoint code to 0xFFFFF to easily recognize it.
-  static const Instr kBreakpointInstr = SPECIAL | BREAK | 0xFFFFF << 6;
-  static const Instr kNopInstr = 0x0;
-
-  Simulator* sim_;
-
-  int32_t GetRegisterValue(int regnum);
-  int32_t GetFPURegisterValue32(int regnum);
-  int64_t GetFPURegisterValue64(int regnum);
-  float GetFPURegisterValueFloat(int regnum);
-  double GetFPURegisterValueDouble(int regnum);
-  bool GetValue(const char* desc, int32_t* value);
-  bool GetValue(const char* desc, int64_t* value);
-
-  // Set or delete a breakpoint. Returns true if successful.
-  bool SetBreakpoint(Instruction* breakpc);
-  bool DeleteBreakpoint(Instruction* breakpc);
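A sketch of the bookkeeping these breakpoint members implement: the original instruction word is saved when a breakpoint is set, and the trap word is swapped in and out around debugger interaction so stepping and disassembly always see real code. The types and the kBreakInstr encoding below are simplified stand-ins, not V8's.

#include <cstdint>

constexpr uint32_t kBreakInstr = 0x0000000D;  // stand-in BREAK encoding

struct Breakpoint {
  uint32_t* pc = nullptr;  // patched location
  uint32_t saved = 0;      // original instruction bits
};

// Arm the breakpoint: write the trap word over the saved instruction.
void Redo(Breakpoint& bp) { if (bp.pc) *bp.pc = kBreakInstr; }
// Disarm it: restore the original word.
void Undo(Breakpoint& bp) { if (bp.pc) *bp.pc = bp.saved; }

bool Set(Breakpoint& bp, uint32_t* pc) {
  if (bp.pc != nullptr) return false;  // only one breakpoint at a time
  bp.pc = pc;
  bp.saved = *pc;
  return true;
}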
-  // Undo and redo all breakpoints. This is needed to bracket disassembly and
-  // execution to skip past breakpoints when run from the debugger.
-  void UndoBreakpoints();
-  void RedoBreakpoints();
-};
-
-#define UNSUPPORTED() printf("Sim: Unsupported instruction.\n");
-
-void MipsDebugger::Stop(Instruction* instr) {
-  // Get the stop code.
-  uint32_t code = instr->Bits(25, 6);
-  PrintF("Simulator hit (%u)\n", code);
-  Debug();
-}
-
-int32_t MipsDebugger::GetRegisterValue(int regnum) {
-  if (regnum == kNumSimuRegisters) {
-    return sim_->get_pc();
-  } else {
-    return sim_->get_register(regnum);
-  }
-}
-
-int32_t MipsDebugger::GetFPURegisterValue32(int regnum) {
-  if (regnum == kNumFPURegisters) {
-    return sim_->get_pc();
-  } else {
-    return sim_->get_fpu_register_word(regnum);
-  }
-}
-
-int64_t MipsDebugger::GetFPURegisterValue64(int regnum) {
-  if (regnum == kNumFPURegisters) {
-    return sim_->get_pc();
-  } else {
-    return sim_->get_fpu_register(regnum);
-  }
-}
-
-float MipsDebugger::GetFPURegisterValueFloat(int regnum) {
-  if (regnum == kNumFPURegisters) {
-    return sim_->get_pc();
-  } else {
-    return sim_->get_fpu_register_float(regnum);
-  }
-}
-
-double MipsDebugger::GetFPURegisterValueDouble(int regnum) {
-  if (regnum == kNumFPURegisters) {
-    return sim_->get_pc();
-  } else {
-    return sim_->get_fpu_register_double(regnum);
-  }
-}
-
-bool MipsDebugger::GetValue(const char* desc, int32_t* value) {
-  int regnum = Registers::Number(desc);
-  int fpuregnum = FPURegisters::Number(desc);
-
-  if (regnum != kInvalidRegister) {
-    *value = GetRegisterValue(regnum);
-    return true;
-  } else if (fpuregnum != kInvalidFPURegister) {
-    *value = GetFPURegisterValue32(fpuregnum);
-    return true;
-  } else if (strncmp(desc, "0x", 2) == 0) {
-    return SScanF(desc, "%x", reinterpret_cast<uint32_t*>(value)) == 1;
-  } else {
-    return SScanF(desc, "%i", value) == 1;
-  }
-}
-
-bool MipsDebugger::GetValue(const char* desc, int64_t* value) {
-  int regnum = Registers::Number(desc);
-  int fpuregnum = FPURegisters::Number(desc);
-
-  if (regnum != kInvalidRegister) {
-    *value = GetRegisterValue(regnum);
-    return true;
-  } else if (fpuregnum != kInvalidFPURegister) {
-    *value = GetFPURegisterValue64(fpuregnum);
-    return true;
-  } else if (strncmp(desc, "0x", 2) == 0) {
-    return SScanF(desc + 2, "%" SCNx64, reinterpret_cast<uint64_t*>(value)) ==
-           1;
-  } else {
-    return SScanF(desc, "%" SCNu64, reinterpret_cast<uint64_t*>(value)) == 1;
-  }
-}
-
-bool MipsDebugger::SetBreakpoint(Instruction* breakpc) {
-  // Check if a breakpoint can be set. If not return without any side-effects.
-  if (sim_->break_pc_ != nullptr) {
-    return false;
-  }
-
-  // Set the breakpoint.
-  sim_->break_pc_ = breakpc;
-  sim_->break_instr_ = breakpc->InstructionBits();
-  // Not setting the breakpoint instruction in the code itself. It will be set
-  // when the debugger shell continues.
-  return true;
-}
-
-bool MipsDebugger::DeleteBreakpoint(Instruction* breakpc) {
-  if (sim_->break_pc_ != nullptr) {
-    sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
-  }
-
-  sim_->break_pc_ = nullptr;
-  sim_->break_instr_ = 0;
-  return true;
-}
-
-void MipsDebugger::UndoBreakpoints() {
-  if (sim_->break_pc_ != nullptr) {
-    sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
-  }
-}
-
-void MipsDebugger::RedoBreakpoints() {
-  if (sim_->break_pc_ != nullptr) {
-    sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
-  }
-}
-
-void MipsDebugger::PrintAllRegs() {
-#define REG_INFO(n) Registers::Name(n), GetRegisterValue(n), GetRegisterValue(n)
-
-  PrintF("\n");
-  // at, v0, a0.
-  PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n", REG_INFO(1),
-         REG_INFO(2), REG_INFO(4));
-  // v1, a1.
- PrintF("%26s\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n", "", REG_INFO(3), - REG_INFO(5)); - // a2. - PrintF("%26s\t%26s\t%3s: 0x%08x %10d\n", "", "", REG_INFO(6)); - // a3. - PrintF("%26s\t%26s\t%3s: 0x%08x %10d\n", "", "", REG_INFO(7)); - PrintF("\n"); - // t0-t7, s0-s7 - for (int i = 0; i < 8; i++) { - PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n", REG_INFO(8 + i), - REG_INFO(16 + i)); - } - PrintF("\n"); - // t8, k0, LO. - PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n", REG_INFO(24), - REG_INFO(26), REG_INFO(32)); - // t9, k1, HI. - PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n", REG_INFO(25), - REG_INFO(27), REG_INFO(33)); - // sp, fp, gp. - PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n", REG_INFO(29), - REG_INFO(30), REG_INFO(28)); - // pc. - PrintF("%3s: 0x%08x %10d\t%3s: 0x%08x %10d\n", REG_INFO(31), REG_INFO(34)); - -#undef REG_INFO -} - -void MipsDebugger::PrintAllRegsIncludingFPU() { -#define FPU_REG_INFO32(n) \ - FPURegisters::Name(n), FPURegisters::Name(n + 1), \ - GetFPURegisterValue32(n + 1), GetFPURegisterValue32(n), \ - GetFPURegisterValueDouble(n) - -#define FPU_REG_INFO64(n) \ - FPURegisters::Name(n), GetFPURegisterValue64(n), GetFPURegisterValueDouble(n) - - PrintAllRegs(); - - PrintF("\n\n"); - // f0, f1, f2, ... f31. - // This must be a compile-time switch, - // compiler will throw out warnings otherwise. - if (kFpuMode == kFP64) { - PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(0)); - PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(1)); - PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(2)); - PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(3)); - PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(4)); - PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(5)); - PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(6)); - PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(7)); - PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(8)); - PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(9)); - PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(10)); - PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(11)); - PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(12)); - PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(13)); - PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(14)); - PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(15)); - PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(16)); - PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(17)); - PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(18)); - PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(19)); - PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(20)); - PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(21)); - PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(22)); - PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(23)); - PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(24)); - PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(25)); - PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(26)); - PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(27)); - PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(28)); - PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(29)); - PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(30)); - PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(31)); - } else { - PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(0)); - PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(2)); - PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(4)); - PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(6)); - PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(8)); 
- PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(10)); - PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(12)); - PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(14)); - PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(16)); - PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(18)); - PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(20)); - PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(22)); - PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(24)); - PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(26)); - PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(28)); - PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(30)); - } - -#undef FPU_REG_INFO32 -#undef FPU_REG_INFO64 -} - -void MipsDebugger::Debug() { - intptr_t last_pc = -1; - bool done = false; - -#define COMMAND_SIZE 63 -#define ARG_SIZE 255 - -#define STR(a) #a -#define XSTR(a) STR(a) - - char cmd[COMMAND_SIZE + 1]; - char arg1[ARG_SIZE + 1]; - char arg2[ARG_SIZE + 1]; - char* argv[3] = {cmd, arg1, arg2}; - - // Make sure to have a proper terminating character if reaching the limit. - cmd[COMMAND_SIZE] = 0; - arg1[ARG_SIZE] = 0; - arg2[ARG_SIZE] = 0; - - // Undo all set breakpoints while running in the debugger shell. This will - // make them invisible to all commands. - UndoBreakpoints(); - - while (!done && (sim_->get_pc() != Simulator::end_sim_pc)) { - if (last_pc != sim_->get_pc()) { - disasm::NameConverter converter; - disasm::Disassembler dasm(converter); - // Use a reasonably large buffer. - v8::base::EmbeddedVector buffer; - dasm.InstructionDecode(buffer, reinterpret_cast(sim_->get_pc())); - PrintF(" 0x%08x %s\n", sim_->get_pc(), buffer.begin()); - last_pc = sim_->get_pc(); - } - char* line = ReadLine("sim> "); - if (line == nullptr) { - break; - } else { - char* last_input = sim_->last_debugger_input(); - if (strcmp(line, "\n") == 0 && last_input != nullptr) { - line = last_input; - } else { - // Ownership is transferred to sim_; - sim_->set_last_debugger_input(line); - } - // Use sscanf to parse the individual parts of the command line. At the - // moment no command expects more than two parameters. - int argc = SScanF(line, - "%" XSTR(COMMAND_SIZE) "s " - "%" XSTR(ARG_SIZE) "s " - "%" XSTR(ARG_SIZE) "s", - cmd, arg1, arg2); - if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) { - Instruction* instr = reinterpret_cast(sim_->get_pc()); - if (!(instr->IsTrap()) || - instr->InstructionBits() == rtCallRedirInstr) { - sim_->InstructionDecode( - reinterpret_cast(sim_->get_pc())); - } else { - // Allow si to jump over generated breakpoints. - PrintF("/!\\ Jumping over generated breakpoint.\n"); - sim_->set_pc(sim_->get_pc() + kInstrSize); - } - } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) { - // Execute the one instruction we broke at with breakpoints disabled. - sim_->InstructionDecode(reinterpret_cast(sim_->get_pc())); - // Leave the debugger shell. 
-        done = true;
-      } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
-        if (argc == 2) {
-          if (strcmp(arg1, "all") == 0) {
-            PrintAllRegs();
-          } else if (strcmp(arg1, "allf") == 0) {
-            PrintAllRegsIncludingFPU();
-          } else {
-            int regnum = Registers::Number(arg1);
-            int fpuregnum = FPURegisters::Number(arg1);
-
-            if (regnum != kInvalidRegister) {
-              int32_t value;
-              value = GetRegisterValue(regnum);
-              PrintF("%s: 0x%08x %d \n", arg1, value, value);
-            } else if (fpuregnum != kInvalidFPURegister) {
-              if (IsFp64Mode()) {
-                int64_t value;
-                double dvalue;
-                value = GetFPURegisterValue64(fpuregnum);
-                dvalue = GetFPURegisterValueDouble(fpuregnum);
-                PrintF("%3s: 0x%016llx %16.4e\n",
-                       FPURegisters::Name(fpuregnum), value, dvalue);
-              } else {
-                if (fpuregnum % 2 == 1) {
-                  int32_t value;
-                  float fvalue;
-                  value = GetFPURegisterValue32(fpuregnum);
-                  fvalue = GetFPURegisterValueFloat(fpuregnum);
-                  PrintF("%s: 0x%08x %11.4e\n", arg1, value, fvalue);
-                } else {
-                  double dfvalue;
-                  int32_t lvalue1 = GetFPURegisterValue32(fpuregnum);
-                  int32_t lvalue2 = GetFPURegisterValue32(fpuregnum + 1);
-                  dfvalue = GetFPURegisterValueDouble(fpuregnum);
-                  PrintF("%3s,%3s: 0x%08x%08x %16.4e\n",
-                         FPURegisters::Name(fpuregnum + 1),
-                         FPURegisters::Name(fpuregnum), lvalue1, lvalue2,
-                         dfvalue);
-                }
-              }
-            } else {
-              PrintF("%s unrecognized\n", arg1);
-            }
-          }
-        } else {
-          if (argc == 3) {
-            if (strcmp(arg2, "single") == 0) {
-              int32_t value;
-              float fvalue;
-              int fpuregnum = FPURegisters::Number(arg1);
-
-              if (fpuregnum != kInvalidFPURegister) {
-                value = GetFPURegisterValue32(fpuregnum);
-                fvalue = GetFPURegisterValueFloat(fpuregnum);
-                PrintF("%s: 0x%08x %11.4e\n", arg1, value, fvalue);
-              } else {
-                PrintF("%s unrecognized\n", arg1);
-              }
-            } else {
-              PrintF("print <fpu register> single\n");
-            }
-          } else {
-            PrintF("print <register> or print <fpu register> single\n");
-          }
-        }
-      } else if ((strcmp(cmd, "po") == 0) ||
-                 (strcmp(cmd, "printobject") == 0)) {
-        if (argc == 2) {
-          int32_t value;
-          StdoutStream os;
-          if (GetValue(arg1, &value)) {
-            Object obj(value);
-            os << arg1 << ": \n";
-#ifdef DEBUG
-            obj.Print(os);
-            os << "\n";
-#else
-            os << Brief(obj) << "\n";
-#endif
-          } else {
-            os << arg1 << " unrecognized\n";
-          }
-        } else {
-          PrintF("printobject <value>\n");
-        }
-      } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0 ||
-                 strcmp(cmd, "dump") == 0) {
-        int32_t* cur = nullptr;
-        int32_t* end = nullptr;
-        int next_arg = 1;
-
-        if (strcmp(cmd, "stack") == 0) {
-          cur = reinterpret_cast<int32_t*>(sim_->get_register(Simulator::sp));
-        } else {  // Command "mem".
-          int32_t value;
-          if (!GetValue(arg1, &value)) {
-            PrintF("%s unrecognized\n", arg1);
-            continue;
-          }
-          cur = reinterpret_cast<int32_t*>(value);
-          next_arg++;
-        }
-
-        // TODO(palfia): optimize this.
-        if (IsFp64Mode()) {
-          int64_t words;
-          if (argc == next_arg) {
-            words = 10;
-          } else {
-            if (!GetValue(argv[next_arg], &words)) {
-              words = 10;
-            }
-          }
-          end = cur + words;
-        } else {
-          int32_t words;
-          if (argc == next_arg) {
-            words = 10;
-          } else {
-            if (!GetValue(argv[next_arg], &words)) {
-              words = 10;
-            }
-          }
-          end = cur + words;
-        }
-
-        bool skip_obj_print = (strcmp(cmd, "dump") == 0);
-        while (cur < end) {
-          PrintF("  0x%08" PRIxPTR ":  0x%08x %10d",
-                 reinterpret_cast<intptr_t>(cur), *cur, *cur);
-          Object obj(*cur);
-          Heap* current_heap = sim_->isolate_->heap();
-          if (!skip_obj_print) {
-            if (obj.IsSmi() ||
-                IsValidHeapObject(current_heap, HeapObject::cast(obj))) {
-              PrintF(" (");
-              if (obj.IsSmi()) {
-                PrintF("smi %d", Smi::ToInt(obj));
-              } else {
-                obj.ShortPrint();
-              }
-              PrintF(")");
-            }
-          }
-          PrintF("\n");
-          cur++;
-        }
-
-      } else if ((strcmp(cmd, "disasm") == 0) || (strcmp(cmd, "dpc") == 0) ||
-                 (strcmp(cmd, "di") == 0)) {
-        disasm::NameConverter converter;
-        disasm::Disassembler dasm(converter);
-        // Use a reasonably large buffer.
-        v8::base::EmbeddedVector<char, 256> buffer;
-
-        byte* cur = nullptr;
-        byte* end = nullptr;
-
-        if (argc == 1) {
-          cur = reinterpret_cast<byte*>(sim_->get_pc());
-          end = cur + (10 * kInstrSize);
-        } else if (argc == 2) {
-          int regnum = Registers::Number(arg1);
-          if (regnum != kInvalidRegister || strncmp(arg1, "0x", 2) == 0) {
-            // The argument is an address or a register name.
-            int32_t value;
-            if (GetValue(arg1, &value)) {
-              cur = reinterpret_cast<byte*>(value);
-              // Disassemble 10 instructions at <arg1>.
-              end = cur + (10 * kInstrSize);
-            }
-          } else {
-            // The argument is the number of instructions.
-            int32_t value;
-            if (GetValue(arg1, &value)) {
-              cur = reinterpret_cast<byte*>(sim_->get_pc());
-              // Disassemble <arg1> instructions.
-              end = cur + (value * kInstrSize);
-            }
-          }
-        } else {
-          int32_t value1;
-          int32_t value2;
-          if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
-            cur = reinterpret_cast<byte*>(value1);
-            end = cur + (value2 * kInstrSize);
-          }
-        }
-
-        while (cur < end) {
-          dasm.InstructionDecode(buffer, cur);
-          PrintF("  0x%08" PRIxPTR "   %s\n", reinterpret_cast<intptr_t>(cur),
-                 buffer.begin());
-          cur += kInstrSize;
-        }
-      } else if (strcmp(cmd, "gdb") == 0) {
-        PrintF("relinquishing control to gdb\n");
-        v8::base::OS::DebugBreak();
-        PrintF("regaining control from gdb\n");
-      } else if (strcmp(cmd, "break") == 0) {
-        if (argc == 2) {
-          int32_t value;
-          if (GetValue(arg1, &value)) {
-            if (!SetBreakpoint(reinterpret_cast<Instruction*>(value))) {
-              PrintF("setting breakpoint failed\n");
-            }
-          } else {
-            PrintF("%s unrecognized\n", arg1);
-          }
-        } else {
-          PrintF("break <address>\n");
\n"); - } - } else if (strcmp(cmd, "del") == 0) { - if (!DeleteBreakpoint(nullptr)) { - PrintF("deleting breakpoint failed\n"); - } - } else if (strcmp(cmd, "flags") == 0) { - PrintF("No flags on MIPS !\n"); - } else if (strcmp(cmd, "stop") == 0) { - int32_t value; - intptr_t stop_pc = sim_->get_pc() - 2 * kInstrSize; - Instruction* stop_instr = reinterpret_cast(stop_pc); - Instruction* msg_address = - reinterpret_cast(stop_pc + kInstrSize); - if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) { - // Remove the current stop. - if (sim_->IsStopInstruction(stop_instr)) { - stop_instr->SetInstructionBits(kNopInstr); - msg_address->SetInstructionBits(kNopInstr); - } else { - PrintF("Not at debugger stop.\n"); - } - } else if (argc == 3) { - // Print information about all/the specified breakpoint(s). - if (strcmp(arg1, "info") == 0) { - if (strcmp(arg2, "all") == 0) { - PrintF("Stop information:\n"); - for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode; - i++) { - sim_->PrintStopInfo(i); - } - } else if (GetValue(arg2, &value)) { - sim_->PrintStopInfo(value); - } else { - PrintF("Unrecognized argument.\n"); - } - } else if (strcmp(arg1, "enable") == 0) { - // Enable all/the specified breakpoint(s). - if (strcmp(arg2, "all") == 0) { - for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode; - i++) { - sim_->EnableStop(i); - } - } else if (GetValue(arg2, &value)) { - sim_->EnableStop(value); - } else { - PrintF("Unrecognized argument.\n"); - } - } else if (strcmp(arg1, "disable") == 0) { - // Disable all/the specified breakpoint(s). - if (strcmp(arg2, "all") == 0) { - for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode; - i++) { - sim_->DisableStop(i); - } - } else if (GetValue(arg2, &value)) { - sim_->DisableStop(value); - } else { - PrintF("Unrecognized argument.\n"); - } - } - } else { - PrintF("Wrong usage. Use help command for more information.\n"); - } - } else if ((strcmp(cmd, "stat") == 0) || (strcmp(cmd, "st") == 0)) { - // Print registers and disassemble. - PrintAllRegs(); - PrintF("\n"); - - disasm::NameConverter converter; - disasm::Disassembler dasm(converter); - // Use a reasonably large buffer. - v8::base::EmbeddedVector buffer; - - byte* cur = nullptr; - byte* end = nullptr; - - if (argc == 1) { - cur = reinterpret_cast(sim_->get_pc()); - end = cur + (10 * kInstrSize); - } else if (argc == 2) { - int32_t value; - if (GetValue(arg1, &value)) { - cur = reinterpret_cast(value); - // no length parameter passed, assume 10 instructions - end = cur + (10 * kInstrSize); - } - } else { - int32_t value1; - int32_t value2; - if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) { - cur = reinterpret_cast(value1); - end = cur + (value2 * kInstrSize); - } - } - - while (cur < end) { - dasm.InstructionDecode(buffer, cur); - PrintF(" 0x%08" PRIxPTR " %s\n", reinterpret_cast(cur), - buffer.begin()); - cur += kInstrSize; - } - } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) { - PrintF("cont\n"); - PrintF(" continue execution (alias 'c')\n"); - PrintF("stepi\n"); - PrintF(" step one instruction (alias 'si')\n"); - PrintF("print \n"); - PrintF(" print register content (alias 'p')\n"); - PrintF(" use register name 'all' to print all registers\n"); - PrintF("printobject \n"); - PrintF(" print an object from a register (alias 'po')\n"); - PrintF("stack []\n"); - PrintF(" dump stack content, default dump 10 words)\n"); - PrintF("mem
[]\n"); - PrintF(" dump memory content, default dump 10 words)\n"); - PrintF("dump []\n"); - PrintF( - " dump memory content without pretty printing JS objects, default " - "dump 10 words)\n"); - PrintF("flags\n"); - PrintF(" print flags\n"); - PrintF("disasm []\n"); - PrintF("disasm [
]\n"); - PrintF("disasm [[
] ]\n"); - PrintF(" disassemble code, default is 10 instructions\n"); - PrintF(" from pc (alias 'di')\n"); - PrintF("gdb\n"); - PrintF(" enter gdb\n"); - PrintF("break
\n"); - PrintF(" set a break point on the address\n"); - PrintF("del\n"); - PrintF(" delete the breakpoint\n"); - PrintF("stop feature:\n"); - PrintF(" Description:\n"); - PrintF(" Stops are debug instructions inserted by\n"); - PrintF(" the Assembler::stop() function.\n"); - PrintF(" When hitting a stop, the Simulator will\n"); - PrintF(" stop and give control to the Debugger.\n"); - PrintF(" All stop codes are watched:\n"); - PrintF(" - They can be enabled / disabled: the Simulator\n"); - PrintF(" will / won't stop when hitting them.\n"); - PrintF(" - The Simulator keeps track of how many times they \n"); - PrintF(" are met. (See the info command.) Going over a\n"); - PrintF(" disabled stop still increases its counter. \n"); - PrintF(" Commands:\n"); - PrintF(" stop info all/ : print infos about number \n"); - PrintF(" or all stop(s).\n"); - PrintF(" stop enable/disable all/ : enables / disables\n"); - PrintF(" all or number stop(s)\n"); - PrintF(" stop unstop\n"); - PrintF(" ignore the stop instruction at the current location\n"); - PrintF(" from now on\n"); - } else { - PrintF("Unknown command: %s\n", cmd); - } - } - } - - // Add all the breakpoints back to stop execution and enter the debugger - // shell when hit. - RedoBreakpoints(); - -#undef COMMAND_SIZE -#undef ARG_SIZE - -#undef STR -#undef XSTR -} - -bool Simulator::ICacheMatch(void* one, void* two) { - DCHECK_EQ(reinterpret_cast(one) & CachePage::kPageMask, 0); - DCHECK_EQ(reinterpret_cast(two) & CachePage::kPageMask, 0); - return one == two; -} - -static uint32_t ICacheHash(void* key) { - return static_cast(reinterpret_cast(key)) >> 2; -} - -static bool AllOnOnePage(uintptr_t start, int size) { - intptr_t start_page = (start & ~CachePage::kPageMask); - intptr_t end_page = ((start + size) & ~CachePage::kPageMask); - return start_page == end_page; -} - -void Simulator::set_last_debugger_input(char* input) { - DeleteArray(last_debugger_input_); - last_debugger_input_ = input; -} - -void Simulator::SetRedirectInstruction(Instruction* instruction) { - instruction->SetInstructionBits(rtCallRedirInstr); -} - -void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache, - void* start_addr, size_t size) { - intptr_t start = reinterpret_cast(start_addr); - int intra_line = (start & CachePage::kLineMask); - start -= intra_line; - size += intra_line; - size = ((size - 1) | CachePage::kLineMask) + 1; - int offset = (start & CachePage::kPageMask); - while (!AllOnOnePage(start, size - 1)) { - int bytes_to_flush = CachePage::kPageSize - offset; - FlushOnePage(i_cache, start, bytes_to_flush); - start += bytes_to_flush; - size -= bytes_to_flush; - DCHECK_EQ(0, start & CachePage::kPageMask); - offset = 0; - } - if (size != 0) { - FlushOnePage(i_cache, start, size); - } -} - -CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache, - void* page) { - base::CustomMatcherHashMap::Entry* entry = - i_cache->LookupOrInsert(page, ICacheHash(page)); - if (entry->value == nullptr) { - CachePage* new_page = new CachePage(); - entry->value = new_page; - } - return reinterpret_cast(entry->value); -} - -// Flush from start up to and not including start + size. 
-
-// Flush from start up to and not including start + size.
-void Simulator::FlushOnePage(base::CustomMatcherHashMap* i_cache,
-                             intptr_t start, int size) {
-  DCHECK_LE(size, CachePage::kPageSize);
-  DCHECK(AllOnOnePage(start, size - 1));
-  DCHECK_EQ(start & CachePage::kLineMask, 0);
-  DCHECK_EQ(size & CachePage::kLineMask, 0);
-  void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
-  int offset = (start & CachePage::kPageMask);
-  CachePage* cache_page = GetCachePage(i_cache, page);
-  char* valid_bytemap = cache_page->ValidityByte(offset);
-  memset(valid_bytemap, CachePage::LINE_INVALID,
-         size >> CachePage::kLineShift);
-}
-
-void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
-                            Instruction* instr) {
-  intptr_t address = reinterpret_cast<intptr_t>(instr);
-  void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
-  void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
-  int offset = (address & CachePage::kPageMask);
-  CachePage* cache_page = GetCachePage(i_cache, page);
-  char* cache_valid_byte = cache_page->ValidityByte(offset);
-  bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
-  char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
-  if (cache_hit) {
-    // Check that the data in memory matches the contents of the I-cache.
-    CHECK_EQ(0, memcmp(reinterpret_cast<void*>(instr),
-                       cache_page->CachedData(offset), kInstrSize));
-  } else {
-    // Cache miss.  Load memory into the cache.
-    memcpy(cached_line, line, CachePage::kLineLength);
-    *cache_valid_byte = CachePage::LINE_VALID;
-  }
-}
-
-Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
-  // Set up simulator support first. Some of this information is needed to
-  // setup the architecture state.
-  stack_size_ = v8_flags.sim_stack_size * KB;
-  stack_ = reinterpret_cast<char*>(base::Malloc(stack_size_));
-  pc_modified_ = false;
-  icount_ = 0;
-  break_count_ = 0;
-  break_pc_ = nullptr;
-  break_instr_ = 0;
-
-  // Set up architecture state.
-  // All registers are initialized to zero to start with.
-  for (int i = 0; i < kNumSimuRegisters; i++) {
-    registers_[i] = 0;
-  }
-  for (int i = 0; i < kNumFPURegisters; i++) {
-    FPUregisters_[2 * i] = 0;
-    FPUregisters_[2 * i + 1] = 0;  // upper part for MSA ASE
-  }
-  if (IsMipsArchVariant(kMips32r6)) {
-    FCSR_ = kFCSRNaN2008FlagMask;
-    MSACSR_ = 0;
-  } else {
-    DCHECK(IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kMips32r2));
-    FCSR_ = 0;
-  }
-
-  // The sp is initialized to point to the bottom (high address) of the
-  // allocated stack area. To be safe in potential stack underflows we leave
-  // some buffer below.
-  registers_[sp] = reinterpret_cast<int32_t>(stack_) + stack_size_ - 64;
-  // The ra and pc are initialized to a known bad value that will cause an
-  // access violation if the simulator ever tries to execute it.
-  registers_[pc] = bad_ra;
-  registers_[ra] = bad_ra;
-  last_debugger_input_ = nullptr;
-}
-
-Simulator::~Simulator() {
-  GlobalMonitor::Get()->RemoveLinkedAddress(&global_monitor_thread_);
-  base::Free(stack_);
-}
-
-// Get the active Simulator for the current thread.
-Simulator* Simulator::current(Isolate* isolate) {
-  v8::internal::Isolate::PerIsolateThreadData* isolate_data =
-      isolate->FindOrAllocatePerThreadDataForThisThread();
-  DCHECK_NOT_NULL(isolate_data);
-
-  Simulator* sim = isolate_data->simulator();
-  if (sim == nullptr) {
-    // TODO(146): delete the simulator object when a thread/isolate goes away.
-    sim = new Simulator(isolate);
-    isolate_data->set_simulator(sim);
-  }
-  return sim;
-}
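Simulator::current above is the usual lazy per-thread accessor: look up this thread's slot, construct the simulator on first use, and cache it for later calls. The same shape in portable C++, with thread_local standing in for V8's per-isolate thread data (an assumption made for the sketch, not how V8 stores it):

class Sim {
 public:
  // Returns this thread's simulator, creating it on first use.
  static Sim* Current() {
    thread_local Sim* current = nullptr;
    if (current == nullptr) current = new Sim();  // deliberately never freed
    return current;
  }
};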
-
-// Sets the register in the architecture state. It will also deal with updating
-// Simulator internal state for special registers such as PC.
-void Simulator::set_register(int reg, int32_t value) {
-  DCHECK((reg >= 0) && (reg < kNumSimuRegisters));
-  if (reg == pc) {
-    pc_modified_ = true;
-  }
-
-  // Zero register always holds 0.
-  registers_[reg] = (reg == 0) ? 0 : value;
-}
-
-void Simulator::set_dw_register(int reg, const int* dbl) {
-  DCHECK((reg >= 0) && (reg < kNumSimuRegisters));
-  registers_[reg] = dbl[0];
-  registers_[reg + 1] = dbl[1];
-}
-
-void Simulator::set_fpu_register(int fpureg, int64_t value) {
-  DCHECK(IsFp64Mode());
-  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
-  FPUregisters_[fpureg * 2] = value;
-}
-
-void Simulator::set_fpu_register_word(int fpureg, int32_t value) {
-  // Set ONLY lower 32-bits, leaving upper bits untouched.
-  // TODO(plind): big endian issue.
-  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
-  int32_t* pword = reinterpret_cast<int32_t*>(&FPUregisters_[fpureg * 2]);
-  *pword = value;
-}
-
-void Simulator::set_fpu_register_hi_word(int fpureg, int32_t value) {
-  // Set ONLY upper 32-bits, leaving lower bits untouched.
-  // TODO(plind): big endian issue.
-  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
-  int32_t* phiword =
-      (reinterpret_cast<int32_t*>(&FPUregisters_[fpureg * 2])) + 1;
-  *phiword = value;
-}
-
-void Simulator::set_fpu_register_float(int fpureg, float value) {
-  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
-  *base::bit_cast<float*>(&FPUregisters_[fpureg * 2]) = value;
-}
-
-void Simulator::set_fpu_register_double(int fpureg, double value) {
-  if (IsFp64Mode()) {
-    DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
-    *base::bit_cast<double*>(&FPUregisters_[fpureg * 2]) = value;
-  } else {
-    DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters) &&
-           ((fpureg % 2) == 0));
-    int64_t i64 = base::bit_cast<int64_t>(value);
-    set_fpu_register_word(fpureg, i64 & 0xFFFFFFFF);
-    set_fpu_register_word(fpureg + 1, i64 >> 32);
-  }
-}
-
-// Get the register from the architecture state. This function does handle
-// the special case of accessing the PC register.
-int32_t Simulator::get_register(int reg) const {
-  DCHECK((reg >= 0) && (reg < kNumSimuRegisters));
-  if (reg == 0)
-    return 0;
-  else
-    return registers_[reg] + ((reg == pc) ? Instruction::kPCReadOffset : 0);
-}
-
-double Simulator::get_double_from_register_pair(int reg) {
-  // TODO(plind): bad ABI stuff, refactor or remove.
-  DCHECK((reg >= 0) && (reg < kNumSimuRegisters) && ((reg % 2) == 0));
-
-  double dm_val = 0.0;
-  // Read the bits from the unsigned integer register_[] array
-  // into the double precision floating point value and return it.
-  char buffer[2 * sizeof(registers_[0])];
-  memcpy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
-  memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
-  return (dm_val);
-}
-
-int64_t Simulator::get_fpu_register(int fpureg) const {
-  if (IsFp64Mode()) {
-    DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
-    return FPUregisters_[fpureg * 2];
-  } else {
-    DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters) &&
-           ((fpureg % 2) == 0));
-    uint64_t i64;
-    i64 = static_cast<uint32_t>(get_fpu_register_word(fpureg));
-    i64 |= static_cast<uint64_t>(get_fpu_register_word(fpureg + 1)) << 32;
-    return static_cast<int64_t>(i64);
-  }
-}
-
-int32_t Simulator::get_fpu_register_word(int fpureg) const {
-  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
-  return static_cast<int32_t>(FPUregisters_[fpureg * 2] & 0xFFFFFFFF);
-}
-
-int32_t Simulator::get_fpu_register_signed_word(int fpureg) const {
-  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
-  return static_cast<int32_t>(FPUregisters_[fpureg * 2] & 0xFFFFFFFF);
-}
-
-int32_t Simulator::get_fpu_register_hi_word(int fpureg) const {
-  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
-  return static_cast<int32_t>((FPUregisters_[fpureg * 2] >> 32) & 0xFFFFFFFF);
-}
-
-float Simulator::get_fpu_register_float(int fpureg) const {
-  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
-  return *base::bit_cast<float*>(
-      const_cast<int64_t*>(&FPUregisters_[fpureg * 2]));
-}
-
-double Simulator::get_fpu_register_double(int fpureg) const {
-  if (IsFp64Mode()) {
-    DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
-    return *base::bit_cast<double*>(&FPUregisters_[fpureg * 2]);
-  } else {
-    DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters) &&
-           ((fpureg % 2) == 0));
-    int64_t i64;
-    i64 = static_cast<uint32_t>(get_fpu_register_word(fpureg));
-    i64 |= static_cast<int64_t>(get_fpu_register_word(fpureg + 1)) << 32;
-    return base::bit_cast<double>(i64);
-  }
-}
-
-template <typename T>
-void Simulator::get_msa_register(int wreg, T* value) {
-  DCHECK((wreg >= 0) && (wreg < kNumMSARegisters));
-  memcpy(value, FPUregisters_ + wreg * 2, kSimd128Size);
-}
-
-template <typename T>
-void Simulator::set_msa_register(int wreg, const T* value) {
-  DCHECK((wreg >= 0) && (wreg < kNumMSARegisters));
-  memcpy(FPUregisters_ + wreg * 2, value, kSimd128Size);
-}
-
-// Runtime FP routines take up to two double arguments and zero
-// or one integer arguments. All are constructed here,
-// from a0-a3 or f12 and f14.
-void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
-  if (!IsMipsSoftFloatABI) {
-    *x = get_fpu_register_double(12);
-    *y = get_fpu_register_double(14);
-    *z = get_register(a2);
-  } else {
-    // TODO(plind): bad ABI stuff, refactor or remove.
-    // We use a char buffer to get around the strict-aliasing rules which
-    // otherwise allow the compiler to optimize away the copy.
-    char buffer[sizeof(*x)];
-    int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
-
-    // Registers a0 and a1 -> x.
-    reg_buffer[0] = get_register(a0);
-    reg_buffer[1] = get_register(a1);
-    memcpy(x, buffer, sizeof(buffer));
-    // Registers a2 and a3 -> y.
-    reg_buffer[0] = get_register(a2);
-    reg_buffer[1] = get_register(a3);
-    memcpy(y, buffer, sizeof(buffer));
-    // Register 2 -> z.
-    reg_buffer[0] = get_register(a2);
-    memcpy(z, buffer, sizeof(*z));
-  }
-}
-
-// The return value is either in v0/v1 or f0.
-void Simulator::SetFpResult(const double& result) {
-  if (!IsMipsSoftFloatABI) {
-    set_fpu_register_double(0, result);
-  } else {
-    char buffer[2 * sizeof(registers_[0])];
-    int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
-    memcpy(buffer, &result, sizeof(buffer));
-    // Copy result to v0 and v1.
-    set_register(v0, reg_buffer[0]);
-    set_register(v1, reg_buffer[1]);
-  }
-}
-
-// Helper functions for setting and testing the FCSR register's bits.
-void Simulator::set_fcsr_bit(uint32_t cc, bool value) {
-  if (value) {
-    FCSR_ |= (1 << cc);
-  } else {
-    FCSR_ &= ~(1 << cc);
-  }
-}
-
-bool Simulator::test_fcsr_bit(uint32_t cc) { return FCSR_ & (1 << cc); }
-
-void Simulator::clear_fcsr_cause() {
-  FCSR_ &= ~kFCSRCauseMask;
-}
-
-void Simulator::set_fcsr_rounding_mode(FPURoundingMode mode) {
-  FCSR_ |= mode & kFPURoundingModeMask;
-}
-
-void Simulator::set_msacsr_rounding_mode(FPURoundingMode mode) {
-  MSACSR_ |= mode & kFPURoundingModeMask;
-}
-
-unsigned int Simulator::get_fcsr_rounding_mode() {
-  return FCSR_ & kFPURoundingModeMask;
-}
-
-unsigned int Simulator::get_msacsr_rounding_mode() {
-  return MSACSR_ & kFPURoundingModeMask;
-}
-
-void Simulator::set_fpu_register_word_invalid_result(float original,
-                                                     float rounded) {
-  if (FCSR_ & kFCSRNaN2008FlagMask) {
-    double max_int32 = std::numeric_limits<int32_t>::max();
-    double min_int32 = std::numeric_limits<int32_t>::min();
-    if (std::isnan(original)) {
-      set_fpu_register_word(fd_reg(), 0);
-    } else if (rounded > max_int32) {
-      set_fpu_register_word(fd_reg(), kFPUInvalidResult);
-    } else if (rounded < min_int32) {
-      set_fpu_register_word(fd_reg(), kFPUInvalidResultNegative);
-    } else {
-      UNREACHABLE();
-    }
-  } else {
-    set_fpu_register_word(fd_reg(), kFPUInvalidResult);
-  }
-}
-
-void Simulator::set_fpu_register_invalid_result(float original,
-                                                float rounded) {
-  if (FCSR_ & kFCSRNaN2008FlagMask) {
-    double max_int32 = std::numeric_limits<int32_t>::max();
-    double min_int32 = std::numeric_limits<int32_t>::min();
-    if (std::isnan(original)) {
-      set_fpu_register(fd_reg(), 0);
-    } else if (rounded > max_int32) {
-      set_fpu_register(fd_reg(), kFPUInvalidResult);
-    } else if (rounded < min_int32) {
-      set_fpu_register(fd_reg(), kFPUInvalidResultNegative);
-    } else {
-      UNREACHABLE();
-    }
-  } else {
-    set_fpu_register(fd_reg(), kFPUInvalidResult);
-  }
-}
-
-void Simulator::set_fpu_register_invalid_result64(float original,
-                                                  float rounded) {
-  if (FCSR_ & kFCSRNaN2008FlagMask) {
-    // The value of INT64_MAX (2^63-1) can't be represented as double exactly,
-    // loading the most accurate representation into max_int64, which is 2^63.
-    double max_int64 = static_cast<double>(std::numeric_limits<int64_t>::max());
-    double min_int64 = std::numeric_limits<int64_t>::min();
-    if (std::isnan(original)) {
-      set_fpu_register(fd_reg(), 0);
-    } else if (rounded >= max_int64) {
-      set_fpu_register(fd_reg(), kFPU64InvalidResult);
-    } else if (rounded < min_int64) {
-      set_fpu_register(fd_reg(), kFPU64InvalidResultNegative);
-    } else {
-      UNREACHABLE();
-    }
-  } else {
-    set_fpu_register(fd_reg(), kFPU64InvalidResult);
-  }
-}
-
-void Simulator::set_fpu_register_word_invalid_result(double original,
-                                                     double rounded) {
-  if (FCSR_ & kFCSRNaN2008FlagMask) {
-    double max_int32 = std::numeric_limits<int32_t>::max();
-    double min_int32 = std::numeric_limits<int32_t>::min();
-    if (std::isnan(original)) {
-      set_fpu_register_word(fd_reg(), 0);
-    } else if (rounded > max_int32) {
-      set_fpu_register_word(fd_reg(), kFPUInvalidResult);
-    } else if (rounded < min_int32) {
-      set_fpu_register_word(fd_reg(), kFPUInvalidResultNegative);
-    } else {
-      UNREACHABLE();
-    }
-  } else {
-    set_fpu_register_word(fd_reg(), kFPUInvalidResult);
-  }
-}
-
-void Simulator::set_fpu_register_invalid_result(double original,
-                                                double rounded) {
-  if (FCSR_ & kFCSRNaN2008FlagMask) {
-    double max_int32 = std::numeric_limits<int32_t>::max();
-    double min_int32 = std::numeric_limits<int32_t>::min();
-    if (std::isnan(original)) {
-      set_fpu_register(fd_reg(), 0);
-    } else if (rounded > max_int32) {
-      set_fpu_register(fd_reg(), kFPUInvalidResult);
-    } else if (rounded < min_int32) {
-      set_fpu_register(fd_reg(), kFPUInvalidResultNegative);
-    } else {
-      UNREACHABLE();
-    }
-  } else {
-    set_fpu_register(fd_reg(), kFPUInvalidResult);
-  }
-}
-
-void Simulator::set_fpu_register_invalid_result64(double original,
-                                                  double rounded) {
-  if (FCSR_ & kFCSRNaN2008FlagMask) {
-    // The value of INT64_MAX (2^63-1) can't be represented as double exactly,
-    // loading the most accurate representation into max_int64, which is 2^63.
-    double max_int64 = static_cast<double>(std::numeric_limits<int64_t>::max());
-    double min_int64 = std::numeric_limits<int64_t>::min();
-    if (std::isnan(original)) {
-      set_fpu_register(fd_reg(), 0);
-    } else if (rounded >= max_int64) {
-      set_fpu_register(fd_reg(), kFPU64InvalidResult);
-    } else if (rounded < min_int64) {
-      set_fpu_register(fd_reg(), kFPU64InvalidResultNegative);
-    } else {
-      UNREACHABLE();
-    }
-  } else {
-    set_fpu_register(fd_reg(), kFPU64InvalidResult);
-  }
-}
-
-// Sets the rounding error codes in FCSR based on the result of the rounding.
-// Returns true if the operation was invalid.
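All of the invalid-result setters above apply one saturation rule. Before the FCSR error-flag helpers that follow, here is that rule distilled into a standalone sketch; the helper name is hypothetical and this is not V8 API (the simulator only reaches these setters in the NaN and out-of-range cases, so the in-range branch is included purely for completeness):

#include <cmath>
#include <cstdint>
#include <limits>

// Editorial sketch: the NaN2008 saturation rule for a double-to-word
// conversion. kFPUInvalidResult and kFPUInvalidResultNegative in the code
// above correspond to INT32_MAX and INT32_MIN respectively.
int32_t SaturateToWord(double original, double rounded) {
  if (std::isnan(original)) return 0;              // NaN converts to 0.
  if (rounded > std::numeric_limits<int32_t>::max())
    return std::numeric_limits<int32_t>::max();    // Overflow saturates up.
  if (rounded < std::numeric_limits<int32_t>::min())
    return std::numeric_limits<int32_t>::min();    // Underflow saturates down.
  return static_cast<int32_t>(rounded);            // In-range value.
}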
-bool Simulator::set_fcsr_round_error(double original, double rounded) {
-  bool ret = false;
-  double max_int32 = std::numeric_limits<int32_t>::max();
-  double min_int32 = std::numeric_limits<int32_t>::min();
-
-  clear_fcsr_cause();
-
-  if (!std::isfinite(original) || !std::isfinite(rounded)) {
-    set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
-    set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
-    ret = true;
-  }
-
-  if (original != rounded) {
-    set_fcsr_bit(kFCSRInexactFlagBit, true);
-    set_fcsr_bit(kFCSRInexactCauseBit, true);
-  }
-
-  if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) {
-    set_fcsr_bit(kFCSRUnderflowFlagBit, true);
-    set_fcsr_bit(kFCSRUnderflowCauseBit, true);
-    ret = true;
-  }
-
-  if (rounded > max_int32 || rounded < min_int32) {
-    set_fcsr_bit(kFCSROverflowFlagBit, true);
-    set_fcsr_bit(kFCSROverflowCauseBit, true);
-    // The reference is not really clear but it seems this is required:
-    set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
-    set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
-    ret = true;
-  }
-
-  return ret;
-}
-
-// Sets the rounding error codes in FCSR based on the result of the rounding.
-// Returns true if the operation was invalid.
-bool Simulator::set_fcsr_round64_error(double original, double rounded) {
-  bool ret = false;
-  // The value of INT64_MAX (2^63-1) can't be represented as double exactly,
-  // loading the most accurate representation into max_int64, which is 2^63.
-  double max_int64 = static_cast<double>(std::numeric_limits<int64_t>::max());
-  double min_int64 = std::numeric_limits<int64_t>::min();
-
-  clear_fcsr_cause();
-
-  if (!std::isfinite(original) || !std::isfinite(rounded)) {
-    set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
-    set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
-    ret = true;
-  }
-
-  if (original != rounded) {
-    set_fcsr_bit(kFCSRInexactFlagBit, true);
-    set_fcsr_bit(kFCSRInexactCauseBit, true);
-  }
-
-  if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) {
-    set_fcsr_bit(kFCSRUnderflowFlagBit, true);
-    set_fcsr_bit(kFCSRUnderflowCauseBit, true);
-    ret = true;
-  }
-
-  if (rounded >= max_int64 || rounded < min_int64) {
-    set_fcsr_bit(kFCSROverflowFlagBit, true);
-    set_fcsr_bit(kFCSROverflowCauseBit, true);
-    // The reference is not really clear but it seems this is required:
-    set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
-    set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
-    ret = true;
-  }
-
-  return ret;
-}
-
-// Sets the rounding error codes in FCSR based on the result of the rounding.
-// Returns true if the operation was invalid.
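One detail in the guards above, repeated by the single-precision overloads below, is easy to miss: the 32-bit overflow test is strict (`rounded > max_int32`) while the 64-bit test is inclusive (`rounded >= max_int64`). A short standalone demonstration of why (editorial sketch, not V8 code):

#include <cstdint>
#include <cstdio>
#include <limits>

// INT32_MAX is exactly representable as a double, so only values strictly
// above it overflow. INT64_MAX is not; converting it to double rounds up
// to 2^63, so a rounded result equal to that bound is already out of
// range, hence the ">=" in the 64-bit checks.
int main() {
  double max_int32 = std::numeric_limits<int32_t>::max();
  double max_int64 =
      static_cast<double>(std::numeric_limits<int64_t>::max());
  std::printf("%.1f\n", max_int32);  // 2147483647.0 (exact)
  std::printf("%.1f\n", max_int64);  // 9223372036854775808.0, i.e. 2^63
  return 0;
}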
-bool Simulator::set_fcsr_round_error(float original, float rounded) {
-  bool ret = false;
-  double max_int32 = std::numeric_limits<int32_t>::max();
-  double min_int32 = std::numeric_limits<int32_t>::min();
-
-  clear_fcsr_cause();
-
-  if (!std::isfinite(original) || !std::isfinite(rounded)) {
-    set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
-    set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
-    ret = true;
-  }
-
-  if (original != rounded) {
-    set_fcsr_bit(kFCSRInexactFlagBit, true);
-    set_fcsr_bit(kFCSRInexactCauseBit, true);
-  }
-
-  if (rounded < FLT_MIN && rounded > -FLT_MIN && rounded != 0) {
-    set_fcsr_bit(kFCSRUnderflowFlagBit, true);
-    set_fcsr_bit(kFCSRUnderflowCauseBit, true);
-    ret = true;
-  }
-
-  if (rounded > max_int32 || rounded < min_int32) {
-    set_fcsr_bit(kFCSROverflowFlagBit, true);
-    set_fcsr_bit(kFCSROverflowCauseBit, true);
-    // The reference is not really clear but it seems this is required:
-    set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
-    set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
-    ret = true;
-  }
-
-  return ret;
-}
-
-// Sets the rounding error codes in FCSR based on the result of the rounding.
-// Returns true if the operation was invalid.
-bool Simulator::set_fcsr_round64_error(float original, float rounded) {
-  bool ret = false;
-  // The value of INT64_MAX (2^63-1) can't be represented as double exactly,
-  // loading the most accurate representation into max_int64, which is 2^63.
-  double max_int64 = static_cast<double>(std::numeric_limits<int64_t>::max());
-  double min_int64 = std::numeric_limits<int64_t>::min();
-
-  clear_fcsr_cause();
-
-  if (!std::isfinite(original) || !std::isfinite(rounded)) {
-    set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
-    set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
-    ret = true;
-  }
-
-  if (original != rounded) {
-    set_fcsr_bit(kFCSRInexactFlagBit, true);
-    set_fcsr_bit(kFCSRInexactCauseBit, true);
-  }
-
-  if (rounded < FLT_MIN && rounded > -FLT_MIN && rounded != 0) {
-    set_fcsr_bit(kFCSRUnderflowFlagBit, true);
-    set_fcsr_bit(kFCSRUnderflowCauseBit, true);
-    ret = true;
-  }
-
-  if (rounded >= max_int64 || rounded < min_int64) {
-    set_fcsr_bit(kFCSROverflowFlagBit, true);
-    set_fcsr_bit(kFCSROverflowCauseBit, true);
-    // The reference is not really clear but it seems this is required:
-    set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
-    set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
-    ret = true;
-  }
-
-  return ret;
-}
-
-void Simulator::round_according_to_fcsr(double toRound, double* rounded,
-                                        int32_t* rounded_int, double fs) {
-  // 0 RN (round to nearest): Round a result to the nearest
-  // representable value; if the result is exactly halfway between
-  // two representable values, round to zero. Behave like round_w_d.
-
-  // 1 RZ (round toward zero): Round a result to the closest
-  // representable value whose absolute value is less than or
-  // equal to the infinitely accurate result. Behave like trunc_w_d.
-
-  // 2 RP (round up, or toward infinity): Round a result to the
-  // next representable value up. Behave like ceil_w_d.
-
-  // 3 RD (round down, or toward -infinity): Round a result to
-  // the next representable value down. Behave like floor_w_d.
-  switch (get_fcsr_rounding_mode()) {
-    case kRoundToNearest:
-      *rounded = std::floor(fs + 0.5);
-      *rounded_int = static_cast<int32_t>(*rounded);
-      if ((*rounded_int & 1) != 0 && *rounded_int - fs == 0.5) {
-        // If the number is halfway between two integers,
-        // round to the even one.
-        *rounded_int -= 1;
-        *rounded -= 1.;
-      }
-      break;
-    case kRoundToZero:
-      *rounded = trunc(fs);
-      *rounded_int = static_cast<int32_t>(*rounded);
-      break;
-    case kRoundToPlusInf:
-      *rounded = std::ceil(fs);
-      *rounded_int = static_cast<int32_t>(*rounded);
-      break;
-    case kRoundToMinusInf:
-      *rounded = std::floor(fs);
-      *rounded_int = static_cast<int32_t>(*rounded);
-      break;
-  }
-}
-
-void Simulator::round_according_to_fcsr(float toRound, float* rounded,
-                                        int32_t* rounded_int, float fs) {
-  // 0 RN (round to nearest): Round a result to the nearest
-  // representable value; if the result is exactly halfway between
-  // two representable values, round to zero. Behave like round_w_d.
-
-  // 1 RZ (round toward zero): Round a result to the closest
-  // representable value whose absolute value is less than or
-  // equal to the infinitely accurate result. Behave like trunc_w_d.
-
-  // 2 RP (round up, or toward infinity): Round a result to the
-  // next representable value up. Behave like ceil_w_d.
-
-  // 3 RD (round down, or toward -infinity): Round a result to
-  // the next representable value down. Behave like floor_w_d.
-  switch (get_fcsr_rounding_mode()) {
-    case kRoundToNearest:
-      *rounded = std::floor(fs + 0.5);
-      *rounded_int = static_cast<int32_t>(*rounded);
-      if ((*rounded_int & 1) != 0 && *rounded_int - fs == 0.5) {
-        // If the number is halfway between two integers,
-        // round to the even one.
-        *rounded_int -= 1;
-        *rounded -= 1.f;
-      }
-      break;
-    case kRoundToZero:
-      *rounded = trunc(fs);
-      *rounded_int = static_cast<int32_t>(*rounded);
-      break;
-    case kRoundToPlusInf:
-      *rounded = std::ceil(fs);
-      *rounded_int = static_cast<int32_t>(*rounded);
-      break;
-    case kRoundToMinusInf:
-      *rounded = std::floor(fs);
-      *rounded_int = static_cast<int32_t>(*rounded);
-      break;
-  }
-}
-
-template <typename T_fp, typename T_int>
-void Simulator::round_according_to_msacsr(T_fp toRound, T_fp* rounded,
-                                          T_int* rounded_int) {
-  // 0 RN (round to nearest): Round a result to the nearest
-  // representable value; if the result is exactly halfway between
-  // two representable values, round to zero. Behave like round_w_d.
-
-  // 1 RZ (round toward zero): Round a result to the closest
-  // representable value whose absolute value is less than or
-  // equal to the infinitely accurate result. Behave like trunc_w_d.
-
-  // 2 RP (round up, or toward infinity): Round a result to the
-  // next representable value up. Behave like ceil_w_d.
-
-  // 3 RD (round down, or toward -infinity): Round a result to
-  // the next representable value down. Behave like floor_w_d.
-  switch (get_msacsr_rounding_mode()) {
-    case kRoundToNearest:
-      *rounded = std::floor(toRound + 0.5);
-      *rounded_int = static_cast<T_int>(*rounded);
-      if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) {
-        // If the number is halfway between two integers,
-        // round to the even one.
-        *rounded_int -= 1;
-        *rounded -= 1;
-      }
-      break;
-    case kRoundToZero:
-      *rounded = trunc(toRound);
-      *rounded_int = static_cast<T_int>(*rounded);
-      break;
-    case kRoundToPlusInf:
-      *rounded = std::ceil(toRound);
-      *rounded_int = static_cast<T_int>(*rounded);
-      break;
-    case kRoundToMinusInf:
-      *rounded = std::floor(toRound);
-      *rounded_int = static_cast<T_int>(*rounded);
-      break;
-  }
-}
-
-void Simulator::round64_according_to_fcsr(double toRound, double* rounded,
-                                          int64_t* rounded_int, double fs) {
-  // 0 RN (round to nearest): Round a result to the nearest
-  // representable value; if the result is exactly halfway between
-  // two representable values, round to zero. Behave like round_w_d.
-
-  // 1 RZ (round toward zero): Round a result to the closest
-  // representable value whose absolute value is less than or
-  // equal to the infinitely accurate result. Behave like trunc_w_d.
-
-  // 2 RP (round up, or toward +infinity): Round a result to the
-  // next representable value up. Behave like ceil_w_d.
-
-  // 3 RD (round down, or toward -infinity): Round a result to
-  // the next representable value down. Behave like floor_w_d.
-  switch (FCSR_ & 3) {
-    case kRoundToNearest:
-      *rounded = std::floor(fs + 0.5);
-      *rounded_int = static_cast<int64_t>(*rounded);
-      if ((*rounded_int & 1) != 0 && *rounded_int - fs == 0.5) {
-        // If the number is halfway between two integers,
-        // round to the even one.
-        *rounded_int -= 1;
-        *rounded -= 1.;
-      }
-      break;
-    case kRoundToZero:
-      *rounded = trunc(fs);
-      *rounded_int = static_cast<int64_t>(*rounded);
-      break;
-    case kRoundToPlusInf:
-      *rounded = std::ceil(fs);
-      *rounded_int = static_cast<int64_t>(*rounded);
-      break;
-    case kRoundToMinusInf:
-      *rounded = std::floor(fs);
-      *rounded_int = static_cast<int64_t>(*rounded);
-      break;
-  }
-}
-
-void Simulator::round64_according_to_fcsr(float toRound, float* rounded,
-                                          int64_t* rounded_int, float fs) {
-  // 0 RN (round to nearest): Round a result to the nearest
-  // representable value; if the result is exactly halfway between
-  // two representable values, round to zero. Behave like round_w_d.
-
-  // 1 RZ (round toward zero): Round a result to the closest
-  // representable value whose absolute value is less than or
-  // equal to the infinitely accurate result. Behave like trunc_w_d.
-
-  // 2 RP (round up, or toward +infinity): Round a result to the
-  // next representable value up. Behave like ceil_w_d.
-
-  // 3 RD (round down, or toward -infinity): Round a result to
-  // the next representable value down. Behave like floor_w_d.
-  switch (FCSR_ & 3) {
-    case kRoundToNearest:
-      *rounded = std::floor(fs + 0.5);
-      *rounded_int = static_cast<int64_t>(*rounded);
-      if ((*rounded_int & 1) != 0 && *rounded_int - fs == 0.5) {
-        // If the number is halfway between two integers,
-        // round to the even one.
-        *rounded_int -= 1;
-        *rounded -= 1.f;
-      }
-      break;
-    case kRoundToZero:
-      *rounded = trunc(fs);
-      *rounded_int = static_cast<int64_t>(*rounded);
-      break;
-    case kRoundToPlusInf:
-      *rounded = std::ceil(fs);
-      *rounded_int = static_cast<int64_t>(*rounded);
-      break;
-    case kRoundToMinusInf:
-      *rounded = std::floor(fs);
-      *rounded_int = static_cast<int64_t>(*rounded);
-      break;
-  }
-}
-
-// Raw access to the PC register.
-void Simulator::set_pc(int32_t value) {
-  pc_modified_ = true;
-  registers_[pc] = value;
-}
-
-bool Simulator::has_bad_pc() const {
-  return ((registers_[pc] == bad_ra) || (registers_[pc] == end_sim_pc));
-}
-
-// Raw access to the PC register without the special adjustment when reading.
-int32_t Simulator::get_pc() const { return registers_[pc]; }
-
-// The MIPS cannot do unaligned reads and writes. On some MIPS platforms an
-// interrupt is caused. On others it does a funky rotation thing. For now we
-// simply disallow unaligned reads, but at some point we may want to move to
-// emulating the rotate behaviour. Note that simulator runs have the runtime
-// system running directly on the host system and only generated code is
-// executed in the simulator. Since the host is typically IA32 we will not
-// get the correct MIPS-like behaviour on unaligned accesses.
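The note above is the contract enforced by all of the Read*/Write* helpers further below: every access is gated on an alignment mask before the host pointer is dereferenced. A minimal sketch of that check (editorial, with a hypothetical helper name):

#include <cstdint>

// An access of size N bytes is naturally aligned when the low log2(N)
// address bits are zero; the simulator computes the mask as
// sizeof(type) - 1 (kPointerAlignmentMask for word accesses) and traps
// into the debugger on misalignment, except on MIPS32r6 where unaligned
// accesses are architecturally legal.
inline bool IsNaturallyAligned(int32_t addr, int32_t size_in_bytes) {
  return (addr & (size_in_bytes - 1)) == 0;
}
// Example: IsNaturallyAligned(0x1002, 4) is false (word access),
// while IsNaturallyAligned(0x1002, 2) is true (halfword access).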
-
-void Simulator::TraceRegWr(int32_t value, TraceType t) {
-  if (v8_flags.trace_sim) {
-    union {
-      int32_t fmt_int32;
-      float fmt_float;
-    } v;
-    v.fmt_int32 = value;
-
-    switch (t) {
-      case WORD:
-        SNPrintF(trace_buf_,
-                 "%08" PRIx32 " (%" PRIu64 ") int32:%" PRId32
-                 " uint32:%" PRIu32,
-                 value, icount_, value, value);
-        break;
-      case FLOAT:
-        SNPrintF(trace_buf_, "%08" PRIx32 " (%" PRIu64 ") flt:%e",
-                 v.fmt_int32, icount_, v.fmt_float);
-        break;
-      default:
-        UNREACHABLE();
-    }
-  }
-}
-
-void Simulator::TraceRegWr(int64_t value, TraceType t) {
-  if (v8_flags.trace_sim) {
-    union {
-      int64_t fmt_int64;
-      double fmt_double;
-    } v;
-    v.fmt_int64 = value;
-
-    switch (t) {
-      case DWORD:
-        SNPrintF(trace_buf_,
-                 "%016" PRIx64 " (%" PRIu64 ") int64:%" PRId64
-                 " uint64:%" PRIu64,
-                 value, icount_, value, value);
-        break;
-      case DOUBLE:
-        SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRIu64 ") dbl:%e",
-                 v.fmt_int64, icount_, v.fmt_double);
-        break;
-      default:
-        UNREACHABLE();
-    }
-  }
-}
-
-template <typename T>
-void Simulator::TraceMSARegWr(T* value, TraceType t) {
-  if (v8_flags.trace_sim) {
-    union {
-      uint8_t b[16];
-      uint16_t h[8];
-      uint32_t w[4];
-      uint64_t d[2];
-      float f[4];
-      double df[2];
-    } v;
-    memcpy(v.b, value, kSimd128Size);
-    switch (t) {
-      case BYTE:
-        SNPrintF(trace_buf_,
-                 "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64 ")",
-                 v.d[0], v.d[1], icount_);
-        break;
-      case HALF:
-        SNPrintF(trace_buf_,
-                 "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64 ")",
-                 v.d[0], v.d[1], icount_);
-        break;
-      case WORD:
-        SNPrintF(trace_buf_,
-                 "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64
-                 ") int32[0..3]:%" PRId32 " %" PRId32 " %" PRId32
-                 " %" PRId32,
-                 v.d[0], v.d[1], icount_, v.w[0], v.w[1], v.w[2], v.w[3]);
-        break;
-      case DWORD:
-        SNPrintF(trace_buf_,
-                 "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64 ")",
-                 v.d[0], v.d[1], icount_);
-        break;
-      case FLOAT:
-        SNPrintF(trace_buf_,
-                 "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64
-                 ") flt[0..3]:%e %e %e %e",
-                 v.d[0], v.d[1], icount_, v.f[0], v.f[1], v.f[2], v.f[3]);
-        break;
-      case DOUBLE:
-        SNPrintF(trace_buf_,
-                 "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64
-                 ") dbl[0..1]:%e %e",
-                 v.d[0], v.d[1], icount_, v.df[0], v.df[1]);
-        break;
-      default:
-        UNREACHABLE();
-    }
-  }
-}
-
-template <typename T>
-void Simulator::TraceMSARegWr(T* value) {
-  if (v8_flags.trace_sim) {
-    union {
-      uint8_t b[kMSALanesByte];
-      uint16_t h[kMSALanesHalf];
-      uint32_t w[kMSALanesWord];
-      uint64_t d[kMSALanesDword];
-      float f[kMSALanesWord];
-      double df[kMSALanesDword];
-    } v;
-    memcpy(v.b, value, kMSALanesByte);
-
-    if (std::is_same<T, int32_t>::value) {
-      SNPrintF(trace_buf_,
-               "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64
-               ") int32[0..3]:%" PRId32 " %" PRId32 " %" PRId32
-               " %" PRId32,
-               v.d[0], v.d[1], icount_, v.w[0], v.w[1], v.w[2], v.w[3]);
-    } else if (std::is_same<T, float>::value) {
-      SNPrintF(trace_buf_,
-               "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64
-               ") flt[0..3]:%e %e %e %e",
-               v.d[0], v.d[1], icount_, v.f[0], v.f[1], v.f[2], v.f[3]);
-    } else if (std::is_same<T, double>::value) {
-      SNPrintF(trace_buf_,
-               "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64
-               ") dbl[0..1]:%e %e",
-               v.d[0], v.d[1], icount_, v.df[0], v.df[1]);
-    } else {
-      SNPrintF(trace_buf_,
-               "LO: %016" PRIx64 " HI: %016" PRIx64 " (%" PRIu64 ")",
-               v.d[0], v.d[1], icount_);
-    }
-  }
-}
-
-// TODO(plind): consider making icount_ printing a flag option.
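The trace writers above all view one raw register payload through a union so the same bits can be printed as integers and as IEEE values. Outside a codebase that sanctions union type punning, memcpy is the strictly portable way to do the same thing; a standalone sketch (editorial, not V8 code):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Reinterpret the bit pattern of a 64-bit register value as a double for
// printing, without the undefined behavior of a pointer cast.
void PrintRegisterAsDouble(int64_t bits) {
  double d;
  static_assert(sizeof(d) == sizeof(bits), "payload sizes must match");
  std::memcpy(&d, &bits, sizeof(d));  // same bits, double view
  std::printf("%016llx dbl:%e\n",
              static_cast<unsigned long long>(bits), d);
}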
-void Simulator::TraceMemRd(int32_t addr, int32_t value, TraceType t) {
-  if (v8_flags.trace_sim) {
-    union {
-      int32_t fmt_int32;
-      float fmt_float;
-    } v;
-    v.fmt_int32 = value;
-
-    switch (t) {
-      case WORD:
-        SNPrintF(trace_buf_,
-                 "%08" PRIx32 " <-- [%08" PRIx32 "] (%" PRIu64
-                 ") int32:%" PRId32 " uint32:%" PRIu32,
-                 value, addr, icount_, value, value);
-        break;
-      case FLOAT:
-        SNPrintF(trace_buf_,
-                 "%08" PRIx32 " <-- [%08" PRIx32 "] (%" PRIu64 ") flt:%e",
-                 v.fmt_int32, addr, icount_, v.fmt_float);
-        break;
-      default:
-        UNREACHABLE();
-    }
-  }
-}
-
-void Simulator::TraceMemWr(int32_t addr, int32_t value, TraceType t) {
-  if (v8_flags.trace_sim) {
-    switch (t) {
-      case BYTE:
-        SNPrintF(trace_buf_,
-                 " %02" PRIx8 " --> [%08" PRIx32 "] (%" PRIu64 ")",
-                 static_cast<uint8_t>(value), addr, icount_);
-        break;
-      case HALF:
-        SNPrintF(trace_buf_,
-                 " %04" PRIx16 " --> [%08" PRIx32 "] (%" PRIu64 ")",
-                 static_cast<uint16_t>(value), addr, icount_);
-        break;
-      case WORD:
-        SNPrintF(trace_buf_,
-                 "%08" PRIx32 " --> [%08" PRIx32 "] (%" PRIu64 ")", value,
-                 addr, icount_);
-        break;
-      default:
-        UNREACHABLE();
-    }
-  }
-}
-
-template <typename T>
-void Simulator::TraceMemRd(int32_t addr, T value) {
-  if (v8_flags.trace_sim) {
-    switch (sizeof(T)) {
-      case 1:
-        SNPrintF(trace_buf_,
-                 "%08" PRIx8 " <-- [%08" PRIx32 "] (%" PRIu64
-                 ") int8:%" PRId8 " uint8:%" PRIu8,
-                 static_cast<uint8_t>(value), addr, icount_,
-                 static_cast<int8_t>(value), static_cast<uint8_t>(value));
-        break;
-      case 2:
-        SNPrintF(trace_buf_,
-                 "%08" PRIx16 " <-- [%08" PRIx32 "] (%" PRIu64
-                 ") int16:%" PRId16 " uint16:%" PRIu16,
-                 static_cast<uint16_t>(value), addr, icount_,
-                 static_cast<int16_t>(value), static_cast<uint16_t>(value));
-        break;
-      case 4:
-        SNPrintF(trace_buf_,
-                 "%08" PRIx32 " <-- [%08" PRIx32 "] (%" PRIu64
-                 ") int32:%" PRId32 " uint32:%" PRIu32,
-                 static_cast<uint32_t>(value), addr, icount_,
-                 static_cast<int32_t>(value), static_cast<uint32_t>(value));
-        break;
-      case 8:
-        SNPrintF(trace_buf_,
-                 "%08" PRIx64 " <-- [%08" PRIx32 "] (%" PRIu64
-                 ") int64:%" PRId64 " uint64:%" PRIu64,
-                 static_cast<uint64_t>(value), addr, icount_,
-                 static_cast<int64_t>(value), static_cast<uint64_t>(value));
-        break;
-      default:
-        UNREACHABLE();
-    }
-  }
-}
-
-template <typename T>
-void Simulator::TraceMemWr(int32_t addr, T value) {
-  if (v8_flags.trace_sim) {
-    switch (sizeof(T)) {
-      case 1:
-        SNPrintF(trace_buf_,
-                 " %02" PRIx8 " --> [%08" PRIx32 "] (%" PRIu64 ")",
-                 static_cast<uint8_t>(value), addr, icount_);
-        break;
-      case 2:
-        SNPrintF(trace_buf_,
-                 " %04" PRIx16 " --> [%08" PRIx32 "] (%" PRIu64 ")",
-                 static_cast<uint16_t>(value), addr, icount_);
-        break;
-      case 4:
-        SNPrintF(trace_buf_,
-                 "%08" PRIx32 " --> [%08" PRIx32 "] (%" PRIu64 ")",
-                 static_cast<uint32_t>(value), addr, icount_);
-        break;
-      case 8:
-        SNPrintF(trace_buf_,
-                 "%16" PRIx64 " --> [%08" PRIx32 "] (%" PRIu64 ")",
-                 static_cast<uint64_t>(value), addr, icount_);
-        break;
-      default:
-        UNREACHABLE();
-    }
-  }
-}
-
-void Simulator::TraceMemRd(int32_t addr, int64_t value, TraceType t) {
-  if (v8_flags.trace_sim) {
-    union {
-      int64_t fmt_int64;
-      int32_t fmt_int32[2];
-      float fmt_float[2];
-      double fmt_double;
-    } v;
-    v.fmt_int64 = value;
-
-    switch (t) {
-      case DWORD:
-        SNPrintF(trace_buf_,
-                 "%016" PRIx64 " <-- [%08" PRIx32 "] (%" PRIu64
-                 ") int64:%" PRId64 " uint64:%" PRIu64,
-                 v.fmt_int64, addr, icount_, v.fmt_int64, v.fmt_int64);
-        break;
-      case DOUBLE:
-        SNPrintF(trace_buf_,
-                 "%016" PRIx64 " <-- [%08" PRIx32 "] (%" PRIu64
-                 ") dbl:%e",
-                 v.fmt_int64, addr, icount_, v.fmt_double);
-        break;
-      case FLOAT_DOUBLE:
-        SNPrintF(trace_buf_,
-                 "%08" PRIx32 " <-- [%08" PRIx32 "] (%" PRIu64
-                 ") flt:%e dbl:%e",
-                 v.fmt_int32[1], addr, icount_, v.fmt_float[1], v.fmt_double);
-        break;
-      default:
-        UNREACHABLE();
-    }
-  }
-}
-
-void Simulator::TraceMemWr(int32_t addr, int64_t value, TraceType t) {
-  if (v8_flags.trace_sim) {
-    switch (t) {
-      case DWORD:
-        SNPrintF(trace_buf_,
-                 "%016" PRIx64 " --> [%08" PRIx32 "] (%" PRIu64 ")", value,
-                 addr, icount_);
-        break;
-      default:
-        UNREACHABLE();
-    }
-  }
-}
-
-int Simulator::ReadW(int32_t addr, Instruction* instr, TraceType t) {
-  if (addr >= 0 && addr < 0x400) {
-    // This has to be a nullptr-dereference, drop into debugger.
-    PrintF("Memory read from bad address: 0x%08x, pc=0x%08" PRIxPTR "\n", addr,
-           reinterpret_cast<intptr_t>(instr));
-    MipsDebugger dbg(this);
-    dbg.Debug();
-  }
-  if ((addr & kPointerAlignmentMask) == 0 || IsMipsArchVariant(kMips32r6)) {
-    local_monitor_.NotifyLoad();
-    intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
-    switch (t) {
-      case WORD:
-        TraceMemRd(addr, static_cast<int32_t>(*ptr), t);
-        break;
-      case FLOAT:
-        // This TraceType is allowed but tracing for this value will be omitted.
-        break;
-      default:
-        UNREACHABLE();
-    }
-    return *ptr;
-  }
-  PrintF("Unaligned read at 0x%08x, pc=0x%08" V8PRIxPTR "\n", addr,
-         reinterpret_cast<intptr_t>(instr));
-  MipsDebugger dbg(this);
-  dbg.Debug();
-  return 0;
-}
-
-void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
-  if (addr >= 0 && addr < 0x400) {
-    // This has to be a nullptr-dereference, drop into debugger.
-    PrintF("Memory write to bad address: 0x%08x, pc=0x%08" PRIxPTR "\n", addr,
-           reinterpret_cast<intptr_t>(instr));
-    MipsDebugger dbg(this);
-    dbg.Debug();
-  }
-  if ((addr & kPointerAlignmentMask) == 0 || IsMipsArchVariant(kMips32r6)) {
-    local_monitor_.NotifyStore();
-    base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
-    GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
-    intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
-    TraceMemWr(addr, value, WORD);
-    *ptr = value;
-    return;
-  }
-  PrintF("Unaligned write at 0x%08x, pc=0x%08" V8PRIxPTR "\n", addr,
-         reinterpret_cast<intptr_t>(instr));
-  MipsDebugger dbg(this);
-  dbg.Debug();
-}
-
-void Simulator::WriteConditionalW(int32_t addr, int32_t value,
-                                  Instruction* instr, int32_t rt_reg) {
-  if (addr >= 0 && addr < 0x400) {
-    // This has to be a nullptr-dereference, drop into debugger.
-    PrintF("Memory write to bad address: 0x%08x, pc=0x%08" PRIxPTR "\n", addr,
-           reinterpret_cast<intptr_t>(instr));
-    MipsDebugger dbg(this);
-    dbg.Debug();
-  }
-  if ((addr & kPointerAlignmentMask) == 0 || IsMipsArchVariant(kMips32r6)) {
-    base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
-    if (local_monitor_.NotifyStoreConditional(addr, TransactionSize::Word) &&
-        GlobalMonitor::Get()->NotifyStoreConditional_Locked(
-            addr, &global_monitor_thread_)) {
-      local_monitor_.NotifyStore();
-      GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
-      TraceMemWr(addr, value, WORD);
-      int* ptr = reinterpret_cast<int*>(addr);
-      *ptr = value;
-      set_register(rt_reg, 1);
-    } else {
-      set_register(rt_reg, 0);
-    }
-    return;
-  }
-  PrintF("Unaligned write at 0x%08x, pc=0x%08" V8PRIxPTR "\n", addr,
-         reinterpret_cast<intptr_t>(instr));
-  MipsDebugger dbg(this);
-  dbg.Debug();
-}
-
-double Simulator::ReadD(int32_t addr, Instruction* instr) {
-  if ((addr & kDoubleAlignmentMask) == 0 || IsMipsArchVariant(kMips32r6)) {
-    local_monitor_.NotifyLoad();
-    double* ptr = reinterpret_cast<double*>(addr);
-    return *ptr;
-  }
-  PrintF("Unaligned (double) read at 0x%08x, pc=0x%08" V8PRIxPTR "\n", addr,
-         reinterpret_cast<intptr_t>(instr));
-  base::OS::Abort();
-}
-
-void Simulator::WriteD(int32_t addr, double value, Instruction* instr) {
-  if ((addr & kDoubleAlignmentMask) == 0 || IsMipsArchVariant(kMips32r6)) {
-    local_monitor_.NotifyStore();
-    base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
-    GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
-    double* ptr = reinterpret_cast<double*>(addr);
-    *ptr = value;
-    return;
-  }
-  PrintF("Unaligned (double) write at 0x%08x, pc=0x%08" V8PRIxPTR "\n", addr,
-         reinterpret_cast<intptr_t>(instr));
-  base::OS::Abort();
-}
-
-uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
-  if ((addr & 1) == 0 || IsMipsArchVariant(kMips32r6)) {
-    local_monitor_.NotifyLoad();
-    uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
-    TraceMemRd(addr, static_cast<uint16_t>(*ptr));
-    return *ptr;
-  }
-  PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
-         addr, reinterpret_cast<intptr_t>(instr));
-  base::OS::Abort();
-}
-
-int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
-  if ((addr & 1) == 0 || IsMipsArchVariant(kMips32r6)) {
-    local_monitor_.NotifyLoad();
-    int16_t* ptr = reinterpret_cast<int16_t*>(addr);
-    TraceMemRd(addr, static_cast<int16_t>(*ptr));
-    return *ptr;
-  }
-  PrintF("Unaligned signed halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
-         addr, reinterpret_cast<intptr_t>(instr));
-  base::OS::Abort();
-}
-
-void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
-  if ((addr & 1) == 0 || IsMipsArchVariant(kMips32r6)) {
-    local_monitor_.NotifyStore();
-    base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
-    GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
-    uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
-    TraceMemWr(addr, value, HALF);
-    *ptr = value;
-    return;
-  }
-  PrintF("Unaligned unsigned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR
-         "\n",
-         addr, reinterpret_cast<intptr_t>(instr));
-  base::OS::Abort();
-}
-
-void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
-  if ((addr & 1) == 0 || IsMipsArchVariant(kMips32r6)) {
-    local_monitor_.NotifyStore();
-    base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
-    GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
-    int16_t* ptr = reinterpret_cast<int16_t*>(addr);
-    TraceMemWr(addr, value, HALF);
-    *ptr = value;
-    return;
-  }
-  PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n", addr,
-         reinterpret_cast<intptr_t>(instr));
-  base::OS::Abort();
-}
-
-uint32_t Simulator::ReadBU(int32_t addr) {
-  local_monitor_.NotifyLoad();
-  uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
-  TraceMemRd(addr, static_cast<uint8_t>(*ptr));
-  return *ptr & 0xFF;
-}
-
-int32_t Simulator::ReadB(int32_t addr) {
-  local_monitor_.NotifyLoad();
-  int8_t* ptr = reinterpret_cast<int8_t*>(addr);
-  TraceMemRd(addr, static_cast<int8_t>(*ptr));
-  return *ptr;
-}
-
-void Simulator::WriteB(int32_t addr, uint8_t value) {
-  local_monitor_.NotifyStore();
-  base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
-  GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
-  uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
-  TraceMemWr(addr, value, BYTE);
-  *ptr = value;
-}
-
-void Simulator::WriteB(int32_t addr, int8_t value) {
-  local_monitor_.NotifyStore();
-  base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
-  GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
-  int8_t* ptr = reinterpret_cast<int8_t*>(addr);
-  TraceMemWr(addr, value, BYTE);
-  *ptr = value;
-}
-
-template <typename T>
-T Simulator::ReadMem(int32_t addr, Instruction* instr) {
-  int alignment_mask = (1 << sizeof(T)) - 1;
-  if ((addr & alignment_mask) == 0 || IsMipsArchVariant(kMips32r6)) {
-    local_monitor_.NotifyLoad();
-    T* ptr = reinterpret_cast<T*>(addr);
-    TraceMemRd(addr, *ptr);
-    return *ptr;
-  }
-  PrintF("Unaligned read of type sizeof(%d) at 0x%08x, pc=0x%08" V8PRIxPTR
-         "\n",
-         sizeof(T), addr, reinterpret_cast<intptr_t>(instr));
-  base::OS::Abort();
-  return 0;
-}
-
-template <typename T>
-void Simulator::WriteMem(int32_t addr, T value, Instruction* instr) {
-  local_monitor_.NotifyStore();
-  base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
-  GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
-  int alignment_mask = (1 << sizeof(T)) - 1;
-  if ((addr & alignment_mask) == 0 || IsMipsArchVariant(kMips32r6)) {
-    T* ptr = reinterpret_cast<T*>(addr);
-    *ptr = value;
-    TraceMemWr(addr, value);
-    return;
-  }
-  PrintF("Unaligned write of type sizeof(%d) at 0x%08x, pc=0x%08" V8PRIxPTR
-         "\n",
-         sizeof(T), addr, reinterpret_cast<intptr_t>(instr));
-  base::OS::Abort();
-}
-
-// Returns the limit of the stack area to enable checking for stack overflows.
-uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
-  // The simulator uses a separate JS stack. If we have exhausted the C stack,
-  // we also drop down the JS limit to reflect the exhaustion on the JS stack.
-  if (base::Stack::GetCurrentStackPosition() < c_limit) {
-    return reinterpret_cast<uintptr_t>(get_sp());
-  }
-
-  // Otherwise the limit is the JS stack. Leave a safety margin of 1024 bytes
-  // to prevent overrunning the stack when pushing values.
-  return reinterpret_cast<uintptr_t>(stack_) + 1024;
-}
-
-// Unsupported instructions use Format to print an error and stop execution.
-void Simulator::Format(Instruction* instr, const char* format) {
-  PrintF("Simulator found unsupported instruction:\n 0x%08" PRIxPTR ": %s\n",
-         reinterpret_cast<intptr_t>(instr), format);
-  UNIMPLEMENTED_MIPS();
-}
-
-// Calls into the V8 runtime are based on this very simple interface.
-// Note: To be able to return two values from some calls the code in runtime.cc
-// uses the ObjectPair which is essentially two 32-bit values stuffed into a
-// 64-bit value. With the code below we assume that all runtime calls return
-// 64 bits of result. If they don't, the v1 result register contains a bogus
-// value, which is fine because it is caller-saved.
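The convention described in the comment above, splitting one 64-bit result across the two 32-bit return registers, is exactly what SoftwareInterrupt below does with set_register(v0, ...) and set_register(v1, ...). A minimal standalone sketch of the split (editorial, hypothetical helper):

#include <cstdint>

// Spread a 64-bit runtime-call result across the v0/v1 register pair:
// v0 receives the low word, v1 the high word.
void ReturnInt64(int64_t result, int32_t* v0, int32_t* v1) {
  *v0 = static_cast<int32_t>(result);        // low 32 bits
  *v1 = static_cast<int32_t>(result >> 32);  // high 32 bits
}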
-using SimulatorRuntimeCall = int64_t (*)(
-    int32_t arg0, int32_t arg1, int32_t arg2, int32_t arg3, int32_t arg4,
-    int32_t arg5, int32_t arg6, int32_t arg7, int32_t arg8, int32_t arg9,
-    int32_t arg10, int32_t arg11, int32_t arg12, int32_t arg13, int32_t arg14,
-    int32_t arg15, int32_t arg16, int32_t arg17, int32_t arg18, int32_t arg19);
-
-// These prototypes handle the four types of FP calls.
-using SimulatorRuntimeCompareCall = int64_t (*)(double darg0, double darg1);
-using SimulatorRuntimeFPFPCall = double (*)(double darg0, double darg1);
-using SimulatorRuntimeFPCall = double (*)(double darg0);
-using SimulatorRuntimeFPIntCall = double (*)(double darg0, int32_t arg0);
-
-// This signature supports direct call in to API function native callback
-// (refer to InvocationCallback in v8.h).
-using SimulatorRuntimeDirectApiCall = void (*)(int32_t arg0);
-using SimulatorRuntimeProfilingApiCall = void (*)(int32_t arg0, void* arg1);
-
-// This signature supports direct call to accessor getter callback.
-using SimulatorRuntimeDirectGetterCall = void (*)(int32_t arg0, int32_t arg1);
-using SimulatorRuntimeProfilingGetterCall = void (*)(int32_t arg0, int32_t arg1,
-                                                     void* arg2);
-
-// Software interrupt instructions are used by the simulator to call into the
-// C-based V8 runtime. They are also used for debugging with the simulator.
-void Simulator::SoftwareInterrupt() {
-  // There are several instructions that could get us here,
-  // the break_ instruction, or several variants of traps. All
-  // are "SPECIAL" class opcode, and are distinguished by function.
-  int32_t func = instr_.FunctionFieldRaw();
-  uint32_t code = (func == BREAK) ? instr_.Bits(25, 6) : -1;
-
-  // We first check if we met a call_rt_redirected.
-  if (instr_.InstructionBits() == rtCallRedirInstr) {
-    Redirection* redirection = Redirection::FromInstruction(instr_.instr());
-    int32_t arg0 = get_register(a0);
-    int32_t arg1 = get_register(a1);
-    int32_t arg2 = get_register(a2);
-    int32_t arg3 = get_register(a3);
-
-    int32_t* stack_pointer = reinterpret_cast<int32_t*>(get_register(sp));
-    // Args 4 and 5 are on the stack after the reserved space for args 0..3.
-    int32_t arg4 = stack_pointer[4];
-    int32_t arg5 = stack_pointer[5];
-    int32_t arg6 = stack_pointer[6];
-    int32_t arg7 = stack_pointer[7];
-    int32_t arg8 = stack_pointer[8];
-    int32_t arg9 = stack_pointer[9];
-    int32_t arg10 = stack_pointer[10];
-    int32_t arg11 = stack_pointer[11];
-    int32_t arg12 = stack_pointer[12];
-    int32_t arg13 = stack_pointer[13];
-    int32_t arg14 = stack_pointer[14];
-    int32_t arg15 = stack_pointer[15];
-    int32_t arg16 = stack_pointer[16];
-    int32_t arg17 = stack_pointer[17];
-    int32_t arg18 = stack_pointer[18];
-    int32_t arg19 = stack_pointer[19];
-    static_assert(kMaxCParameters == 20);
-
-    bool fp_call =
-        (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
-        (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
-        (redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
-        (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
-
-    if (!IsMipsSoftFloatABI) {
-      // With the hard floating point calling convention, double
-      // arguments are passed in FPU registers. Fetch the arguments
-      // from there and call the builtin using soft floating point
-      // convention.
-      switch (redirection->type()) {
-        case ExternalReference::BUILTIN_FP_FP_CALL:
-        case ExternalReference::BUILTIN_COMPARE_CALL:
-          if (IsFp64Mode()) {
-            arg0 = get_fpu_register_word(f12);
-            arg1 = get_fpu_register_hi_word(f12);
-            arg2 = get_fpu_register_word(f14);
-            arg3 = get_fpu_register_hi_word(f14);
-          } else {
-            arg0 = get_fpu_register_word(f12);
-            arg1 = get_fpu_register_word(f13);
-            arg2 = get_fpu_register_word(f14);
-            arg3 = get_fpu_register_word(f15);
-          }
-          break;
-        case ExternalReference::BUILTIN_FP_CALL:
-          if (IsFp64Mode()) {
-            arg0 = get_fpu_register_word(f12);
-            arg1 = get_fpu_register_hi_word(f12);
-          } else {
-            arg0 = get_fpu_register_word(f12);
-            arg1 = get_fpu_register_word(f13);
-          }
-          break;
-        case ExternalReference::BUILTIN_FP_INT_CALL:
-          if (IsFp64Mode()) {
-            arg0 = get_fpu_register_word(f12);
-            arg1 = get_fpu_register_hi_word(f12);
-          } else {
-            arg0 = get_fpu_register_word(f12);
-            arg1 = get_fpu_register_word(f13);
-          }
-          arg2 = get_register(a2);
-          break;
-        default:
-          break;
-      }
-    }
-
-    // This is dodgy but it works because the C entry stubs are never moved.
-    // See comment in codegen-arm.cc and bug 1242173.
-    int32_t saved_ra = get_register(ra);
-
-    intptr_t external =
-        reinterpret_cast<intptr_t>(redirection->external_function());
-
-    // Based on CpuFeatures::IsSupported(FPU), Mips will use either hardware
-    // FPU, or gcc soft-float routines. Hardware FPU is simulated in this
-    // simulator. Soft-float has additional abstraction of ExternalReference,
-    // to support serialization.
-    if (fp_call) {
-      double dval0, dval1;  // one or two double parameters
-      int32_t ival;         // zero or one integer parameters
-      int64_t iresult = 0;  // integer return value
-      double dresult = 0;   // double return value
-      GetFpArgs(&dval0, &dval1, &ival);
-      SimulatorRuntimeCall generic_target =
-          reinterpret_cast<SimulatorRuntimeCall>(external);
-      if (v8_flags.trace_sim) {
-        switch (redirection->type()) {
-          case ExternalReference::BUILTIN_FP_FP_CALL:
-          case ExternalReference::BUILTIN_COMPARE_CALL:
-            PrintF("Call to host function at %p with args %f, %f",
-                   reinterpret_cast<void*>(FUNCTION_ADDR(generic_target)),
-                   dval0, dval1);
-            break;
-          case ExternalReference::BUILTIN_FP_CALL:
-            PrintF("Call to host function at %p with arg %f",
-                   reinterpret_cast<void*>(FUNCTION_ADDR(generic_target)),
-                   dval0);
-            break;
-          case ExternalReference::BUILTIN_FP_INT_CALL:
-            PrintF("Call to host function at %p with args %f, %d",
-                   reinterpret_cast<void*>(FUNCTION_ADDR(generic_target)),
-                   dval0, ival);
-            break;
-          default:
-            UNREACHABLE();
-        }
-      }
-      switch (redirection->type()) {
-        case ExternalReference::BUILTIN_COMPARE_CALL: {
-          SimulatorRuntimeCompareCall target =
-              reinterpret_cast<SimulatorRuntimeCompareCall>(external);
-          iresult = target(dval0, dval1);
-          set_register(v0, static_cast<int32_t>(iresult));
-          set_register(v1, static_cast<int32_t>(iresult >> 32));
-          break;
-        }
-        case ExternalReference::BUILTIN_FP_FP_CALL: {
-          SimulatorRuntimeFPFPCall target =
-              reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
-          dresult = target(dval0, dval1);
-          SetFpResult(dresult);
-          break;
-        }
-        case ExternalReference::BUILTIN_FP_CALL: {
-          SimulatorRuntimeFPCall target =
-              reinterpret_cast<SimulatorRuntimeFPCall>(external);
-          dresult = target(dval0);
-          SetFpResult(dresult);
-          break;
-        }
-        case ExternalReference::BUILTIN_FP_INT_CALL: {
-          SimulatorRuntimeFPIntCall target =
-              reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
-          dresult = target(dval0, ival);
-          SetFpResult(dresult);
-          break;
-        }
-        default:
-          UNREACHABLE();
-      }
-      if (v8_flags.trace_sim) {
-        switch (redirection->type()) {
-          case ExternalReference::BUILTIN_COMPARE_CALL:
-            PrintF("Returned %08x\n", static_cast<int32_t>(iresult));
-            break;
-          case ExternalReference::BUILTIN_FP_FP_CALL:
-          case ExternalReference::BUILTIN_FP_CALL:
-          case ExternalReference::BUILTIN_FP_INT_CALL:
-            PrintF("Returned %f\n", dresult);
-            break;
-          default:
-            UNREACHABLE();
-        }
-      }
-    } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
-      if (v8_flags.trace_sim) {
-        PrintF("Call to host function at %p args %08x\n",
-               reinterpret_cast<void*>(external), arg0);
-      }
-      SimulatorRuntimeDirectApiCall target =
-          reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
-      target(arg0);
-    } else if (redirection->type() == ExternalReference::PROFILING_API_CALL) {
-      if (v8_flags.trace_sim) {
-        PrintF("Call to host function at %p args %08x %08x\n",
-               reinterpret_cast<void*>(external), arg0, arg1);
-      }
-      SimulatorRuntimeProfilingApiCall target =
-          reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
-      target(arg0, Redirection::UnwrapRedirection(arg1));
-    } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
-      if (v8_flags.trace_sim) {
-        PrintF("Call to host function at %p args %08x %08x\n",
-               reinterpret_cast<void*>(external), arg0, arg1);
-      }
-      SimulatorRuntimeDirectGetterCall target =
-          reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
-      target(arg0, arg1);
-    } else if (redirection->type() ==
-               ExternalReference::PROFILING_GETTER_CALL) {
-      if (v8_flags.trace_sim) {
-        PrintF("Call to host function at %p args %08x %08x %08x\n",
-               reinterpret_cast<void*>(external), arg0, arg1, arg2);
-      }
-      SimulatorRuntimeProfilingGetterCall target =
-          reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
-      target(arg0, arg1, Redirection::UnwrapRedirection(arg2));
-    } else {
-      DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL ||
-             redirection->type() == ExternalReference::BUILTIN_CALL_PAIR);
-      SimulatorRuntimeCall target =
-          reinterpret_cast<SimulatorRuntimeCall>(external);
-      if (v8_flags.trace_sim) {
-        PrintF(
-            "Call to host function at %p "
-            "args %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08x, %08xi, "
-            "%08xi, %08xi, %08xi, %08xi, %08xi, %08xi, %08xi, %08xi, %08xi, "
-            "%08xi\n",
-            reinterpret_cast<void*>(FUNCTION_ADDR(target)), arg0, arg1, arg2,
-            arg3, arg4, arg5, arg6, arg7, arg8, arg9, arg10, arg11, arg12,
-            arg13, arg14, arg15, arg16, arg17, arg18, arg19);
-      }
-      int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7,
-                              arg8, arg9, arg10, arg11, arg12, arg13, arg14,
-                              arg15, arg16, arg17, arg18, arg19);
-      set_register(v0, static_cast<int32_t>(result));
-      set_register(v1, static_cast<int32_t>(result >> 32));
-    }
-    if (v8_flags.trace_sim) {
-      PrintF("Returned %08x : %08x\n", get_register(v1), get_register(v0));
-    }
-    set_register(ra, saved_ra);
-    set_pc(get_register(ra));
-
-  } else if (func == BREAK && code <= kMaxStopCode) {
-    if (IsWatchpoint(code)) {
-      PrintWatchpoint(code);
-    } else {
-      IncreaseStopCounter(code);
-      HandleStop(code, instr_.instr());
-    }
-  } else {
-    // All remaining break_ codes, and all traps are handled here.
-    MipsDebugger dbg(this);
-    dbg.Debug();
-  }
-}
-
-// Stop helper functions.
-bool Simulator::IsWatchpoint(uint32_t code) {
-  return (code <= kMaxWatchpointCode);
-}
-
-void Simulator::PrintWatchpoint(uint32_t code) {
-  MipsDebugger dbg(this);
-  ++break_count_;
-  PrintF("\n---- break %d marker: %3d (instr count: %" PRIu64
-         ") ----------"
-         "----------------------------------",
-         code, break_count_, icount_);
-  dbg.PrintAllRegs();  // Print registers and continue running.
-}
-
-void Simulator::HandleStop(uint32_t code, Instruction* instr) {
-  // Stop if it is enabled, otherwise go on jumping over the stop
-  // and the message address.
-  if (IsEnabledStop(code)) {
-    MipsDebugger dbg(this);
-    dbg.Stop(instr);
-  }
-}
-
-bool Simulator::IsStopInstruction(Instruction* instr) {
-  int32_t func = instr->FunctionFieldRaw();
-  uint32_t code = static_cast<uint32_t>(instr->Bits(25, 6));
-  return (func == BREAK) && code > kMaxWatchpointCode && code <= kMaxStopCode;
-}
-
-bool Simulator::IsEnabledStop(uint32_t code) {
-  DCHECK_LE(code, kMaxStopCode);
-  DCHECK_GT(code, kMaxWatchpointCode);
-  return !(watched_stops_[code].count & kStopDisabledBit);
-}
-
-void Simulator::EnableStop(uint32_t code) {
-  if (!IsEnabledStop(code)) {
-    watched_stops_[code].count &= ~kStopDisabledBit;
-  }
-}
-
-void Simulator::DisableStop(uint32_t code) {
-  if (IsEnabledStop(code)) {
-    watched_stops_[code].count |= kStopDisabledBit;
-  }
-}
-
-void Simulator::IncreaseStopCounter(uint32_t code) {
-  DCHECK_LE(code, kMaxStopCode);
-  if ((watched_stops_[code].count & ~(1 << 31)) == 0x7FFFFFFF) {
-    PrintF(
-        "Stop counter for code %i has overflowed.\n"
-        "Enabling this code and resetting the counter to 0.\n",
-        code);
-    watched_stops_[code].count = 0;
-    EnableStop(code);
-  } else {
-    watched_stops_[code].count++;
-  }
-}
-
-// Print a stop status.
-void Simulator::PrintStopInfo(uint32_t code) {
-  if (code <= kMaxWatchpointCode) {
-    PrintF("That is a watchpoint, not a stop.\n");
-    return;
-  } else if (code > kMaxStopCode) {
-    PrintF("Code too large, only %u stops can be used\n", kMaxStopCode + 1);
-    return;
-  }
-  const char* state = IsEnabledStop(code) ? "Enabled" : "Disabled";
-  int32_t count = watched_stops_[code].count & ~kStopDisabledBit;
-  // Don't print the state of unused breakpoints.
-  if (count != 0) {
-    if (watched_stops_[code].desc) {
-      PrintF("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n", code, code, state,
-             count, watched_stops_[code].desc);
-    } else {
-      PrintF("stop %i - 0x%x: \t%s, \tcounter = %i\n", code, code, state,
-             count);
-    }
-  }
-}
-
-void Simulator::SignalException(Exception e) {
-  FATAL("Error: Exception %i raised.", static_cast<int>(e));
-}
-
-// Min/Max template functions for Double and Single arguments.
-
-template <typename T>
-static T FPAbs(T a);
-
-template <>
-double FPAbs(double a) {
-  return fabs(a);
-}
-
-template <>
-float FPAbs(float a) {
-  return fabsf(a);
-}
-
-template <typename T>
-static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T* result) {
-  if (std::isnan(a) && std::isnan(b)) {
-    *result = a;
-  } else if (std::isnan(a)) {
-    *result = b;
-  } else if (std::isnan(b)) {
-    *result = a;
-  } else if (b == a) {
-    // Handle -0.0 == 0.0 case.
-    // std::signbit() returns int 0 or 1 so subtracting MaxMinKind::kMax
-    // negates the result.
-    *result = std::signbit(b) - static_cast<int>(kind) ? b : a;
-  } else {
-    return false;
-  }
-  return true;
-}
-
-template <typename T>
-static T FPUMin(T a, T b) {
-  T result;
-  if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
-    return result;
-  } else {
-    return b < a ? b : a;
-  }
-}
-
-template <typename T>
-static T FPUMax(T a, T b) {
-  T result;
-  if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMax, &result)) {
-    return result;
-  } else {
-    return b > a ? b : a;
-  }
-}
-
-template <typename T>
-static T FPUMinA(T a, T b) {
-  T result;
-  if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
-    if (FPAbs(a) < FPAbs(b)) {
-      result = a;
-    } else if (FPAbs(b) < FPAbs(a)) {
-      result = b;
-    } else {
-      result = a < b ? a : b;
-    }
-  }
-  return result;
-}
-
-template <typename T>
-static T FPUMaxA(T a, T b) {
-  T result;
-  if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
-    if (FPAbs(a) > FPAbs(b)) {
-      result = a;
-    } else if (FPAbs(b) > FPAbs(a)) {
-      result = b;
-    } else {
-      result = a > b ? a : b;
-    }
-  }
-  return result;
-}
-
-enum class KeepSign : bool { no = false, yes };
-
-template <typename T, typename std::enable_if<
-                          std::is_floating_point<T>::value, int>::type = 0>
-T FPUCanonalizeNaNArg(T result, T arg, KeepSign keepSign = KeepSign::no) {
-  DCHECK(std::isnan(arg));
-  T qNaN = std::numeric_limits<T>::quiet_NaN();
-  if (keepSign == KeepSign::yes) {
-    return std::copysign(qNaN, result);
-  }
-  return qNaN;
-}
-
-template <typename T>
-T FPUCanonalizeNaNArgs(T result, KeepSign keepSign, T first) {
-  if (std::isnan(first)) {
-    return FPUCanonalizeNaNArg(result, first, keepSign);
-  }
-  return result;
-}
-
-template <typename T, typename... Args>
-T FPUCanonalizeNaNArgs(T result, KeepSign keepSign, T first, Args... args) {
-  if (std::isnan(first)) {
-    return FPUCanonalizeNaNArg(result, first, keepSign);
-  }
-  return FPUCanonalizeNaNArgs(result, keepSign, args...);
-}
-
-template <typename Func, typename T, typename... Args>
-T FPUCanonalizeOperation(Func f, T first, Args... args) {
-  return FPUCanonalizeOperation(f, KeepSign::no, first, args...);
-}
-
-template <typename Func, typename T, typename... Args>
-T FPUCanonalizeOperation(Func f, KeepSign keepSign, T first, Args... args) {
-  T result = f(first, args...);
-  if (std::isnan(result)) {
-    result = FPUCanonalizeNaNArgs(result, keepSign, first, args...);
-  }
-  return result;
-}
-
-// Handle execution based on instruction types.
-
-void Simulator::DecodeTypeRegisterDRsType() {
-  double ft, fs, fd;
-  uint32_t cc, fcsr_cc;
-  int64_t i64;
-  fs = get_fpu_register_double(fs_reg());
-  ft = (instr_.FunctionFieldRaw() != MOVF) ? get_fpu_register_double(ft_reg())
-                                           : 0.0;
-  fd = get_fpu_register_double(fd_reg());
-  int64_t ft_int = base::bit_cast<int64_t>(ft);
-  int64_t fd_int = base::bit_cast<int64_t>(fd);
-  cc = instr_.FCccValue();
-  fcsr_cc = get_fcsr_condition_bit(cc);
-  switch (instr_.FunctionFieldRaw()) {
-    case RINT: {
-      DCHECK(IsMipsArchVariant(kMips32r6));
-      double result, temp, temp_result;
-      double upper = std::ceil(fs);
-      double lower = std::floor(fs);
-      switch (get_fcsr_rounding_mode()) {
-        case kRoundToNearest:
-          if (upper - fs < fs - lower) {
-            result = upper;
-          } else if (upper - fs > fs - lower) {
-            result = lower;
-          } else {
-            temp_result = upper / 2;
-            double remainder = modf(temp_result, &temp);
-            if (remainder == 0) {
-              result = upper;
-            } else {
-              result = lower;
-            }
-          }
-          break;
-        case kRoundToZero:
-          result = (fs > 0 ? lower : upper);
-          break;
-        case kRoundToPlusInf:
-          result = upper;
-          break;
-        case kRoundToMinusInf:
-          result = lower;
-          break;
-      }
-      SetFPUDoubleResult(fd_reg(), result);
-      if (result != fs) {
-        set_fcsr_bit(kFCSRInexactFlagBit, true);
-      }
-      break;
-    }
-    case SEL:
-      DCHECK(IsMipsArchVariant(kMips32r6));
-      SetFPUDoubleResult(fd_reg(), (fd_int & 0x1) == 0 ? fs : ft);
-      break;
-    case SELEQZ_C:
-      DCHECK(IsMipsArchVariant(kMips32r6));
-      SetFPUDoubleResult(fd_reg(), (ft_int & 0x1) == 0 ? fs : 0.0);
-      break;
-    case SELNEZ_C:
-      DCHECK(IsMipsArchVariant(kMips32r6));
-      SetFPUDoubleResult(fd_reg(), (ft_int & 0x1) != 0 ?
fs : 0.0); - break; - case MOVZ_C: { - DCHECK(IsMipsArchVariant(kMips32r2)); - if (rt() == 0) { - SetFPUDoubleResult(fd_reg(), fs); - } - break; - } - case MOVN_C: { - DCHECK(IsMipsArchVariant(kMips32r2)); - int32_t rt_reg = instr_.RtValue(); - int32_t rt = get_register(rt_reg); - if (rt != 0) { - SetFPUDoubleResult(fd_reg(), fs); - } - break; - } - case MOVF: { - // Same function field for MOVT.D and MOVF.D - uint32_t ft_cc = (ft_reg() >> 2) & 0x7; - ft_cc = get_fcsr_condition_bit(ft_cc); - if (instr_.Bit(16)) { // Read Tf bit. - // MOVT.D - if (test_fcsr_bit(ft_cc)) SetFPUDoubleResult(fd_reg(), fs); - } else { - // MOVF.D - if (!test_fcsr_bit(ft_cc)) SetFPUDoubleResult(fd_reg(), fs); - } - break; - } - case MIN: - DCHECK(IsMipsArchVariant(kMips32r6)); - SetFPUDoubleResult(fd_reg(), FPUMin(ft, fs)); - break; - case MAX: - DCHECK(IsMipsArchVariant(kMips32r6)); - SetFPUDoubleResult(fd_reg(), FPUMax(ft, fs)); - break; - case MINA: - DCHECK(IsMipsArchVariant(kMips32r6)); - SetFPUDoubleResult(fd_reg(), FPUMinA(ft, fs)); - break; - case MAXA: - DCHECK(IsMipsArchVariant(kMips32r6)); - SetFPUDoubleResult(fd_reg(), FPUMaxA(ft, fs)); - break; - case ADD_D: - SetFPUDoubleResult( - fd_reg(), - FPUCanonalizeOperation( - [](double lhs, double rhs) { return lhs + rhs; }, fs, ft)); - break; - case SUB_D: - SetFPUDoubleResult( - fd_reg(), - FPUCanonalizeOperation( - [](double lhs, double rhs) { return lhs - rhs; }, fs, ft)); - break; - case MADDF_D: - DCHECK(IsMipsArchVariant(kMips32r6)); - SetFPUDoubleResult(fd_reg(), std::fma(fs, ft, fd)); - break; - case MSUBF_D: - DCHECK(IsMipsArchVariant(kMips32r6)); - SetFPUDoubleResult(fd_reg(), std::fma(-fs, ft, fd)); - break; - case MUL_D: - SetFPUDoubleResult( - fd_reg(), - FPUCanonalizeOperation( - [](double lhs, double rhs) { return lhs * rhs; }, fs, ft)); - break; - case DIV_D: - SetFPUDoubleResult( - fd_reg(), - FPUCanonalizeOperation( - [](double lhs, double rhs) { return lhs / rhs; }, fs, ft)); - break; - case ABS_D: - SetFPUDoubleResult( - fd_reg(), - FPUCanonalizeOperation([](double fs) { return FPAbs(fs); }, fs)); - break; - case MOV_D: - SetFPUDoubleResult(fd_reg(), fs); - break; - case NEG_D: - SetFPUDoubleResult(fd_reg(), - FPUCanonalizeOperation([](double src) { return -src; }, - KeepSign::yes, fs)); - break; - case SQRT_D: - SetFPUDoubleResult( - fd_reg(), - FPUCanonalizeOperation([](double fs) { return std::sqrt(fs); }, fs)); - break; - case RSQRT_D: - SetFPUDoubleResult( - fd_reg(), FPUCanonalizeOperation( - [](double fs) { return 1.0 / std::sqrt(fs); }, fs)); - break; - case RECIP_D: - SetFPUDoubleResult(fd_reg(), FPUCanonalizeOperation( - [](double fs) { return 1.0 / fs; }, fs)); - break; - case C_UN_D: - set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft)); - TraceRegWr(test_fcsr_bit(fcsr_cc)); - break; - case C_EQ_D: - set_fcsr_bit(fcsr_cc, (fs == ft)); - TraceRegWr(test_fcsr_bit(fcsr_cc)); - break; - case C_UEQ_D: - set_fcsr_bit(fcsr_cc, (fs == ft) || (std::isnan(fs) || std::isnan(ft))); - TraceRegWr(test_fcsr_bit(fcsr_cc)); - break; - case C_OLT_D: - set_fcsr_bit(fcsr_cc, (fs < ft)); - TraceRegWr(test_fcsr_bit(fcsr_cc)); - break; - case C_ULT_D: - set_fcsr_bit(fcsr_cc, (fs < ft) || (std::isnan(fs) || std::isnan(ft))); - TraceRegWr(test_fcsr_bit(fcsr_cc)); - break; - case C_OLE_D: - set_fcsr_bit(fcsr_cc, (fs <= ft)); - TraceRegWr(test_fcsr_bit(fcsr_cc)); - break; - case C_ULE_D: - set_fcsr_bit(fcsr_cc, (fs <= ft) || (std::isnan(fs) || std::isnan(ft))); - TraceRegWr(test_fcsr_bit(fcsr_cc)); - break; - case CVT_W_D: { // Convert double to 
word.
-      double rounded;
-      int32_t result;
-      round_according_to_fcsr(fs, &rounded, &result, fs);
-      SetFPUWordResult(fd_reg(), result);
-      if (set_fcsr_round_error(fs, rounded)) {
-        set_fpu_register_word_invalid_result(fs, rounded);
-      }
-    } break;
-    case ROUND_W_D:  // Round double to word (round half to even).
-    {
-      double rounded = std::floor(fs + 0.5);
-      int32_t result = static_cast<int32_t>(rounded);
-      if ((result & 1) != 0 && result - fs == 0.5) {
-        // If the number is halfway between two integers,
-        // round to the even one.
-        result--;
-      }
-      SetFPUWordResult(fd_reg(), result);
-      if (set_fcsr_round_error(fs, rounded)) {
-        set_fpu_register_word_invalid_result(fs, rounded);
-      }
-    } break;
-    case TRUNC_W_D:  // Truncate double to word (round towards 0).
-    {
-      double rounded = trunc(fs);
-      int32_t result = static_cast<int32_t>(rounded);
-      SetFPUWordResult(fd_reg(), result);
-      if (set_fcsr_round_error(fs, rounded)) {
-        set_fpu_register_word_invalid_result(fs, rounded);
-      }
-    } break;
-    case FLOOR_W_D:  // Round double to word towards negative infinity.
-    {
-      double rounded = std::floor(fs);
-      int32_t result = static_cast<int32_t>(rounded);
-      SetFPUWordResult(fd_reg(), result);
-      if (set_fcsr_round_error(fs, rounded)) {
-        set_fpu_register_word_invalid_result(fs, rounded);
-      }
-    } break;
-    case CEIL_W_D:  // Round double to word towards positive infinity.
-    {
-      double rounded = std::ceil(fs);
-      int32_t result = static_cast<int32_t>(rounded);
-      SetFPUWordResult(fd_reg(), result);
-      if (set_fcsr_round_error(fs, rounded)) {
-        set_fpu_register_word_invalid_result(fs, rounded);
-      }
-    } break;
-    case CVT_S_D:  // Convert double to float (single).
-      SetFPUFloatResult(fd_reg(), static_cast<float>(fs));
-      break;
-    case CVT_L_D: {  // Mips32r2: Truncate double to 64-bit long-word.
-      if (IsFp64Mode()) {
-        int64_t result;
-        double rounded;
-        round64_according_to_fcsr(fs, &rounded, &result, fs);
-        SetFPUResult(fd_reg(), result);
-        if (set_fcsr_round64_error(fs, rounded)) {
-          set_fpu_register_invalid_result64(fs, rounded);
-        }
-      } else {
-        UNSUPPORTED();
-      }
-      break;
-    }
-    case TRUNC_L_D: {  // Mips32r2 instruction.
-      DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
-      double rounded = trunc(fs);
-      i64 = static_cast<int64_t>(rounded);
-      if (IsFp64Mode()) {
-        SetFPUResult(fd_reg(), i64);
-        if (set_fcsr_round64_error(fs, rounded)) {
-          set_fpu_register_invalid_result64(fs, rounded);
-        }
-      } else {
-        UNSUPPORTED();
-      }
-      break;
-    }
-    case ROUND_L_D: {  // Mips32r2 instruction.
-      DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
-      double rounded = std::floor(fs + 0.5);
-      int64_t result = static_cast<int64_t>(rounded);
-      if ((result & 1) != 0 && result - fs == 0.5) {
-        // If the number is halfway between two integers,
-        // round to the even one.
-        result--;
-      }
-      int64_t i64 = static_cast<int64_t>(result);
-      if (IsFp64Mode()) {
-        SetFPUResult(fd_reg(), i64);
-        if (set_fcsr_round64_error(fs, rounded)) {
-          set_fpu_register_invalid_result64(fs, rounded);
-        }
-      } else {
-        UNSUPPORTED();
-      }
-      break;
-    }
-    case FLOOR_L_D: {  // Mips32r2 instruction.
-      DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
-      double rounded = std::floor(fs);
-      int64_t i64 = static_cast<int64_t>(rounded);
-      if (IsFp64Mode()) {
-        SetFPUResult(fd_reg(), i64);
-        if (set_fcsr_round64_error(fs, rounded)) {
-          set_fpu_register_invalid_result64(fs, rounded);
-        }
-      } else {
-        UNSUPPORTED();
-      }
-      break;
-    }
-    case CEIL_L_D: {  // Mips32r2 instruction.
-      DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
-      double rounded = std::ceil(fs);
-      int64_t i64 = static_cast<int64_t>(rounded);
-      if (IsFp64Mode()) {
-        SetFPUResult(fd_reg(), i64);
-        if (set_fcsr_round64_error(fs, rounded)) {
-          set_fpu_register_invalid_result64(fs, rounded);
-        }
-      } else {
-        UNSUPPORTED();
-      }
-      break;
-    }
-    case CLASS_D: {  // Mips32r6 instruction
-      // Convert double input to uint64_t for easier bit manipulation
-      uint64_t classed = base::bit_cast<uint64_t>(fs);
-
-      // Extracting sign, exponent and mantissa from the input double
-      uint32_t sign = (classed >> 63) & 1;
-      uint32_t exponent = (classed >> 52) & 0x00000000000007FF;
-      uint64_t mantissa = classed & 0x000FFFFFFFFFFFFF;
-      uint64_t result;
-      double dResult;
-
-      // Setting flags if input double is negative infinity,
-      // positive infinity, negative zero or positive zero
-      bool negInf = (classed == 0xFFF0000000000000);
-      bool posInf = (classed == 0x7FF0000000000000);
-      bool negZero = (classed == 0x8000000000000000);
-      bool posZero = (classed == 0x0000000000000000);
-
-      bool signalingNan;
-      bool quietNan;
-      bool negSubnorm;
-      bool posSubnorm;
-      bool negNorm;
-      bool posNorm;
-
-      // Setting flags if double is NaN
-      signalingNan = false;
-      quietNan = false;
-      if (!negInf && !posInf && exponent == 0x7FF) {
-        quietNan = ((mantissa & 0x0008000000000000) != 0) &&
-                   ((mantissa & (0x0008000000000000 - 1)) == 0);
-        signalingNan = !quietNan;
-      }
-
-      // Setting flags if double is subnormal number
-      posSubnorm = false;
-      negSubnorm = false;
-      if ((exponent == 0) && (mantissa != 0)) {
-        DCHECK(sign == 0 || sign == 1);
-        posSubnorm = (sign == 0);
-        negSubnorm = (sign == 1);
-      }
-
-      // Setting flags if double is normal number
-      posNorm = false;
-      negNorm = false;
-      if (!posSubnorm && !negSubnorm && !posInf && !negInf && !signalingNan &&
-          !quietNan && !negZero && !posZero) {
-        DCHECK(sign == 0 || sign == 1);
-        posNorm = (sign == 0);
-        negNorm = (sign == 1);
-      }
-
-      // Calculating result according to description of CLASS.D instruction
-      result = (posZero << 9) | (posSubnorm << 8) | (posNorm << 7) |
-               (posInf << 6) | (negZero << 5) | (negSubnorm << 4) |
-               (negNorm << 3) | (negInf << 2) | (quietNan << 1) | signalingNan;
-
-      DCHECK_NE(result, 0);
-
-      dResult = base::bit_cast<double>(result);
-      SetFPUDoubleResult(fd_reg(), dResult);
-
-      break;
-    }
-    case C_F_D: {
-      set_fcsr_bit(fcsr_cc, false);
-      TraceRegWr(test_fcsr_bit(fcsr_cc));
-      break;
-    }
-    default:
-      UNREACHABLE();
-  }
-}
-
-void Simulator::DecodeTypeRegisterWRsType() {
-  float fs = get_fpu_register_float(fs_reg());
-  float ft = get_fpu_register_float(ft_reg());
-  int32_t alu_out = 0x12345678;
-  switch (instr_.FunctionFieldRaw()) {
-    case CVT_S_W:  // Convert word to float (single).
-      alu_out = get_fpu_register_signed_word(fs_reg());
-      SetFPUFloatResult(fd_reg(), static_cast<float>(alu_out));
-      break;
-    case CVT_D_W:  // Convert word to double.
- alu_out = get_fpu_register_signed_word(fs_reg()); - SetFPUDoubleResult(fd_reg(), static_cast(alu_out)); - break; - case CMP_AF: - SetFPUWordResult(fd_reg(), 0); - break; - case CMP_UN: - if (std::isnan(fs) || std::isnan(ft)) { - SetFPUWordResult(fd_reg(), -1); - } else { - SetFPUWordResult(fd_reg(), 0); - } - break; - case CMP_EQ: - if (fs == ft) { - SetFPUWordResult(fd_reg(), -1); - } else { - SetFPUWordResult(fd_reg(), 0); - } - break; - case CMP_UEQ: - if ((fs == ft) || (std::isnan(fs) || std::isnan(ft))) { - SetFPUWordResult(fd_reg(), -1); - } else { - SetFPUWordResult(fd_reg(), 0); - } - break; - case CMP_LT: - if (fs < ft) { - SetFPUWordResult(fd_reg(), -1); - } else { - SetFPUWordResult(fd_reg(), 0); - } - break; - case CMP_ULT: - if ((fs < ft) || (std::isnan(fs) || std::isnan(ft))) { - SetFPUWordResult(fd_reg(), -1); - } else { - SetFPUWordResult(fd_reg(), 0); - } - break; - case CMP_LE: - if (fs <= ft) { - SetFPUWordResult(fd_reg(), -1); - } else { - SetFPUWordResult(fd_reg(), 0); - } - break; - case CMP_ULE: - if ((fs <= ft) || (std::isnan(fs) || std::isnan(ft))) { - SetFPUWordResult(fd_reg(), -1); - } else { - SetFPUWordResult(fd_reg(), 0); - } - break; - case CMP_OR: - if (!std::isnan(fs) && !std::isnan(ft)) { - SetFPUWordResult(fd_reg(), -1); - } else { - SetFPUWordResult(fd_reg(), 0); - } - break; - case CMP_UNE: - if ((fs != ft) || (std::isnan(fs) || std::isnan(ft))) { - SetFPUWordResult(fd_reg(), -1); - } else { - SetFPUWordResult(fd_reg(), 0); - } - break; - case CMP_NE: - if (fs != ft) { - SetFPUWordResult(fd_reg(), -1); - } else { - SetFPUWordResult(fd_reg(), 0); - } - break; - default: - UNREACHABLE(); - } -} - -void Simulator::DecodeTypeRegisterSRsType() { - float fs, ft, fd; - fs = get_fpu_register_float(fs_reg()); - ft = get_fpu_register_float(ft_reg()); - fd = get_fpu_register_float(fd_reg()); - int32_t ft_int = base::bit_cast(ft); - int32_t fd_int = base::bit_cast(fd); - uint32_t cc, fcsr_cc; - cc = instr_.FCccValue(); - fcsr_cc = get_fcsr_condition_bit(cc); - switch (instr_.FunctionFieldRaw()) { - case RINT: { - DCHECK(IsMipsArchVariant(kMips32r6)); - float result, temp_result; - double temp; - float upper = std::ceil(fs); - float lower = std::floor(fs); - switch (get_fcsr_rounding_mode()) { - case kRoundToNearest: - if (upper - fs < fs - lower) { - result = upper; - } else if (upper - fs > fs - lower) { - result = lower; - } else { - temp_result = upper / 2; - float reminder = modf(temp_result, &temp); - if (reminder == 0) { - result = upper; - } else { - result = lower; - } - } - break; - case kRoundToZero: - result = (fs > 0 ? 
lower : upper); - break; - case kRoundToPlusInf: - result = upper; - break; - case kRoundToMinusInf: - result = lower; - break; - } - SetFPUFloatResult(fd_reg(), result); - if (result != fs) { - set_fcsr_bit(kFCSRInexactFlagBit, true); - } - break; - } - case ADD_S: - SetFPUFloatResult( - fd_reg(), - FPUCanonalizeOperation([](float lhs, float rhs) { return lhs + rhs; }, - fs, ft)); - break; - case SUB_S: - SetFPUFloatResult( - fd_reg(), - FPUCanonalizeOperation([](float lhs, float rhs) { return lhs - rhs; }, - fs, ft)); - break; - case MADDF_S: - DCHECK(IsMipsArchVariant(kMips32r6)); - SetFPUFloatResult(fd_reg(), std::fma(fs, ft, fd)); - break; - case MSUBF_S: - DCHECK(IsMipsArchVariant(kMips32r6)); - SetFPUFloatResult(fd_reg(), std::fma(-fs, ft, fd)); - break; - case MUL_S: - SetFPUFloatResult( - fd_reg(), - FPUCanonalizeOperation([](float lhs, float rhs) { return lhs * rhs; }, - fs, ft)); - break; - case DIV_S: - SetFPUFloatResult( - fd_reg(), - FPUCanonalizeOperation([](float lhs, float rhs) { return lhs / rhs; }, - fs, ft)); - break; - case ABS_S: - SetFPUFloatResult(fd_reg(), FPUCanonalizeOperation( - [](float fs) { return FPAbs(fs); }, fs)); - break; - case MOV_S: - SetFPUFloatResult(fd_reg(), fs); - break; - case NEG_S: - SetFPUFloatResult(fd_reg(), - FPUCanonalizeOperation([](float src) { return -src; }, - KeepSign::yes, fs)); - break; - case SQRT_S: - SetFPUFloatResult( - fd_reg(), - FPUCanonalizeOperation([](float src) { return std::sqrt(src); }, fs)); - break; - case RSQRT_S: - SetFPUFloatResult( - fd_reg(), FPUCanonalizeOperation( - [](float src) { return 1.0 / std::sqrt(src); }, fs)); - break; - case RECIP_S: - SetFPUFloatResult(fd_reg(), FPUCanonalizeOperation( - [](float src) { return 1.0 / src; }, fs)); - break; - case C_F_D: - set_fcsr_bit(fcsr_cc, false); - TraceRegWr(test_fcsr_bit(fcsr_cc)); - break; - case C_UN_D: - set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft)); - TraceRegWr(test_fcsr_bit(fcsr_cc)); - break; - case C_EQ_D: - set_fcsr_bit(fcsr_cc, (fs == ft)); - TraceRegWr(test_fcsr_bit(fcsr_cc)); - break; - case C_UEQ_D: - set_fcsr_bit(fcsr_cc, (fs == ft) || (std::isnan(fs) || std::isnan(ft))); - TraceRegWr(test_fcsr_bit(fcsr_cc)); - break; - case C_OLT_D: - set_fcsr_bit(fcsr_cc, (fs < ft)); - TraceRegWr(test_fcsr_bit(fcsr_cc)); - break; - case C_ULT_D: - set_fcsr_bit(fcsr_cc, (fs < ft) || (std::isnan(fs) || std::isnan(ft))); - TraceRegWr(test_fcsr_bit(fcsr_cc)); - break; - case C_OLE_D: - set_fcsr_bit(fcsr_cc, (fs <= ft)); - TraceRegWr(test_fcsr_bit(fcsr_cc)); - break; - case C_ULE_D: - set_fcsr_bit(fcsr_cc, (fs <= ft) || (std::isnan(fs) || std::isnan(ft))); - TraceRegWr(test_fcsr_bit(fcsr_cc)); - break; - case CVT_D_S: - SetFPUDoubleResult(fd_reg(), static_cast(fs)); - break; - case SEL: - DCHECK(IsMipsArchVariant(kMips32r6)); - SetFPUFloatResult(fd_reg(), (fd_int & 0x1) == 0 ? 
fs : ft); - break; - case CLASS_S: { // Mips32r6 instruction - // Convert float input to uint32_t for easier bit manipulation - float fs = get_fpu_register_float(fs_reg()); - uint32_t classed = base::bit_cast(fs); - - // Extracting sign, exponent and mantissa from the input float - uint32_t sign = (classed >> 31) & 1; - uint32_t exponent = (classed >> 23) & 0x000000FF; - uint32_t mantissa = classed & 0x007FFFFF; - uint32_t result; - float fResult; - - // Setting flags if input float is negative infinity, - // positive infinity, negative zero or positive zero - bool negInf = (classed == 0xFF800000); - bool posInf = (classed == 0x7F800000); - bool negZero = (classed == 0x80000000); - bool posZero = (classed == 0x00000000); - - bool signalingNan; - bool quietNan; - bool negSubnorm; - bool posSubnorm; - bool negNorm; - bool posNorm; - - // Setting flags if float is NaN - signalingNan = false; - quietNan = false; - if (!negInf && !posInf && (exponent == 0xFF)) { - quietNan = ((mantissa & 0x00200000) == 0) && - ((mantissa & (0x00200000 - 1)) == 0); - signalingNan = !quietNan; - } - - // Setting flags if float is subnormal number - posSubnorm = false; - negSubnorm = false; - if ((exponent == 0) && (mantissa != 0)) { - DCHECK(sign == 0 || sign == 1); - posSubnorm = (sign == 0); - negSubnorm = (sign == 1); - } - - // Setting flags if float is normal number - posNorm = false; - negNorm = false; - if (!posSubnorm && !negSubnorm && !posInf && !negInf && !signalingNan && - !quietNan && !negZero && !posZero) { - DCHECK(sign == 0 || sign == 1); - posNorm = (sign == 0); - negNorm = (sign == 1); - } - - // Calculating result according to description of CLASS.S instruction - result = (posZero << 9) | (posSubnorm << 8) | (posNorm << 7) | - (posInf << 6) | (negZero << 5) | (negSubnorm << 4) | - (negNorm << 3) | (negInf << 2) | (quietNan << 1) | signalingNan; - - DCHECK_NE(result, 0); - - fResult = base::bit_cast(result); - SetFPUFloatResult(fd_reg(), fResult); - - break; - } - case SELEQZ_C: - DCHECK(IsMipsArchVariant(kMips32r6)); - SetFPUFloatResult(fd_reg(), (ft_int & 0x1) == 0 - ? get_fpu_register_float(fs_reg()) - : 0.0); - break; - case SELNEZ_C: - DCHECK(IsMipsArchVariant(kMips32r6)); - SetFPUFloatResult(fd_reg(), (ft_int & 0x1) != 0 - ? get_fpu_register_float(fs_reg()) - : 0.0); - break; - case MOVZ_C: { - DCHECK(IsMipsArchVariant(kMips32r2)); - if (rt() == 0) { - SetFPUFloatResult(fd_reg(), fs); - } - break; - } - case MOVN_C: { - DCHECK(IsMipsArchVariant(kMips32r2)); - if (rt() != 0) { - SetFPUFloatResult(fd_reg(), fs); - } - break; - } - case MOVF: { - // Same function field for MOVT.D and MOVF.D - uint32_t ft_cc = (ft_reg() >> 2) & 0x7; - ft_cc = get_fcsr_condition_bit(ft_cc); - - if (instr_.Bit(16)) { // Read Tf bit. - // MOVT.D - if (test_fcsr_bit(ft_cc)) SetFPUFloatResult(fd_reg(), fs); - } else { - // MOVF.D - if (!test_fcsr_bit(ft_cc)) SetFPUFloatResult(fd_reg(), fs); - } - break; - } - case TRUNC_W_S: { // Truncate single to word (round towards 0). - float rounded = trunc(fs); - int32_t result = static_cast(rounded); - SetFPUWordResult(fd_reg(), result); - if (set_fcsr_round_error(fs, rounded)) { - set_fpu_register_word_invalid_result(fs, rounded); - } - } break; - case TRUNC_L_S: { // Mips32r2 instruction. 
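// The C_UN/C_ULT/C_ULE cases above implement IEEE "unordered" compares: any
// comparison involving a NaN is unordered, so the U-variants report true
// while the ordered variants (C_EQ/C_OLT/C_OLE) report false. A minimal
// sketch of the two families (function names are ours):
#include <cmath>

bool OrderedLess(float a, float b) {
  return a < b;  // false whenever a or b is NaN
}

bool UnorderedLess(float a, float b) {
  return (a < b) || std::isnan(a) || std::isnan(b);
}
// Example: OrderedLess(1.0f, NAN) == false, UnorderedLess(1.0f, NAN) == true.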
- DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); - float rounded = trunc(fs); - int64_t i64 = static_cast(rounded); - if (IsFp64Mode()) { - SetFPUResult(fd_reg(), i64); - if (set_fcsr_round64_error(fs, rounded)) { - set_fpu_register_invalid_result64(fs, rounded); - } - } else { - UNSUPPORTED(); - } - break; - } - case FLOOR_W_S: // Round double to word towards negative infinity. - { - float rounded = std::floor(fs); - int32_t result = static_cast(rounded); - SetFPUWordResult(fd_reg(), result); - if (set_fcsr_round_error(fs, rounded)) { - set_fpu_register_word_invalid_result(fs, rounded); - } - } break; - case FLOOR_L_S: { // Mips32r2 instruction. - DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); - float rounded = std::floor(fs); - int64_t i64 = static_cast(rounded); - if (IsFp64Mode()) { - SetFPUResult(fd_reg(), i64); - if (set_fcsr_round64_error(fs, rounded)) { - set_fpu_register_invalid_result64(fs, rounded); - } - } else { - UNSUPPORTED(); - } - break; - } - case ROUND_W_S: { - float rounded = std::floor(fs + 0.5); - int32_t result = static_cast(rounded); - if ((result & 1) != 0 && result - fs == 0.5) { - // If the number is halfway between two integers, - // round to the even one. - result--; - } - SetFPUWordResult(fd_reg(), result); - if (set_fcsr_round_error(fs, rounded)) { - set_fpu_register_word_invalid_result(fs, rounded); - } - break; - } - case ROUND_L_S: { // Mips32r2 instruction. - DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); - float rounded = std::floor(fs + 0.5); - int64_t result = static_cast(rounded); - if ((result & 1) != 0 && result - fs == 0.5) { - // If the number is halfway between two integers, - // round to the even one. - result--; - } - int64_t i64 = static_cast(result); - if (IsFp64Mode()) { - SetFPUResult(fd_reg(), i64); - if (set_fcsr_round64_error(fs, rounded)) { - set_fpu_register_invalid_result64(fs, rounded); - } - } else { - UNSUPPORTED(); - } - break; - } - case CEIL_W_S: // Round double to word towards positive infinity. - { - float rounded = std::ceil(fs); - int32_t result = static_cast(rounded); - SetFPUWordResult(fd_reg(), result); - if (set_fcsr_round_error(fs, rounded)) { - set_fpu_register_word_invalid_result(fs, rounded); - } - } break; - case CEIL_L_S: { // Mips32r2 instruction. 
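// FLOOR/CEIL/TRUNC/ROUND above each pin one rounding mode, while the
// CVT.W.S/CVT.L.S cases below honor whatever mode FCSR currently selects.
// Host C++ can mimic that dynamic behavior with <cfenv>; this is a sketch
// under that assumption, not the simulator's round_according_to_fcsr:
#include <cfenv>
#include <cmath>
#include <cstdint>

int32_t ConvertWithMode(float x, int mode) {
  // mode is one of FE_TONEAREST, FE_TOWARDZERO, FE_UPWARD, FE_DOWNWARD.
  const int saved = std::fegetround();
  std::fesetround(mode);
  const float rounded = std::nearbyintf(x);  // rounds per the current mode
  std::fesetround(saved);
  return static_cast<int32_t>(rounded);
}
// Example: ConvertWithMode(1.5f, FE_TOWARDZERO) == 1,
//          ConvertWithMode(1.5f, FE_TONEAREST) == 2.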
- DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)); - float rounded = std::ceil(fs); - int64_t i64 = static_cast(rounded); - if (IsFp64Mode()) { - SetFPUResult(fd_reg(), i64); - if (set_fcsr_round64_error(fs, rounded)) { - set_fpu_register_invalid_result64(fs, rounded); - } - } else { - UNSUPPORTED(); - } - break; - } - case MIN: - DCHECK(IsMipsArchVariant(kMips32r6)); - SetFPUFloatResult(fd_reg(), FPUMin(ft, fs)); - break; - case MAX: - DCHECK(IsMipsArchVariant(kMips32r6)); - SetFPUFloatResult(fd_reg(), FPUMax(ft, fs)); - break; - case MINA: - DCHECK(IsMipsArchVariant(kMips32r6)); - SetFPUFloatResult(fd_reg(), FPUMinA(ft, fs)); - break; - case MAXA: - DCHECK(IsMipsArchVariant(kMips32r6)); - SetFPUFloatResult(fd_reg(), FPUMaxA(ft, fs)); - break; - case CVT_L_S: { - if (IsFp64Mode()) { - int64_t result; - float rounded; - round64_according_to_fcsr(fs, &rounded, &result, fs); - SetFPUResult(fd_reg(), result); - if (set_fcsr_round64_error(fs, rounded)) { - set_fpu_register_invalid_result64(fs, rounded); - } - } else { - UNSUPPORTED(); - } - break; - } - case CVT_W_S: { - float rounded; - int32_t result; - round_according_to_fcsr(fs, &rounded, &result, fs); - SetFPUWordResult(fd_reg(), result); - if (set_fcsr_round_error(fs, rounded)) { - set_fpu_register_word_invalid_result(fs, rounded); - } - break; - } - default: - // CVT_W_S CVT_L_S ROUND_W_S ROUND_L_S FLOOR_W_S FLOOR_L_S - // CEIL_W_S CEIL_L_S CVT_PS_S are unimplemented. - UNREACHABLE(); - } -} - -void Simulator::DecodeTypeRegisterLRsType() { - double fs = get_fpu_register_double(fs_reg()); - double ft = get_fpu_register_double(ft_reg()); - switch (instr_.FunctionFieldRaw()) { - case CVT_D_L: // Mips32r2 instruction. - // Watch the signs here, we want 2 32-bit vals - // to make a sign-64. - int64_t i64; - if (IsFp64Mode()) { - i64 = get_fpu_register(fs_reg()); - } else { - i64 = static_cast(get_fpu_register_word(fs_reg())); - i64 |= static_cast(get_fpu_register_word(fs_reg() + 1)) << 32; - } - SetFPUDoubleResult(fd_reg(), static_cast(i64)); - break; - case CVT_S_L: - if (IsFp64Mode()) { - i64 = get_fpu_register(fs_reg()); - } else { - i64 = static_cast(get_fpu_register_word(fs_reg())); - i64 |= static_cast(get_fpu_register_word(fs_reg() + 1)) << 32; - } - SetFPUFloatResult(fd_reg(), static_cast(i64)); - break; - case CMP_AF: // Mips64r6 CMP.D instructions. 
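// The CMP_xx cases beginning here write an all-ones (-1) or all-zeros mask
// rather than a boolean: R6 FP compares produce bitmasks that feed directly
// into SEL/BSEL-style selects. CMP_AF is "always false", hence the zero
// result below. The idiom in isolation (a sketch, names are ours):
#include <cstdint>

int64_t CompareLessMask(double fs, double ft) {
  return fs < ft ? -1 : 0;  // -1 is 0xFFFF...FF, a full select mask
}

double SelectByMask(int64_t mask, double if_true, double if_false) {
  // With an all-ones/all-zeros mask, bitwise blending and branching agree.
  return mask != 0 ? if_true : if_false;
}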
- SetFPUResult(fd_reg(), 0); - break; - case CMP_UN: - if (std::isnan(fs) || std::isnan(ft)) { - SetFPUResult(fd_reg(), -1); - } else { - SetFPUResult(fd_reg(), 0); - } - break; - case CMP_EQ: - if (fs == ft) { - SetFPUResult(fd_reg(), -1); - } else { - SetFPUResult(fd_reg(), 0); - } - break; - case CMP_UEQ: - if ((fs == ft) || (std::isnan(fs) || std::isnan(ft))) { - SetFPUResult(fd_reg(), -1); - } else { - SetFPUResult(fd_reg(), 0); - } - break; - case CMP_LT: - if (fs < ft) { - SetFPUResult(fd_reg(), -1); - } else { - SetFPUResult(fd_reg(), 0); - } - break; - case CMP_ULT: - if ((fs < ft) || (std::isnan(fs) || std::isnan(ft))) { - SetFPUResult(fd_reg(), -1); - } else { - SetFPUResult(fd_reg(), 0); - } - break; - case CMP_LE: - if (fs <= ft) { - SetFPUResult(fd_reg(), -1); - } else { - SetFPUResult(fd_reg(), 0); - } - break; - case CMP_ULE: - if ((fs <= ft) || (std::isnan(fs) || std::isnan(ft))) { - SetFPUResult(fd_reg(), -1); - } else { - SetFPUResult(fd_reg(), 0); - } - break; - case CMP_OR: - if (!std::isnan(fs) && !std::isnan(ft)) { - SetFPUResult(fd_reg(), -1); - } else { - SetFPUResult(fd_reg(), 0); - } - break; - case CMP_UNE: - if ((fs != ft) || (std::isnan(fs) || std::isnan(ft))) { - SetFPUResult(fd_reg(), -1); - } else { - SetFPUResult(fd_reg(), 0); - } - break; - case CMP_NE: - if (fs != ft && (!std::isnan(fs) && !std::isnan(ft))) { - SetFPUResult(fd_reg(), -1); - } else { - SetFPUResult(fd_reg(), 0); - } - break; - default: - UNREACHABLE(); - } -} - -void Simulator::DecodeTypeRegisterCOP1() { - switch (instr_.RsFieldRaw()) { - case CFC1: - // At the moment only FCSR is supported. - DCHECK_EQ(fs_reg(), kFCSRRegister); - SetResult(rt_reg(), FCSR_); - break; - case MFC1: - SetResult(rt_reg(), get_fpu_register_word(fs_reg())); - break; - case MFHC1: - if (IsFp64Mode()) { - SetResult(rt_reg(), get_fpu_register_hi_word(fs_reg())); - } else { - SetResult(rt_reg(), get_fpu_register_word(fs_reg() + 1)); - } - break; - case CTC1: { - // At the moment only FCSR is supported. - DCHECK_EQ(fs_reg(), kFCSRRegister); - int32_t reg = registers_[rt_reg()]; - if (IsMipsArchVariant(kMips32r6)) { - FCSR_ = reg | kFCSRNaN2008FlagMask; - } else { - DCHECK(IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kMips32r2)); - FCSR_ = reg & ~kFCSRNaN2008FlagMask; - } - TraceRegWr(static_cast(FCSR_)); - break; - } - case MTC1: - // Hardware writes upper 32-bits to zero on mtc1. - set_fpu_register_hi_word(fs_reg(), 0); - set_fpu_register_word(fs_reg(), registers_[rt_reg()]); - TraceRegWr(get_fpu_register_word(fs_reg()), FLOAT); - break; - case MTHC1: - if (IsFp64Mode()) { - set_fpu_register_hi_word(fs_reg(), registers_[rt_reg()]); - TraceRegWr(get_fpu_register(fs_reg()), DOUBLE); - } else { - set_fpu_register_word(fs_reg() + 1, registers_[rt_reg()]); - if (fs_reg() % 2) { - TraceRegWr(get_fpu_register_word(fs_reg() + 1), FLOAT); - } else { - TraceRegWr(get_fpu_register(fs_reg()), DOUBLE); - } - } - break; - case S: { - DecodeTypeRegisterSRsType(); - break; - } - case D: - DecodeTypeRegisterDRsType(); - break; - case W: - DecodeTypeRegisterWRsType(); - break; - case L: - DecodeTypeRegisterLRsType(); - break; - case PS: - // Not implemented. 
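// MTHC1 above writes the high word of an even/odd FPU register pair, the
// layout that CVT.D.L relies on in FR=0 mode: the even register holds the
// low 32 bits, the odd one the high 32 bits. The reassembly in isolation
// (a plain C++ sketch, not a simulator API):
#include <cstdint>

int64_t PairToInt64(uint32_t lo_word, uint32_t hi_word) {
  // Widen the low word as unsigned so no sign bits leak into the high half.
  int64_t result = static_cast<int64_t>(lo_word);
  result |= static_cast<int64_t>(hi_word) << 32;
  return result;
}
// Example: PairToInt64(0xFFFFFFFFu, 0x00000000u) == 4294967295.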
-      UNREACHABLE();
-    default:
-      UNREACHABLE();
-  }
-}
-
-void Simulator::DecodeTypeRegisterCOP1X() {
-  switch (instr_.FunctionFieldRaw()) {
-    case MADD_S: {
-      DCHECK(IsMipsArchVariant(kMips32r2));
-      float fr, ft, fs;
-      fr = get_fpu_register_float(fr_reg());
-      fs = get_fpu_register_float(fs_reg());
-      ft = get_fpu_register_float(ft_reg());
-      SetFPUFloatResult(fd_reg(), fs * ft + fr);
-      break;
-    }
-    case MSUB_S: {
-      DCHECK(IsMipsArchVariant(kMips32r2));
-      float fr, ft, fs;
-      fr = get_fpu_register_float(fr_reg());
-      fs = get_fpu_register_float(fs_reg());
-      ft = get_fpu_register_float(ft_reg());
-      SetFPUFloatResult(fd_reg(), fs * ft - fr);
-      break;
-    }
-    case MADD_D: {
-      DCHECK(IsMipsArchVariant(kMips32r2));
-      double fr, ft, fs;
-      fr = get_fpu_register_double(fr_reg());
-      fs = get_fpu_register_double(fs_reg());
-      ft = get_fpu_register_double(ft_reg());
-      SetFPUDoubleResult(fd_reg(), fs * ft + fr);
-      break;
-    }
-    case MSUB_D: {
-      DCHECK(IsMipsArchVariant(kMips32r2));
-      double fr, ft, fs;
-      fr = get_fpu_register_double(fr_reg());
-      fs = get_fpu_register_double(fs_reg());
-      ft = get_fpu_register_double(ft_reg());
-      SetFPUDoubleResult(fd_reg(), fs * ft - fr);
-      break;
-    }
-    default:
-      UNREACHABLE();
-  }
-}
-
-void Simulator::DecodeTypeRegisterSPECIAL() {
-  int64_t alu_out = 0x12345678;
-  int64_t i64hilo = 0;
-  uint64_t u64hilo = 0;
-  bool do_interrupt = false;
-
-  switch (instr_.FunctionFieldRaw()) {
-    case SELEQZ_S:
-      DCHECK(IsMipsArchVariant(kMips32r6));
-      SetResult(rd_reg(), rt() == 0 ? rs() : 0);
-      break;
-    case SELNEZ_S:
-      DCHECK(IsMipsArchVariant(kMips32r6));
-      SetResult(rd_reg(), rt() != 0 ? rs() : 0);
-      break;
-    case JR: {
-      int32_t next_pc = rs();
-      int32_t current_pc = get_pc();
-      Instruction* branch_delay_instr =
-          reinterpret_cast<Instruction*>(current_pc + kInstrSize);
-      BranchDelayInstructionDecode(branch_delay_instr);
-      set_pc(next_pc);
-      pc_modified_ = true;
-      break;
-    }
-    case JALR: {
-      int32_t next_pc = rs();
-      int32_t return_addr_reg = rd_reg();
-      int32_t current_pc = get_pc();
-      Instruction* branch_delay_instr =
-          reinterpret_cast<Instruction*>(current_pc + kInstrSize);
-      BranchDelayInstructionDecode(branch_delay_instr);
-      set_register(return_addr_reg, current_pc + 2 * kInstrSize);
-      set_pc(next_pc);
-      pc_modified_ = true;
-      break;
-    }
-    case SLL:
-      alu_out = rt() << sa();
-      SetResult(rd_reg(), static_cast<int32_t>(alu_out));
-      break;
-    case SRL:
-      if (rs_reg() == 0) {
-        // Regular logical right shift of a word by a fixed number of
-        // bits instruction. RS field is always equal to 0.
-        alu_out = rt_u() >> sa();
-      } else {
-        // Logical right-rotate of a word by a fixed number of bits. This
-        // is a special case of the SRL instruction, added in MIPS32
-        // Release 2. RS field is equal to 00001.
-        alu_out = base::bits::RotateRight32(rt_u(), sa());
-      }
-      SetResult(rd_reg(), static_cast<int32_t>(alu_out));
-      break;
-    case SRA:
-      alu_out = rt() >> sa();
-      SetResult(rd_reg(), static_cast<int32_t>(alu_out));
-      break;
-    case SLLV:
-      alu_out = rt() << rs();
-      SetResult(rd_reg(), static_cast<int32_t>(alu_out));
-      break;
-    case SRLV:
-      if (sa() == 0) {
-        // Regular logical right-shift of a word by a variable number of
-        // bits instruction. SA field is always equal to 0.
-        alu_out = rt_u() >> rs();
-      } else {
-        // Logical right-rotate of a word by a variable number of bits.
-        // This is a special case of the SRLV instruction, added in MIPS32
-        // Release 2. SA field is equal to 00001.
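// The branch below is that rotate: ROTRV packs rotate-right into SRLV's
// encoding with SA == 1. Rotation in portable C++ (a sketch of what
// base::bits::RotateRight32 computes, under that assumption):
#include <cstdint>

uint32_t RotateRight32(uint32_t value, uint32_t shift) {
  shift &= 31;  // MIPS uses only the low five bits of the rotate amount
  if (shift == 0) return value;
  return (value >> shift) | (value << (32 - shift));
}
// Example: RotateRight32(0x00000001u, 1) == 0x80000000u.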
- alu_out = base::bits::RotateRight32(rt_u(), rs_u()); - } - SetResult(rd_reg(), static_cast(alu_out)); - break; - case SRAV: - SetResult(rd_reg(), rt() >> rs()); - break; - case LSA: { - DCHECK(IsMipsArchVariant(kMips32r6)); - int8_t sa = lsa_sa() + 1; - int32_t _rt = rt(); - int32_t _rs = rs(); - int32_t res = _rs << sa; - res += _rt; - DCHECK_EQ(res, (rs() << (lsa_sa() + 1)) + rt()); - USE(res); - SetResult(rd_reg(), (rs() << (lsa_sa() + 1)) + rt()); - break; - } - case MFHI: // MFHI == CLZ on R6. - if (!IsMipsArchVariant(kMips32r6)) { - DCHECK_EQ(sa(), 0); - alu_out = get_register(HI); - } else { - // MIPS spec: If no bits were set in GPR rs, the result written to - // GPR rd is 32. - DCHECK_EQ(sa(), 1); - alu_out = base::bits::CountLeadingZeros32(rs_u()); - } - SetResult(rd_reg(), static_cast(alu_out)); - break; - case MFLO: - alu_out = get_register(LO); - SetResult(rd_reg(), static_cast(alu_out)); - break; - // Instructions using HI and LO registers. - case MULT: - i64hilo = static_cast(rs()) * static_cast(rt()); - if (!IsMipsArchVariant(kMips32r6)) { - set_register(LO, static_cast(i64hilo & 0xFFFFFFFF)); - set_register(HI, static_cast(i64hilo >> 32)); - } else { - switch (sa()) { - case MUL_OP: - SetResult(rd_reg(), static_cast(i64hilo & 0xFFFFFFFF)); - break; - case MUH_OP: - SetResult(rd_reg(), static_cast(i64hilo >> 32)); - break; - default: - UNIMPLEMENTED_MIPS(); - break; - } - } - break; - case MULTU: - u64hilo = static_cast(rs_u()) * static_cast(rt_u()); - if (!IsMipsArchVariant(kMips32r6)) { - set_register(LO, static_cast(u64hilo & 0xFFFFFFFF)); - set_register(HI, static_cast(u64hilo >> 32)); - } else { - switch (sa()) { - case MUL_OP: - SetResult(rd_reg(), static_cast(u64hilo & 0xFFFFFFFF)); - break; - case MUH_OP: - SetResult(rd_reg(), static_cast(u64hilo >> 32)); - break; - default: - UNIMPLEMENTED_MIPS(); - break; - } - } - break; - case DIV: - if (IsMipsArchVariant(kMips32r6)) { - switch (sa()) { - case DIV_OP: - if (rs() == INT_MIN && rt() == -1) { - SetResult(rd_reg(), INT_MIN); - } else if (rt() != 0) { - SetResult(rd_reg(), rs() / rt()); - } - break; - case MOD_OP: - if (rs() == INT_MIN && rt() == -1) { - SetResult(rd_reg(), 0); - } else if (rt() != 0) { - SetResult(rd_reg(), rs() % rt()); - } - break; - default: - UNIMPLEMENTED_MIPS(); - break; - } - } else { - // Divide by zero and overflow was not checked in the - // configuration step - div and divu do not raise exceptions. On - // division by 0 the result will be UNPREDICTABLE. On overflow - // (INT_MIN/-1), return INT_MIN which is what the hardware does. 
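// The comment above is the crux of DIV emulation: INT_MIN / -1 overflows
// (the true quotient 2^31 is unrepresentable), and MIPS hardware yields
// INT_MIN with remainder 0, which the guarded code below reproduces.
// Standalone sketch (names are ours; plain C++ division would be
// undefined behavior here without the guard):
#include <climits>
#include <cstdint>

void SafeDiv32(int32_t rs, int32_t rt, int32_t* quot, int32_t* rem) {
  if (rs == INT_MIN && rt == -1) {
    *quot = INT_MIN;  // hardware result on overflow
    *rem = 0;
  } else if (rt != 0) {
    *quot = rs / rt;
    *rem = rs % rt;
  }  // rt == 0: UNPREDICTABLE on MIPS, so leave the outputs untouched
}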
- if (rs() == INT_MIN && rt() == -1) { - set_register(LO, INT_MIN); - set_register(HI, 0); - } else if (rt() != 0) { - set_register(LO, rs() / rt()); - set_register(HI, rs() % rt()); - } - } - break; - case DIVU: - if (IsMipsArchVariant(kMips32r6)) { - switch (sa()) { - case DIV_OP: - if (rt_u() != 0) { - SetResult(rd_reg(), rs_u() / rt_u()); - } - break; - case MOD_OP: - if (rt_u() != 0) { - SetResult(rd_reg(), rs_u() % rt_u()); - } - break; - default: - UNIMPLEMENTED_MIPS(); - break; - } - } else { - if (rt_u() != 0) { - set_register(LO, rs_u() / rt_u()); - set_register(HI, rs_u() % rt_u()); - } - } - break; - case ADD: - if (HaveSameSign(rs(), rt())) { - if (rs() > 0) { - if (rs() <= (Registers::kMaxValue - rt())) { - SignalException(kIntegerOverflow); - } - } else if (rs() < 0) { - if (rs() >= (Registers::kMinValue - rt())) { - SignalException(kIntegerUnderflow); - } - } - } - SetResult(rd_reg(), rs() + rt()); - break; - case ADDU: - SetResult(rd_reg(), rs() + rt()); - break; - case SUB: - if (!HaveSameSign(rs(), rt())) { - if (rs() > 0) { - if (rs() <= (Registers::kMaxValue + rt())) { - SignalException(kIntegerOverflow); - } - } else if (rs() < 0) { - if (rs() >= (Registers::kMinValue + rt())) { - SignalException(kIntegerUnderflow); - } - } - } - SetResult(rd_reg(), rs() - rt()); - break; - case SUBU: - SetResult(rd_reg(), rs() - rt()); - break; - case AND: - SetResult(rd_reg(), rs() & rt()); - break; - case OR: - SetResult(rd_reg(), rs() | rt()); - break; - case XOR: - SetResult(rd_reg(), rs() ^ rt()); - break; - case NOR: - SetResult(rd_reg(), ~(rs() | rt())); - break; - case SLT: - SetResult(rd_reg(), rs() < rt() ? 1 : 0); - break; - case SLTU: - SetResult(rd_reg(), rs_u() < rt_u() ? 1 : 0); - break; - // Break and trap instructions. - case BREAK: - do_interrupt = true; - break; - case TGE: - do_interrupt = rs() >= rt(); - break; - case TGEU: - do_interrupt = rs_u() >= rt_u(); - break; - case TLT: - do_interrupt = rs() < rt(); - break; - case TLTU: - do_interrupt = rs_u() < rt_u(); - break; - case TEQ: - do_interrupt = rs() == rt(); - break; - case TNE: - do_interrupt = rs() != rt(); - break; - case SYNC: - // TODO(palfia): Ignore sync instruction for now. - break; - // Conditional moves. - case MOVN: - if (rt()) { - SetResult(rd_reg(), rs()); - } - break; - case MOVCI: { - uint32_t cc = instr_.FBccValue(); - uint32_t fcsr_cc = get_fcsr_condition_bit(cc); - if (instr_.Bit(16)) { // Read Tf bit. - if (test_fcsr_bit(fcsr_cc)) set_register(rd_reg(), rs()); - } else { - if (!test_fcsr_bit(fcsr_cc)) set_register(rd_reg(), rs()); - } - break; - } - case MOVZ: - if (!rt()) { - SetResult(rd_reg(), rs()); - } - break; - default: - UNREACHABLE(); - } - if (do_interrupt) { - SoftwareInterrupt(); - } -} - -void Simulator::DecodeTypeRegisterSPECIAL2() { - int32_t alu_out; - switch (instr_.FunctionFieldRaw()) { - case MUL: - // Only the lower 32 bits are kept. - alu_out = rs_u() * rt_u(); - // HI and LO are UNPREDICTABLE after the operation. - set_register(LO, Unpredictable); - set_register(HI, Unpredictable); - break; - case CLZ: - // MIPS32 spec: If no bits were set in GPR rs, the result written to - // GPR rd is 32. - alu_out = base::bits::CountLeadingZeros32(rs_u()); - break; - default: - alu_out = 0x12345678; - UNREACHABLE(); - } - SetResult(rd_reg(), alu_out); -} - -void Simulator::DecodeTypeRegisterSPECIAL3() { - int32_t alu_out; - switch (instr_.FunctionFieldRaw()) { - case INS: { // Mips32r2 instruction. - // Interpret rd field as 5-bit msb of insert. 
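// What follows implements the classic mask-based bitfield insert: build a
// field mask from (msb, lsb), clear that field in rt, and OR in the low
// bits of rs. The same computation in isolation (a sketch, our names):
#include <cstdint>

uint32_t BitfieldInsert(uint32_t rt, uint32_t rs, int lsb, int size) {
  uint32_t mask = (size < 32) ? ((1u << size) - 1) : 0xFFFFFFFFu;
  return (rt & ~(mask << lsb)) | ((rs & mask) << lsb);
}
// Example: BitfieldInsert(0xFFFFFFFFu, 0x0u, 8, 8) == 0xFFFF00FFu.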
-      uint16_t msb = rd_reg();
-      // Interpret sa field as 5-bit lsb of insert.
-      uint16_t lsb = sa();
-      uint16_t size = msb - lsb + 1;
-      uint32_t mask;
-      if (size < 32) {
-        mask = (1 << size) - 1;
-      } else {
-        mask = std::numeric_limits<uint32_t>::max();
-      }
-      alu_out = (rt_u() & ~(mask << lsb)) | ((rs_u() & mask) << lsb);
-      // The INS instruction leaves its result in rt, rather than rd.
-      SetResult(rt_reg(), alu_out);
-      break;
-    }
-    case EXT: {  // Mips32r2 instruction.
-      // Interpret rd field as 5-bit msb of extract.
-      uint16_t msb = rd_reg();
-      // Interpret sa field as 5-bit lsb of extract.
-      uint16_t lsb = sa();
-      uint16_t size = msb + 1;
-      uint32_t mask;
-      if (size < 32) {
-        mask = (1 << size) - 1;
-      } else {
-        mask = std::numeric_limits<uint32_t>::max();
-      }
-      alu_out = (rs_u() & (mask << lsb)) >> lsb;
-      SetResult(rt_reg(), alu_out);
-      break;
-    }
-    case BSHFL: {
-      int sa = instr_.SaFieldRaw() >> kSaShift;
-      switch (sa) {
-        case BITSWAP: {
-          uint32_t input = static_cast<uint32_t>(rt());
-          uint32_t output = 0;
-          uint8_t i_byte, o_byte;
-
-          // Reverse the bits in each individual byte.
-          for (int i = 0; i < 4; i++) {
-            output = output >> 8;
-            i_byte = input & 0xFF;
-
-            // Fast way to reverse bits in a byte.
-            // Devised by Sean Anderson, July 13, 2001.
-            o_byte = static_cast<uint8_t>(((i_byte * 0x0802LU & 0x22110LU) |
-                                           (i_byte * 0x8020LU & 0x88440LU)) *
-                                              0x10101LU >>
-                                          16);
-
-            output = output | (static_cast<uint32_t>(o_byte << 24));
-            input = input >> 8;
-          }
-
-          alu_out = static_cast<int32_t>(output);
-          break;
-        }
-        case SEB: {
-          uint8_t input = static_cast<uint8_t>(rt());
-          uint32_t output = input;
-          uint32_t mask = 0x00000080;
-
-          // Sign extension.
-          if (mask & input) {
-            output |= 0xFFFFFF00;
-          }
-
-          alu_out = static_cast<int32_t>(output);
-          break;
-        }
-        case SEH: {
-          uint16_t input = static_cast<uint16_t>(rt());
-          uint32_t output = input;
-          uint32_t mask = 0x00008000;
-
-          // Sign extension.
-          if (mask & input) {
-            output |= 0xFFFF0000;
-          }
-
-          alu_out = static_cast<int32_t>(output);
-          break;
-        }
-        case WSBH: {
-          uint32_t input = static_cast<uint32_t>(rt());
-          uint32_t output = 0;
-
-          uint32_t mask = 0xFF000000;
-          for (int i = 0; i < 4; i++) {
-            uint32_t tmp = mask & input;
-            if (i % 2 == 0) {
-              tmp = tmp >> 8;
-            } else {
-              tmp = tmp << 8;
-            }
-            output = output | tmp;
-            mask = mask >> 8;
-          }
-
-          alu_out = static_cast<int32_t>(output);
-          break;
-        }
-        default: {
-          const uint8_t bp = instr_.Bp2Value();
-          sa >>= kBp2Bits;
-          switch (sa) {
-            case ALIGN: {
-              if (bp == 0) {
-                alu_out = static_cast<int32_t>(rt());
-              } else {
-                uint32_t rt_hi = rt() << (8 * bp);
-                uint32_t rs_lo = rs() >> (8 * (4 - bp));
-                alu_out = static_cast<int32_t>(rt_hi | rs_lo);
-              }
-              break;
-            }
-            default:
-              alu_out = 0x12345678;
-              UNREACHABLE();
-          }
-        }
-      }
-      SetResult(rd_reg(), alu_out);
-      break;
-    }
-    default:
-      UNREACHABLE();
-  }
-}
-
-int Simulator::DecodeMsaDataFormat() {
-  int df = -1;
-  if (instr_.IsMSABranchInstr()) {
-    switch (instr_.RsFieldRaw()) {
-      case BZ_V:
-      case BNZ_V:
-        df = MSA_VECT;
-        break;
-      case BZ_B:
-      case BNZ_B:
-        df = MSA_BYTE;
-        break;
-      case BZ_H:
-      case BNZ_H:
-        df = MSA_HALF;
-        break;
-      case BZ_W:
-      case BNZ_W:
-        df = MSA_WORD;
-        break;
-      case BZ_D:
-      case BNZ_D:
-        df = MSA_DWORD;
-        break;
-      default:
-        UNREACHABLE();
-    }
-  } else {
-    int DF[] = {MSA_BYTE, MSA_HALF, MSA_WORD, MSA_DWORD};
-    switch (instr_.MSAMinorOpcodeField()) {
-      case kMsaMinorI5:
-      case kMsaMinorI10:
-      case kMsaMinor3R:
-        df = DF[instr_.Bits(22, 21)];
-        break;
-      case kMsaMinorMI10:
-        df = DF[instr_.Bits(1, 0)];
-        break;
-      case kMsaMinorBIT:
-        df = DF[instr_.MsaBitDf()];
-        break;
-      case kMsaMinorELM:
-        df = DF[instr_.MsaElmDf()];
-        break;
-      case kMsaMinor3RF: {
uint32_t opcode = instr_.InstructionBits() & kMsa3RFMask; - switch (opcode) { - case FEXDO: - case FTQ: - case MUL_Q: - case MADD_Q: - case MSUB_Q: - case MULR_Q: - case MADDR_Q: - case MSUBR_Q: - df = DF[1 + instr_.Bit(21)]; - break; - default: - df = DF[2 + instr_.Bit(21)]; - break; - } - } break; - case kMsaMinor2R: - df = DF[instr_.Bits(17, 16)]; - break; - case kMsaMinor2RF: - df = DF[2 + instr_.Bit(16)]; - break; - default: - UNREACHABLE(); - } - } - return df; -} - -void Simulator::DecodeTypeMsaI8() { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK(CpuFeatures::IsSupported(MIPS_SIMD)); - uint32_t opcode = instr_.InstructionBits() & kMsaI8Mask; - int8_t i8 = instr_.MsaImm8Value(); - msa_reg_t ws, wd; - - switch (opcode) { - case ANDI_B: - get_msa_register(instr_.WsValue(), ws.b); - for (int i = 0; i < kMSALanesByte; i++) { - wd.b[i] = ws.b[i] & i8; - } - set_msa_register(instr_.WdValue(), wd.b); - TraceMSARegWr(wd.b); - break; - case ORI_B: - get_msa_register(instr_.WsValue(), ws.b); - for (int i = 0; i < kMSALanesByte; i++) { - wd.b[i] = ws.b[i] | i8; - } - set_msa_register(instr_.WdValue(), wd.b); - TraceMSARegWr(wd.b); - break; - case NORI_B: - get_msa_register(instr_.WsValue(), ws.b); - for (int i = 0; i < kMSALanesByte; i++) { - wd.b[i] = ~(ws.b[i] | i8); - } - set_msa_register(instr_.WdValue(), wd.b); - TraceMSARegWr(wd.b); - break; - case XORI_B: - get_msa_register(instr_.WsValue(), ws.b); - for (int i = 0; i < kMSALanesByte; i++) { - wd.b[i] = ws.b[i] ^ i8; - } - set_msa_register(instr_.WdValue(), wd.b); - TraceMSARegWr(wd.b); - break; - case BMNZI_B: - get_msa_register(instr_.WsValue(), ws.b); - get_msa_register(instr_.WdValue(), wd.b); - for (int i = 0; i < kMSALanesByte; i++) { - wd.b[i] = (ws.b[i] & i8) | (wd.b[i] & ~i8); - } - set_msa_register(instr_.WdValue(), wd.b); - TraceMSARegWr(wd.b); - break; - case BMZI_B: - get_msa_register(instr_.WsValue(), ws.b); - get_msa_register(instr_.WdValue(), wd.b); - for (int i = 0; i < kMSALanesByte; i++) { - wd.b[i] = (ws.b[i] & ~i8) | (wd.b[i] & i8); - } - set_msa_register(instr_.WdValue(), wd.b); - TraceMSARegWr(wd.b); - break; - case BSELI_B: - get_msa_register(instr_.WsValue(), ws.b); - get_msa_register(instr_.WdValue(), wd.b); - for (int i = 0; i < kMSALanesByte; i++) { - wd.b[i] = (ws.b[i] & ~wd.b[i]) | (wd.b[i] & i8); - } - set_msa_register(instr_.WdValue(), wd.b); - TraceMSARegWr(wd.b); - break; - case SHF_B: - get_msa_register(instr_.WsValue(), ws.b); - for (int i = 0; i < kMSALanesByte; i++) { - int j = i % 4; - int k = (i8 >> (2 * j)) & 0x3; - wd.b[i] = ws.b[i - j + k]; - } - set_msa_register(instr_.WdValue(), wd.b); - TraceMSARegWr(wd.b); - break; - case SHF_H: - get_msa_register(instr_.WsValue(), ws.h); - for (int i = 0; i < kMSALanesHalf; i++) { - int j = i % 4; - int k = (i8 >> (2 * j)) & 0x3; - wd.h[i] = ws.h[i - j + k]; - } - set_msa_register(instr_.WdValue(), wd.h); - TraceMSARegWr(wd.h); - break; - case SHF_W: - get_msa_register(instr_.WsValue(), ws.w); - for (int i = 0; i < kMSALanesWord; i++) { - int j = (i8 >> (2 * i)) & 0x3; - wd.w[i] = ws.w[j]; - } - set_msa_register(instr_.WdValue(), wd.w); - TraceMSARegWr(wd.w); - break; - default: - UNREACHABLE(); - } -} - -template -T Simulator::MsaI5InstrHelper(uint32_t opcode, T ws, int32_t i5) { - T res; - uint32_t ui5 = i5 & 0x1Fu; - uint64_t ws_u64 = static_cast(ws); - uint64_t ui5_u64 = static_cast(ui5); - - switch (opcode) { - case ADDVI: - res = static_cast(ws + ui5); - break; - case SUBVI: - res = static_cast(ws - ui5); - break; - case MAXI_S: - res = 
static_cast(std::max(ws, static_cast(i5))); - break; - case MINI_S: - res = static_cast(std::min(ws, static_cast(i5))); - break; - case MAXI_U: - res = static_cast(std::max(ws_u64, ui5_u64)); - break; - case MINI_U: - res = static_cast(std::min(ws_u64, ui5_u64)); - break; - case CEQI: - res = static_cast(!Compare(ws, static_cast(i5)) ? -1ull : 0ull); - break; - case CLTI_S: - res = static_cast((Compare(ws, static_cast(i5)) == -1) ? -1ull - : 0ull); - break; - case CLTI_U: - res = static_cast((Compare(ws_u64, ui5_u64) == -1) ? -1ull : 0ull); - break; - case CLEI_S: - res = - static_cast((Compare(ws, static_cast(i5)) != 1) ? -1ull : 0ull); - break; - case CLEI_U: - res = static_cast((Compare(ws_u64, ui5_u64) != 1) ? -1ull : 0ull); - break; - default: - UNREACHABLE(); - } - return res; -} - -void Simulator::DecodeTypeMsaI5() { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK(CpuFeatures::IsSupported(MIPS_SIMD)); - uint32_t opcode = instr_.InstructionBits() & kMsaI5Mask; - msa_reg_t ws, wd; - - // sign extend 5bit value to int32_t - int32_t i5 = static_cast(instr_.MsaImm5Value() << 27) >> 27; - -#define MSA_I5_DF(elem, num_of_lanes) \ - get_msa_register(instr_.WsValue(), ws.elem); \ - for (int i = 0; i < num_of_lanes; i++) { \ - wd.elem[i] = MsaI5InstrHelper(opcode, ws.elem[i], i5); \ - } \ - set_msa_register(instr_.WdValue(), wd.elem); \ - TraceMSARegWr(wd.elem) - - switch (DecodeMsaDataFormat()) { - case MSA_BYTE: - MSA_I5_DF(b, kMSALanesByte); - break; - case MSA_HALF: - MSA_I5_DF(h, kMSALanesHalf); - break; - case MSA_WORD: - MSA_I5_DF(w, kMSALanesWord); - break; - case MSA_DWORD: - MSA_I5_DF(d, kMSALanesDword); - break; - default: - UNREACHABLE(); - } -#undef MSA_I5_DF -} - -void Simulator::DecodeTypeMsaI10() { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK(CpuFeatures::IsSupported(MIPS_SIMD)); - uint32_t opcode = instr_.InstructionBits() & kMsaI5Mask; - int64_t s10 = (static_cast(instr_.MsaImm10Value()) << 54) >> 54; - msa_reg_t wd; - -#define MSA_I10_DF(elem, num_of_lanes, T) \ - for (int i = 0; i < num_of_lanes; ++i) { \ - wd.elem[i] = static_cast(s10); \ - } \ - set_msa_register(instr_.WdValue(), wd.elem); \ - TraceMSARegWr(wd.elem) - - if (opcode == LDI) { - switch (DecodeMsaDataFormat()) { - case MSA_BYTE: - MSA_I10_DF(b, kMSALanesByte, int8_t); - break; - case MSA_HALF: - MSA_I10_DF(h, kMSALanesHalf, int16_t); - break; - case MSA_WORD: - MSA_I10_DF(w, kMSALanesWord, int32_t); - break; - case MSA_DWORD: - MSA_I10_DF(d, kMSALanesDword, int64_t); - break; - default: - UNREACHABLE(); - } - } else { - UNREACHABLE(); - } -#undef MSA_I10_DF -} - -void Simulator::DecodeTypeMsaELM() { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK(CpuFeatures::IsSupported(MIPS_SIMD)); - uint32_t opcode = instr_.InstructionBits() & kMsaLongerELMMask; - int32_t n = instr_.MsaElmNValue(); - int32_t alu_out; - switch (opcode) { - case CTCMSA: - DCHECK_EQ(sa(), kMSACSRRegister); - MSACSR_ = base::bit_cast(registers_[rd_reg()]); - TraceRegWr(static_cast(MSACSR_)); - break; - case CFCMSA: - DCHECK_EQ(rd_reg(), kMSACSRRegister); - SetResult(sa(), base::bit_cast(MSACSR_)); - break; - case MOVE_V: { - msa_reg_t ws; - get_msa_register(ws_reg(), &ws); - set_msa_register(wd_reg(), &ws); - TraceMSARegWr(&ws); - } break; - default: - opcode &= kMsaELMMask; - switch (opcode) { - case COPY_S: - case COPY_U: { - msa_reg_t ws; - switch (DecodeMsaDataFormat()) { - case MSA_BYTE: { - DCHECK_LT(n, kMSALanesByte); - get_msa_register(instr_.WsValue(), ws.b); - alu_out = static_cast(ws.b[n]); - SetResult(wd_reg(), - (opcode == 
COPY_U) ? alu_out & 0xFFu : alu_out); - break; - } - case MSA_HALF: { - DCHECK_LT(n, kMSALanesHalf); - get_msa_register(instr_.WsValue(), ws.h); - alu_out = static_cast(ws.h[n]); - SetResult(wd_reg(), - (opcode == COPY_U) ? alu_out & 0xFFFFu : alu_out); - break; - } - case MSA_WORD: { - DCHECK_LT(n, kMSALanesWord); - get_msa_register(instr_.WsValue(), ws.w); - alu_out = static_cast(ws.w[n]); - SetResult(wd_reg(), alu_out); - break; - } - default: - UNREACHABLE(); - } - } break; - case INSERT: { - msa_reg_t wd; - switch (DecodeMsaDataFormat()) { - case MSA_BYTE: { - DCHECK_LT(n, kMSALanesByte); - int32_t rs = get_register(instr_.WsValue()); - get_msa_register(instr_.WdValue(), wd.b); - wd.b[n] = rs & 0xFFu; - set_msa_register(instr_.WdValue(), wd.b); - TraceMSARegWr(wd.b); - break; - } - case MSA_HALF: { - DCHECK_LT(n, kMSALanesHalf); - int32_t rs = get_register(instr_.WsValue()); - get_msa_register(instr_.WdValue(), wd.h); - wd.h[n] = rs & 0xFFFFu; - set_msa_register(instr_.WdValue(), wd.h); - TraceMSARegWr(wd.h); - break; - } - case MSA_WORD: { - DCHECK_LT(n, kMSALanesWord); - int32_t rs = get_register(instr_.WsValue()); - get_msa_register(instr_.WdValue(), wd.w); - wd.w[n] = rs; - set_msa_register(instr_.WdValue(), wd.w); - TraceMSARegWr(wd.w); - break; - } - default: - UNREACHABLE(); - } - } break; - case SLDI: { - uint8_t v[32]; - msa_reg_t ws; - msa_reg_t wd; - get_msa_register(ws_reg(), &ws); - get_msa_register(wd_reg(), &wd); -#define SLDI_DF(s, k) \ - for (unsigned i = 0; i < s; i++) { \ - v[i] = ws.b[s * k + i]; \ - v[i + s] = wd.b[s * k + i]; \ - } \ - for (unsigned i = 0; i < s; i++) { \ - wd.b[s * k + i] = v[i + n]; \ - } - switch (DecodeMsaDataFormat()) { - case MSA_BYTE: - DCHECK(n < kMSALanesByte); - SLDI_DF(kMSARegSize / sizeof(int8_t) / kBitsPerByte, 0) - break; - case MSA_HALF: - DCHECK(n < kMSALanesHalf); - for (int k = 0; k < 2; ++k) { - SLDI_DF(kMSARegSize / sizeof(int16_t) / kBitsPerByte, k) - } - break; - case MSA_WORD: - DCHECK(n < kMSALanesWord); - for (int k = 0; k < 4; ++k) { - SLDI_DF(kMSARegSize / sizeof(int32_t) / kBitsPerByte, k) - } - break; - case MSA_DWORD: - DCHECK(n < kMSALanesDword); - for (int k = 0; k < 8; ++k) { - SLDI_DF(kMSARegSize / sizeof(int64_t) / kBitsPerByte, k) - } - break; - default: - UNREACHABLE(); - } - set_msa_register(wd_reg(), &wd); - TraceMSARegWr(&wd); - } break; -#undef SLDI_DF - case SPLATI: - case INSVE: - UNIMPLEMENTED(); - default: - UNREACHABLE(); - } - break; - } -} - -template -T Simulator::MsaBitInstrHelper(uint32_t opcode, T wd, T ws, int32_t m) { - using uT = typename std::make_unsigned::type; - T res; - switch (opcode) { - case SLLI: - res = static_cast(ws << m); - break; - case SRAI: - res = static_cast(ArithmeticShiftRight(ws, m)); - break; - case SRLI: - res = static_cast(static_cast(ws) >> m); - break; - case BCLRI: - res = static_cast(static_cast(~(1ull << m)) & ws); - break; - case BSETI: - res = static_cast(static_cast(1ull << m) | ws); - break; - case BNEGI: - res = static_cast(static_cast(1ull << m) ^ ws); - break; - case BINSLI: { - int elem_size = 8 * sizeof(T); - int bits = m + 1; - if (bits == elem_size) { - res = static_cast(ws); - } else { - uint64_t mask = ((1ull << bits) - 1) << (elem_size - bits); - res = static_cast((static_cast(mask) & ws) | - (static_cast(~mask) & wd)); - } - } break; - case BINSRI: { - int elem_size = 8 * sizeof(T); - int bits = m + 1; - if (bits == elem_size) { - res = static_cast(ws); - } else { - uint64_t mask = (1ull << bits) - 1; - res = static_cast((static_cast(mask) & ws) | - 
(static_cast(~mask) & wd)); - } - } break; - case SAT_S: { -#define M_MAX_INT(x) static_cast((1LL << ((x)-1)) - 1) -#define M_MIN_INT(x) static_cast(-(1LL << ((x)-1))) - int shift = 64 - 8 * sizeof(T); - int64_t ws_i64 = (static_cast(ws) << shift) >> shift; - res = static_cast(ws_i64 < M_MIN_INT(m + 1) - ? M_MIN_INT(m + 1) - : ws_i64 > M_MAX_INT(m + 1) ? M_MAX_INT(m + 1) - : ws_i64); -#undef M_MAX_INT -#undef M_MIN_INT - } break; - case SAT_U: { -#define M_MAX_UINT(x) static_cast(-1ULL >> (64 - (x))) - uint64_t mask = static_cast(-1ULL >> (64 - 8 * sizeof(T))); - uint64_t ws_u64 = static_cast(ws) & mask; - res = static_cast(ws_u64 < M_MAX_UINT(m + 1) ? ws_u64 - : M_MAX_UINT(m + 1)); -#undef M_MAX_UINT - } break; - case SRARI: - if (!m) { - res = static_cast(ws); - } else { - res = static_cast(ArithmeticShiftRight(ws, m)) + - static_cast((ws >> (m - 1)) & 0x1); - } - break; - case SRLRI: - if (!m) { - res = static_cast(ws); - } else { - res = static_cast(static_cast(ws) >> m) + - static_cast((ws >> (m - 1)) & 0x1); - } - break; - default: - UNREACHABLE(); - } - return res; -} - -void Simulator::DecodeTypeMsaBIT() { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK(CpuFeatures::IsSupported(MIPS_SIMD)); - uint32_t opcode = instr_.InstructionBits() & kMsaBITMask; - int32_t m = instr_.MsaBitMValue(); - msa_reg_t wd, ws; - -#define MSA_BIT_DF(elem, num_of_lanes) \ - get_msa_register(instr_.WsValue(), ws.elem); \ - if (opcode == BINSLI || opcode == BINSRI) { \ - get_msa_register(instr_.WdValue(), wd.elem); \ - } \ - for (int i = 0; i < num_of_lanes; i++) { \ - wd.elem[i] = MsaBitInstrHelper(opcode, wd.elem[i], ws.elem[i], m); \ - } \ - set_msa_register(instr_.WdValue(), wd.elem); \ - TraceMSARegWr(wd.elem) - - switch (DecodeMsaDataFormat()) { - case MSA_BYTE: - DCHECK(m < kMSARegSize / kMSALanesByte); - MSA_BIT_DF(b, kMSALanesByte); - break; - case MSA_HALF: - DCHECK(m < kMSARegSize / kMSALanesHalf); - MSA_BIT_DF(h, kMSALanesHalf); - break; - case MSA_WORD: - DCHECK(m < kMSARegSize / kMSALanesWord); - MSA_BIT_DF(w, kMSALanesWord); - break; - case MSA_DWORD: - DCHECK(m < kMSARegSize / kMSALanesDword); - MSA_BIT_DF(d, kMSALanesDword); - break; - default: - UNREACHABLE(); - } -#undef MSA_BIT_DF -} - -void Simulator::DecodeTypeMsaMI10() { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK(CpuFeatures::IsSupported(MIPS_SIMD)); - uint32_t opcode = instr_.InstructionBits() & kMsaMI10Mask; - int32_t s10 = (static_cast(instr_.MsaImmMI10Value()) << 22) >> 22; - int32_t rs = get_register(instr_.WsValue()); - int32_t addr; - msa_reg_t wd; - -#define MSA_MI10_LOAD(elem, num_of_lanes, T) \ - for (int i = 0; i < num_of_lanes; ++i) { \ - addr = rs + (s10 + i) * sizeof(T); \ - wd.elem[i] = ReadMem(addr, instr_.instr()); \ - } \ - set_msa_register(instr_.WdValue(), wd.elem); - -#define MSA_MI10_STORE(elem, num_of_lanes, T) \ - get_msa_register(instr_.WdValue(), wd.elem); \ - for (int i = 0; i < num_of_lanes; ++i) { \ - addr = rs + (s10 + i) * sizeof(T); \ - WriteMem(addr, wd.elem[i], instr_.instr()); \ - } - - if (opcode == MSA_LD) { - switch (DecodeMsaDataFormat()) { - case MSA_BYTE: - MSA_MI10_LOAD(b, kMSALanesByte, int8_t); - break; - case MSA_HALF: - MSA_MI10_LOAD(h, kMSALanesHalf, int16_t); - break; - case MSA_WORD: - MSA_MI10_LOAD(w, kMSALanesWord, int32_t); - break; - case MSA_DWORD: - MSA_MI10_LOAD(d, kMSALanesDword, int64_t); - break; - default: - UNREACHABLE(); - } - } else if (opcode == MSA_ST) { - switch (DecodeMsaDataFormat()) { - case MSA_BYTE: - MSA_MI10_STORE(b, kMSALanesByte, int8_t); - break; - case 
MSA_HALF: - MSA_MI10_STORE(h, kMSALanesHalf, int16_t); - break; - case MSA_WORD: - MSA_MI10_STORE(w, kMSALanesWord, int32_t); - break; - case MSA_DWORD: - MSA_MI10_STORE(d, kMSALanesDword, int64_t); - break; - default: - UNREACHABLE(); - } - } else { - UNREACHABLE(); - } - -#undef MSA_MI10_LOAD -#undef MSA_MI10_STORE -} - -template -T Simulator::Msa3RInstrHelper(uint32_t opcode, T wd, T ws, T wt) { - using uT = typename std::make_unsigned::type; - T res; - T wt_modulo = wt % (sizeof(T) * 8); - switch (opcode) { - case SLL_MSA: - res = static_cast(ws << wt_modulo); - break; - case SRA_MSA: - res = static_cast(ArithmeticShiftRight(ws, wt_modulo)); - break; - case SRL_MSA: - res = static_cast(static_cast(ws) >> wt_modulo); - break; - case BCLR: - res = static_cast(static_cast(~(1ull << wt_modulo)) & ws); - break; - case BSET: - res = static_cast(static_cast(1ull << wt_modulo) | ws); - break; - case BNEG: - res = static_cast(static_cast(1ull << wt_modulo) ^ ws); - break; - case BINSL: { - int elem_size = 8 * sizeof(T); - int bits = wt_modulo + 1; - if (bits == elem_size) { - res = static_cast(ws); - } else { - uint64_t mask = ((1ull << bits) - 1) << (elem_size - bits); - res = static_cast((static_cast(mask) & ws) | - (static_cast(~mask) & wd)); - } - } break; - case BINSR: { - int elem_size = 8 * sizeof(T); - int bits = wt_modulo + 1; - if (bits == elem_size) { - res = static_cast(ws); - } else { - uint64_t mask = (1ull << bits) - 1; - res = static_cast((static_cast(mask) & ws) | - (static_cast(~mask) & wd)); - } - } break; - case ADDV: - res = ws + wt; - break; - case SUBV: - res = ws - wt; - break; - case MAX_S: - res = std::max(ws, wt); - break; - case MAX_U: - res = static_cast(std::max(static_cast(ws), static_cast(wt))); - break; - case MIN_S: - res = std::min(ws, wt); - break; - case MIN_U: - res = static_cast(std::min(static_cast(ws), static_cast(wt))); - break; - case MAX_A: - // We use negative abs in order to avoid problems - // with corner case for MIN_INT - res = Nabs(ws) < Nabs(wt) ? ws : wt; - break; - case MIN_A: - // We use negative abs in order to avoid problems - // with corner case for MIN_INT - res = Nabs(ws) > Nabs(wt) ? ws : wt; - break; - case CEQ: - res = static_cast(!Compare(ws, wt) ? -1ull : 0ull); - break; - case CLT_S: - res = static_cast((Compare(ws, wt) == -1) ? -1ull : 0ull); - break; - case CLT_U: - res = static_cast( - (Compare(static_cast(ws), static_cast(wt)) == -1) ? -1ull - : 0ull); - break; - case CLE_S: - res = static_cast((Compare(ws, wt) != 1) ? -1ull : 0ull); - break; - case CLE_U: - res = static_cast( - (Compare(static_cast(ws), static_cast(wt)) != 1) ? 
-1ull - : 0ull); - break; - case ADD_A: - res = static_cast(Abs(ws) + Abs(wt)); - break; - case ADDS_A: { - T ws_nabs = Nabs(ws); - T wt_nabs = Nabs(wt); - if (ws_nabs < -std::numeric_limits::max() - wt_nabs) { - res = std::numeric_limits::max(); - } else { - res = -(ws_nabs + wt_nabs); - } - } break; - case ADDS_S: - res = SaturateAdd(ws, wt); - break; - case ADDS_U: { - uT ws_u = static_cast(ws); - uT wt_u = static_cast(wt); - res = static_cast(SaturateAdd(ws_u, wt_u)); - } break; - case AVE_S: - res = static_cast((wt & ws) + ((wt ^ ws) >> 1)); - break; - case AVE_U: { - uT ws_u = static_cast(ws); - uT wt_u = static_cast(wt); - res = static_cast((wt_u & ws_u) + ((wt_u ^ ws_u) >> 1)); - } break; - case AVER_S: - res = static_cast((wt | ws) - ((wt ^ ws) >> 1)); - break; - case AVER_U: { - uT ws_u = static_cast(ws); - uT wt_u = static_cast(wt); - res = static_cast((wt_u | ws_u) - ((wt_u ^ ws_u) >> 1)); - } break; - case SUBS_S: - res = SaturateSub(ws, wt); - break; - case SUBS_U: { - uT ws_u = static_cast(ws); - uT wt_u = static_cast(wt); - res = static_cast(SaturateSub(ws_u, wt_u)); - } break; - case SUBSUS_U: { - uT wsu = static_cast(ws); - if (wt > 0) { - uT wtu = static_cast(wt); - if (wtu > wsu) { - res = 0; - } else { - res = static_cast(wsu - wtu); - } - } else { - if (wsu > std::numeric_limits::max() + wt) { - res = static_cast(std::numeric_limits::max()); - } else { - res = static_cast(wsu - wt); - } - } - } break; - case SUBSUU_S: { - uT wsu = static_cast(ws); - uT wtu = static_cast(wt); - uT wdu; - if (wsu > wtu) { - wdu = wsu - wtu; - if (wdu > std::numeric_limits::max()) { - res = std::numeric_limits::max(); - } else { - res = static_cast(wdu); - } - } else { - wdu = wtu - wsu; - CHECK(-std::numeric_limits::max() == - std::numeric_limits::min() + 1); - if (wdu <= std::numeric_limits::max()) { - res = -static_cast(wdu); - } else { - res = std::numeric_limits::min(); - } - } - } break; - case ASUB_S: - res = static_cast(Abs(ws - wt)); - break; - case ASUB_U: { - uT wsu = static_cast(ws); - uT wtu = static_cast(wt); - res = static_cast(wsu > wtu ? wsu - wtu : wtu - wsu); - } break; - case MULV: - res = ws * wt; - break; - case MADDV: - res = wd + ws * wt; - break; - case MSUBV: - res = wd - ws * wt; - break; - case DIV_S_MSA: - res = wt != 0 ? ws / wt : static_cast(Unpredictable); - break; - case DIV_U: - res = wt != 0 ? static_cast(static_cast(ws) / static_cast(wt)) - : static_cast(Unpredictable); - break; - case MOD_S: - res = wt != 0 ? ws % wt : static_cast(Unpredictable); - break; - case MOD_U: - res = wt != 0 ? static_cast(static_cast(ws) % static_cast(wt)) - : static_cast(Unpredictable); - break; - case DOTP_S: - case DOTP_U: - case DPADD_S: - case DPADD_U: - case DPSUB_S: - case DPSUB_U: - case SLD: - case SPLAT: - UNIMPLEMENTED(); - break; - case SRAR: { - int bit = wt_modulo == 0 ? 0 : (ws >> (wt_modulo - 1)) & 1; - res = static_cast(ArithmeticShiftRight(ws, wt_modulo) + bit); - } break; - case SRLR: { - uT wsu = static_cast(ws); - int bit = wt_modulo == 0 ? 
0 : (wsu >> (wt_modulo - 1)) & 1; - res = static_cast((wsu >> wt_modulo) + bit); - } break; - default: - UNREACHABLE(); - } - return res; -} - -template -void Msa3RInstrHelper_shuffle(const uint32_t opcode, T_reg ws, T_reg wt, - T_reg wd, const int i, const int num_of_lanes) { - T_int *ws_p, *wt_p, *wd_p; - ws_p = reinterpret_cast(ws); - wt_p = reinterpret_cast(wt); - wd_p = reinterpret_cast(wd); - switch (opcode) { - case PCKEV: - wd_p[i] = wt_p[2 * i]; - wd_p[i + num_of_lanes / 2] = ws_p[2 * i]; - break; - case PCKOD: - wd_p[i] = wt_p[2 * i + 1]; - wd_p[i + num_of_lanes / 2] = ws_p[2 * i + 1]; - break; - case ILVL: - wd_p[2 * i] = wt_p[i + num_of_lanes / 2]; - wd_p[2 * i + 1] = ws_p[i + num_of_lanes / 2]; - break; - case ILVR: - wd_p[2 * i] = wt_p[i]; - wd_p[2 * i + 1] = ws_p[i]; - break; - case ILVEV: - wd_p[2 * i] = wt_p[2 * i]; - wd_p[2 * i + 1] = ws_p[2 * i]; - break; - case ILVOD: - wd_p[2 * i] = wt_p[2 * i + 1]; - wd_p[2 * i + 1] = ws_p[2 * i + 1]; - break; - case VSHF: { - const int mask_not_valid = 0xC0; - const int mask_6_bits = 0x3F; - if ((wd_p[i] & mask_not_valid)) { - wd_p[i] = 0; - } else { - int k = (wd_p[i] & mask_6_bits) % (num_of_lanes * 2); - wd_p[i] = k >= num_of_lanes ? ws_p[k - num_of_lanes] : wt_p[k]; - } - } break; - default: - UNREACHABLE(); - } -} - -template -void Msa3RInstrHelper_horizontal(const uint32_t opcode, T_reg ws, T_reg wt, - T_reg wd, const int i, - const int num_of_lanes) { - using T_uint = typename std::make_unsigned::type; - using T_smaller_uint = typename std::make_unsigned::type; - T_int* wd_p; - T_smaller_int *ws_p, *wt_p; - ws_p = reinterpret_cast(ws); - wt_p = reinterpret_cast(wt); - wd_p = reinterpret_cast(wd); - T_uint* wd_pu; - T_smaller_uint *ws_pu, *wt_pu; - ws_pu = reinterpret_cast(ws); - wt_pu = reinterpret_cast(wt); - wd_pu = reinterpret_cast(wd); - switch (opcode) { - case HADD_S: - wd_p[i] = - static_cast(ws_p[2 * i + 1]) + static_cast(wt_p[2 * i]); - break; - case HADD_U: - wd_pu[i] = static_cast(ws_pu[2 * i + 1]) + - static_cast(wt_pu[2 * i]); - break; - case HSUB_S: - wd_p[i] = - static_cast(ws_p[2 * i + 1]) - static_cast(wt_p[2 * i]); - break; - case HSUB_U: - wd_pu[i] = static_cast(ws_pu[2 * i + 1]) - - static_cast(wt_pu[2 * i]); - break; - default: - UNREACHABLE(); - } -} - -void Simulator::DecodeTypeMsa3R() { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK(CpuFeatures::IsSupported(MIPS_SIMD)); - uint32_t opcode = instr_.InstructionBits() & kMsa3RMask; - msa_reg_t ws, wd, wt; - get_msa_register(ws_reg(), &ws); - get_msa_register(wt_reg(), &wt); - get_msa_register(wd_reg(), &wd); - switch (opcode) { - case HADD_S: - case HADD_U: - case HSUB_S: - case HSUB_U: -#define HORIZONTAL_ARITHMETIC_DF(num_of_lanes, int_type, lesser_int_type) \ - for (int i = 0; i < num_of_lanes; ++i) { \ - Msa3RInstrHelper_horizontal( \ - opcode, &ws, &wt, &wd, i, num_of_lanes); \ - } - switch (DecodeMsaDataFormat()) { - case MSA_HALF: - HORIZONTAL_ARITHMETIC_DF(kMSALanesHalf, int16_t, int8_t); - break; - case MSA_WORD: - HORIZONTAL_ARITHMETIC_DF(kMSALanesWord, int32_t, int16_t); - break; - case MSA_DWORD: - HORIZONTAL_ARITHMETIC_DF(kMSALanesDword, int64_t, int32_t); - break; - default: - UNREACHABLE(); - } - break; -#undef HORIZONTAL_ARITHMETIC_DF - case VSHF: -#define VSHF_DF(num_of_lanes, int_type) \ - for (int i = 0; i < num_of_lanes; ++i) { \ - Msa3RInstrHelper_shuffle(opcode, &ws, &wt, &wd, i, \ - num_of_lanes); \ - } - switch (DecodeMsaDataFormat()) { - case MSA_BYTE: - VSHF_DF(kMSALanesByte, int8_t); - break; - case MSA_HALF: - 
VSHF_DF(kMSALanesHalf, int16_t); - break; - case MSA_WORD: - VSHF_DF(kMSALanesWord, int32_t); - break; - case MSA_DWORD: - VSHF_DF(kMSALanesDword, int64_t); - break; - default: - UNREACHABLE(); - } -#undef VSHF_DF - break; - case PCKEV: - case PCKOD: - case ILVL: - case ILVR: - case ILVEV: - case ILVOD: -#define INTERLEAVE_PACK_DF(num_of_lanes, int_type) \ - for (int i = 0; i < num_of_lanes / 2; ++i) { \ - Msa3RInstrHelper_shuffle(opcode, &ws, &wt, &wd, i, \ - num_of_lanes); \ - } - switch (DecodeMsaDataFormat()) { - case MSA_BYTE: - INTERLEAVE_PACK_DF(kMSALanesByte, int8_t); - break; - case MSA_HALF: - INTERLEAVE_PACK_DF(kMSALanesHalf, int16_t); - break; - case MSA_WORD: - INTERLEAVE_PACK_DF(kMSALanesWord, int32_t); - break; - case MSA_DWORD: - INTERLEAVE_PACK_DF(kMSALanesDword, int64_t); - break; - default: - UNREACHABLE(); - } - break; -#undef INTERLEAVE_PACK_DF - default: -#define MSA_3R_DF(elem, num_of_lanes) \ - for (int i = 0; i < num_of_lanes; i++) { \ - wd.elem[i] = Msa3RInstrHelper(opcode, wd.elem[i], ws.elem[i], wt.elem[i]); \ - } - - switch (DecodeMsaDataFormat()) { - case MSA_BYTE: - MSA_3R_DF(b, kMSALanesByte); - break; - case MSA_HALF: - MSA_3R_DF(h, kMSALanesHalf); - break; - case MSA_WORD: - MSA_3R_DF(w, kMSALanesWord); - break; - case MSA_DWORD: - MSA_3R_DF(d, kMSALanesDword); - break; - default: - UNREACHABLE(); - } -#undef MSA_3R_DF - break; - } - set_msa_register(wd_reg(), &wd); - TraceMSARegWr(&wd); -} - -template -void Msa3RFInstrHelper(uint32_t opcode, T_reg ws, T_reg wt, T_reg* wd) { - const T_int all_ones = static_cast(-1); - const T_fp s_element = *reinterpret_cast(&ws); - const T_fp t_element = *reinterpret_cast(&wt); - switch (opcode) { - case FCUN: { - if (std::isnan(s_element) || std::isnan(t_element)) { - *wd = all_ones; - } else { - *wd = 0; - } - } break; - case FCEQ: { - if (s_element != t_element || std::isnan(s_element) || - std::isnan(t_element)) { - *wd = 0; - } else { - *wd = all_ones; - } - } break; - case FCUEQ: { - if (s_element == t_element || std::isnan(s_element) || - std::isnan(t_element)) { - *wd = all_ones; - } else { - *wd = 0; - } - } break; - case FCLT: { - if (s_element >= t_element || std::isnan(s_element) || - std::isnan(t_element)) { - *wd = 0; - } else { - *wd = all_ones; - } - } break; - case FCULT: { - if (s_element < t_element || std::isnan(s_element) || - std::isnan(t_element)) { - *wd = all_ones; - } else { - *wd = 0; - } - } break; - case FCLE: { - if (s_element > t_element || std::isnan(s_element) || - std::isnan(t_element)) { - *wd = 0; - } else { - *wd = all_ones; - } - } break; - case FCULE: { - if (s_element <= t_element || std::isnan(s_element) || - std::isnan(t_element)) { - *wd = all_ones; - } else { - *wd = 0; - } - } break; - case FCOR: { - if (std::isnan(s_element) || std::isnan(t_element)) { - *wd = 0; - } else { - *wd = all_ones; - } - } break; - case FCUNE: { - if (s_element != t_element || std::isnan(s_element) || - std::isnan(t_element)) { - *wd = all_ones; - } else { - *wd = 0; - } - } break; - case FCNE: { - if (s_element == t_element || std::isnan(s_element) || - std::isnan(t_element)) { - *wd = 0; - } else { - *wd = all_ones; - } - } break; - case FADD: - *wd = base::bit_cast(s_element + t_element); - break; - case FSUB: - *wd = base::bit_cast(s_element - t_element); - break; - case FMUL: - *wd = base::bit_cast(s_element * t_element); - break; - case FDIV: { - if (t_element == 0) { - *wd = base::bit_cast(std::numeric_limits::quiet_NaN()); - } else { - *wd = base::bit_cast(s_element / t_element); - } - } 
break; - case FMADD: - *wd = base::bit_cast( - std::fma(s_element, t_element, *reinterpret_cast(wd))); - break; - case FMSUB: - *wd = base::bit_cast( - std::fma(s_element, -t_element, *reinterpret_cast(wd))); - break; - case FEXP2: - *wd = base::bit_cast(std::ldexp(s_element, static_cast(wt))); - break; - case FMIN: - *wd = base::bit_cast(std::min(s_element, t_element)); - break; - case FMAX: - *wd = base::bit_cast(std::max(s_element, t_element)); - break; - case FMIN_A: { - *wd = base::bit_cast( - std::fabs(s_element) < std::fabs(t_element) ? s_element : t_element); - } break; - case FMAX_A: { - *wd = base::bit_cast( - std::fabs(s_element) > std::fabs(t_element) ? s_element : t_element); - } break; - case FSOR: - case FSUNE: - case FSNE: - case FSAF: - case FSUN: - case FSEQ: - case FSUEQ: - case FSLT: - case FSULT: - case FSLE: - case FSULE: - UNIMPLEMENTED(); - break; - default: - UNREACHABLE(); - } -} - -template -void Msa3RFInstrHelper2(uint32_t opcode, T_reg ws, T_reg wt, T_reg* wd) { - // using T_uint = typename std::make_unsigned::type; - using T_uint_dbl = typename std::make_unsigned::type; - const T_int max_int = std::numeric_limits::max(); - const T_int min_int = std::numeric_limits::min(); - const int shift = kBitsPerByte * sizeof(T_int) - 1; - const T_int_dbl reg_s = ws; - const T_int_dbl reg_t = wt; - T_int_dbl product, result; - product = reg_s * reg_t; - switch (opcode) { - case MUL_Q: { - const T_int_dbl min_fix_dbl = - base::bit_cast(std::numeric_limits::min()) >> - 1U; - const T_int_dbl max_fix_dbl = std::numeric_limits::max() >> 1U; - if (product == min_fix_dbl) { - product = max_fix_dbl; - } - *wd = static_cast(product >> shift); - } break; - case MADD_Q: { - result = (product + (static_cast(*wd) << shift)) >> shift; - *wd = static_cast( - result > max_int ? max_int : result < min_int ? min_int : result); - } break; - case MSUB_Q: { - result = (-product + (static_cast(*wd) << shift)) >> shift; - *wd = static_cast( - result > max_int ? max_int : result < min_int ? min_int : result); - } break; - case MULR_Q: { - const T_int_dbl min_fix_dbl = - base::bit_cast(std::numeric_limits::min()) >> - 1U; - const T_int_dbl max_fix_dbl = std::numeric_limits::max() >> 1U; - if (product == min_fix_dbl) { - *wd = static_cast(max_fix_dbl >> shift); - break; - } - *wd = static_cast((product + (1 << (shift - 1))) >> shift); - } break; - case MADDR_Q: { - result = (product + (static_cast(*wd) << shift) + - (1 << (shift - 1))) >> - shift; - *wd = static_cast( - result > max_int ? max_int : result < min_int ? min_int : result); - } break; - case MSUBR_Q: { - result = (-product + (static_cast(*wd) << shift) + - (1 << (shift - 1))) >> - shift; - *wd = static_cast( - result > max_int ? max_int : result < min_int ? 
min_int : result); - } break; - default: - UNREACHABLE(); - } -} - -void Simulator::DecodeTypeMsa3RF() { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK(CpuFeatures::IsSupported(MIPS_SIMD)); - uint32_t opcode = instr_.InstructionBits() & kMsa3RFMask; - msa_reg_t wd, ws, wt; - if (opcode != FCAF) { - get_msa_register(ws_reg(), &ws); - get_msa_register(wt_reg(), &wt); - } - switch (opcode) { - case FCAF: - wd.d[0] = 0; - wd.d[1] = 0; - break; - case FEXDO: -#define PACK_FLOAT16(sign, exp, frac) \ - static_cast(((sign) << 15) + ((exp) << 10) + (frac)) -#define FEXDO_DF(source, dst) \ - do { \ - element = source; \ - aSign = element >> 31; \ - aExp = element >> 23 & 0xFF; \ - aFrac = element & 0x007FFFFF; \ - if (aExp == 0xFF) { \ - if (aFrac) { \ - /* Input is a NaN */ \ - dst = 0x7DFFU; \ - break; \ - } \ - /* Infinity */ \ - dst = PACK_FLOAT16(aSign, 0x1F, 0); \ - break; \ - } else if (aExp == 0 && aFrac == 0) { \ - dst = PACK_FLOAT16(aSign, 0, 0); \ - break; \ - } else { \ - int maxexp = 29; \ - uint32_t mask; \ - uint32_t increment; \ - bool rounding_bumps_exp; \ - aFrac |= 0x00800000; \ - aExp -= 0x71; \ - if (aExp < 1) { \ - /* Will be denormal in halfprec */ \ - mask = 0x00FFFFFF; \ - if (aExp >= -11) { \ - mask >>= 11 + aExp; \ - } \ - } else { \ - /* Normal number in halfprec */ \ - mask = 0x00001FFF; \ - } \ - switch (MSACSR_ & 3) { \ - case kRoundToNearest: \ - increment = (mask + 1) >> 1; \ - if ((aFrac & mask) == increment) { \ - increment = aFrac & (increment << 1); \ - } \ - break; \ - case kRoundToPlusInf: \ - increment = aSign ? 0 : mask; \ - break; \ - case kRoundToMinusInf: \ - increment = aSign ? mask : 0; \ - break; \ - case kRoundToZero: \ - increment = 0; \ - break; \ - } \ - rounding_bumps_exp = (aFrac + increment >= 0x01000000); \ - if (aExp > maxexp || (aExp == maxexp && rounding_bumps_exp)) { \ - dst = PACK_FLOAT16(aSign, 0x1F, 0); \ - break; \ - } \ - aFrac += increment; \ - if (rounding_bumps_exp) { \ - aFrac >>= 1; \ - aExp++; \ - } \ - if (aExp < -10) { \ - dst = PACK_FLOAT16(aSign, 0, 0); \ - break; \ - } \ - if (aExp < 0) { \ - aFrac >>= -aExp; \ - aExp = 0; \ - } \ - dst = PACK_FLOAT16(aSign, aExp, aFrac >> 13); \ - } \ - } while (0); - switch (DecodeMsaDataFormat()) { - case MSA_HALF: - for (int i = 0; i < kMSALanesWord; i++) { - uint_fast32_t element; - uint_fast32_t aSign, aFrac; - int_fast32_t aExp; - FEXDO_DF(ws.uw[i], wd.uh[i + kMSALanesHalf / 2]) - FEXDO_DF(wt.uw[i], wd.uh[i]) - } - break; - case MSA_WORD: - for (int i = 0; i < kMSALanesDword; i++) { - wd.w[i + kMSALanesWord / 2] = base::bit_cast( - static_cast(base::bit_cast(ws.d[i]))); - wd.w[i] = base::bit_cast( - static_cast(base::bit_cast(wt.d[i]))); - } - break; - default: - UNREACHABLE(); - } - break; -#undef PACK_FLOAT16 -#undef FEXDO_DF - case FTQ: -#define FTQ_DF(source, dst, fp_type, int_type) \ - element = base::bit_cast(source) * \ - (1U << (sizeof(int_type) * kBitsPerByte - 1)); \ - if (element > std::numeric_limits::max()) { \ - dst = std::numeric_limits::max(); \ - } else if (element < std::numeric_limits::min()) { \ - dst = std::numeric_limits::min(); \ - } else if (std::isnan(element)) { \ - dst = 0; \ - } else { \ - int_type fixed_point; \ - round_according_to_msacsr(element, &element, &fixed_point); \ - dst = fixed_point; \ - } - - switch (DecodeMsaDataFormat()) { - case MSA_HALF: - for (int i = 0; i < kMSALanesWord; i++) { - float element; - FTQ_DF(ws.w[i], wd.h[i + kMSALanesHalf / 2], float, int16_t) - FTQ_DF(wt.w[i], wd.h[i], float, int16_t) - } - break; - case MSA_WORD: - 
double element; - for (int i = 0; i < kMSALanesDword; i++) { - FTQ_DF(ws.d[i], wd.w[i + kMSALanesWord / 2], double, int32_t) - FTQ_DF(wt.d[i], wd.w[i], double, int32_t) - } - break; - default: - UNREACHABLE(); - } - break; -#undef FTQ_DF -#define MSA_3RF_DF(T1, T2, Lanes, ws, wt, wd) \ - for (int i = 0; i < Lanes; i++) { \ - Msa3RFInstrHelper(opcode, ws, wt, &(wd)); \ - } -#define MSA_3RF_DF2(T1, T2, Lanes, ws, wt, wd) \ - for (int i = 0; i < Lanes; i++) { \ - Msa3RFInstrHelper2(opcode, ws, wt, &(wd)); \ - } - case MADD_Q: - case MSUB_Q: - case MADDR_Q: - case MSUBR_Q: - get_msa_register(wd_reg(), &wd); - V8_FALLTHROUGH; - case MUL_Q: - case MULR_Q: - switch (DecodeMsaDataFormat()) { - case MSA_HALF: - MSA_3RF_DF2(int16_t, int32_t, kMSALanesHalf, ws.h[i], wt.h[i], - wd.h[i]) - break; - case MSA_WORD: - MSA_3RF_DF2(int32_t, int64_t, kMSALanesWord, ws.w[i], wt.w[i], - wd.w[i]) - break; - default: - UNREACHABLE(); - } - break; - default: - if (opcode == FMADD || opcode == FMSUB) { - get_msa_register(wd_reg(), &wd); - } - switch (DecodeMsaDataFormat()) { - case MSA_WORD: - MSA_3RF_DF(int32_t, float, kMSALanesWord, ws.w[i], wt.w[i], wd.w[i]) - break; - case MSA_DWORD: - MSA_3RF_DF(int64_t, double, kMSALanesDword, ws.d[i], wt.d[i], wd.d[i]) - break; - default: - UNREACHABLE(); - } - break; -#undef MSA_3RF_DF -#undef MSA_3RF_DF2 - } - set_msa_register(wd_reg(), &wd); - TraceMSARegWr(&wd); -} - -void Simulator::DecodeTypeMsaVec() { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK(CpuFeatures::IsSupported(MIPS_SIMD)); - uint32_t opcode = instr_.InstructionBits() & kMsaVECMask; - msa_reg_t wd, ws, wt; - - get_msa_register(instr_.WsValue(), ws.w); - get_msa_register(instr_.WtValue(), wt.w); - if (opcode == BMNZ_V || opcode == BMZ_V || opcode == BSEL_V) { - get_msa_register(instr_.WdValue(), wd.w); - } - - for (int i = 0; i < kMSALanesWord; i++) { - switch (opcode) { - case AND_V: - wd.w[i] = ws.w[i] & wt.w[i]; - break; - case OR_V: - wd.w[i] = ws.w[i] | wt.w[i]; - break; - case NOR_V: - wd.w[i] = ~(ws.w[i] | wt.w[i]); - break; - case XOR_V: - wd.w[i] = ws.w[i] ^ wt.w[i]; - break; - case BMNZ_V: - wd.w[i] = (wt.w[i] & ws.w[i]) | (~wt.w[i] & wd.w[i]); - break; - case BMZ_V: - wd.w[i] = (~wt.w[i] & ws.w[i]) | (wt.w[i] & wd.w[i]); - break; - case BSEL_V: - wd.w[i] = (~wd.w[i] & ws.w[i]) | (wd.w[i] & wt.w[i]); - break; - default: - UNREACHABLE(); - } - } - set_msa_register(instr_.WdValue(), wd.w); - TraceMSARegWr(wd.d); -} - -void Simulator::DecodeTypeMsa2R() { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK(CpuFeatures::IsSupported(MIPS_SIMD)); - uint32_t opcode = instr_.InstructionBits() & kMsa2RMask; - msa_reg_t wd, ws; - switch (opcode) { - case FILL: - switch (DecodeMsaDataFormat()) { - case MSA_BYTE: { - int32_t rs = get_register(instr_.WsValue()); - for (int i = 0; i < kMSALanesByte; i++) { - wd.b[i] = rs & 0xFFu; - } - set_msa_register(instr_.WdValue(), wd.b); - TraceMSARegWr(wd.b); - break; - } - case MSA_HALF: { - int32_t rs = get_register(instr_.WsValue()); - for (int i = 0; i < kMSALanesHalf; i++) { - wd.h[i] = rs & 0xFFFFu; - } - set_msa_register(instr_.WdValue(), wd.h); - TraceMSARegWr(wd.h); - break; - } - case MSA_WORD: { - int32_t rs = get_register(instr_.WsValue()); - for (int i = 0; i < kMSALanesWord; i++) { - wd.w[i] = rs; - } - set_msa_register(instr_.WdValue(), wd.w); - TraceMSARegWr(wd.w); - break; - } - default: - UNREACHABLE(); - } - break; - case PCNT: -#define PCNT_DF(elem, num_of_lanes) \ - get_msa_register(instr_.WsValue(), ws.elem); \ - for (int i = 0; i < num_of_lanes; i++) 
{ \ - uint64_t u64elem = static_cast(ws.elem[i]); \ - wd.elem[i] = base::bits::CountPopulation(u64elem); \ - } \ - set_msa_register(instr_.WdValue(), wd.elem); \ - TraceMSARegWr(wd.elem) - - switch (DecodeMsaDataFormat()) { - case MSA_BYTE: - PCNT_DF(ub, kMSALanesByte); - break; - case MSA_HALF: - PCNT_DF(uh, kMSALanesHalf); - break; - case MSA_WORD: - PCNT_DF(uw, kMSALanesWord); - break; - case MSA_DWORD: - PCNT_DF(ud, kMSALanesDword); - break; - default: - UNREACHABLE(); - } -#undef PCNT_DF - break; - case NLOC: -#define NLOC_DF(elem, num_of_lanes) \ - get_msa_register(instr_.WsValue(), ws.elem); \ - for (int i = 0; i < num_of_lanes; i++) { \ - const uint64_t mask = (num_of_lanes == kMSALanesDword) \ - ? UINT64_MAX \ - : (1ULL << (kMSARegSize / num_of_lanes)) - 1; \ - uint64_t u64elem = static_cast(~ws.elem[i]) & mask; \ - wd.elem[i] = base::bits::CountLeadingZeros64(u64elem) - \ - (64 - kMSARegSize / num_of_lanes); \ - } \ - set_msa_register(instr_.WdValue(), wd.elem); \ - TraceMSARegWr(wd.elem) - - switch (DecodeMsaDataFormat()) { - case MSA_BYTE: - NLOC_DF(ub, kMSALanesByte); - break; - case MSA_HALF: - NLOC_DF(uh, kMSALanesHalf); - break; - case MSA_WORD: - NLOC_DF(uw, kMSALanesWord); - break; - case MSA_DWORD: - NLOC_DF(ud, kMSALanesDword); - break; - default: - UNREACHABLE(); - } -#undef NLOC_DF - break; - case NLZC: -#define NLZC_DF(elem, num_of_lanes) \ - get_msa_register(instr_.WsValue(), ws.elem); \ - for (int i = 0; i < num_of_lanes; i++) { \ - uint64_t u64elem = static_cast(ws.elem[i]); \ - wd.elem[i] = base::bits::CountLeadingZeros64(u64elem) - \ - (64 - kMSARegSize / num_of_lanes); \ - } \ - set_msa_register(instr_.WdValue(), wd.elem); \ - TraceMSARegWr(wd.elem) - - switch (DecodeMsaDataFormat()) { - case MSA_BYTE: - NLZC_DF(ub, kMSALanesByte); - break; - case MSA_HALF: - NLZC_DF(uh, kMSALanesHalf); - break; - case MSA_WORD: - NLZC_DF(uw, kMSALanesWord); - break; - case MSA_DWORD: - NLZC_DF(ud, kMSALanesDword); - break; - default: - UNREACHABLE(); - } -#undef NLZC_DF - break; - default: - UNREACHABLE(); - } -} - -#define BIT(n) (0x1LL << n) -#define QUIET_BIT_S(nan) (base::bit_cast(nan) & BIT(22)) -#define QUIET_BIT_D(nan) (base::bit_cast(nan) & BIT(51)) -static inline bool isSnan(float fp) { return !QUIET_BIT_S(fp); } -static inline bool isSnan(double fp) { return !QUIET_BIT_D(fp); } -#undef QUIET_BIT_S -#undef QUIET_BIT_D - -template -T_int Msa2RFInstrHelper(uint32_t opcode, T_src src, T_dst* dst, - Simulator* sim) { - using T_uint = typename std::make_unsigned::type; - switch (opcode) { - case FCLASS: { -#define SNAN_BIT BIT(0) -#define QNAN_BIT BIT(1) -#define NEG_INFINITY_BIT BIT(2) -#define NEG_NORMAL_BIT BIT(3) -#define NEG_SUBNORMAL_BIT BIT(4) -#define NEG_ZERO_BIT BIT(5) -#define POS_INFINITY_BIT BIT(6) -#define POS_NORMAL_BIT BIT(7) -#define POS_SUBNORMAL_BIT BIT(8) -#define POS_ZERO_BIT BIT(9) - T_fp element = *reinterpret_cast(&src); - switch (std::fpclassify(element)) { - case FP_INFINITE: - if (std::signbit(element)) { - *dst = NEG_INFINITY_BIT; - } else { - *dst = POS_INFINITY_BIT; - } - break; - case FP_NAN: - if (isSnan(element)) { - *dst = SNAN_BIT; - } else { - *dst = QNAN_BIT; - } - break; - case FP_NORMAL: - if (std::signbit(element)) { - *dst = NEG_NORMAL_BIT; - } else { - *dst = POS_NORMAL_BIT; - } - break; - case FP_SUBNORMAL: - if (std::signbit(element)) { - *dst = NEG_SUBNORMAL_BIT; - } else { - *dst = POS_SUBNORMAL_BIT; - } - break; - case FP_ZERO: - if (std::signbit(element)) { - *dst = NEG_ZERO_BIT; - } else { - *dst = POS_ZERO_BIT; - } - break; 
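
// [Illustrative aside -- not part of the original simulator source.] FCLASS
// above builds a one-hot, 10-bit class mask: exactly one of {SNaN, QNaN,
// -inf, -normal, -subnormal, -0, +inf, +normal, +subnormal, +0} is set, with
// the same BIT(0)..BIT(9) assignment as the macros above. A float-only sketch:

    #include <cmath>
    #include <cstdint>

    uint32_t ClassifyFloat(float f) {
      const bool neg = std::signbit(f);
      switch (std::fpclassify(f)) {
        case FP_INFINITE:  return neg ? 1u << 2 : 1u << 6;
        case FP_NORMAL:    return neg ? 1u << 3 : 1u << 7;
        case FP_SUBNORMAL: return neg ? 1u << 4 : 1u << 8;
        case FP_ZERO:      return neg ? 1u << 5 : 1u << 9;
        default:           return 1u << 1;  // NaN; the SNaN/QNaN split also
                                            // needs the quiet bit, as above.
      }
    }
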
- default: - UNREACHABLE(); - } - break; - } -#undef BIT -#undef SNAN_BIT -#undef QNAN_BIT -#undef NEG_INFINITY_BIT -#undef NEG_NORMAL_BIT -#undef NEG_SUBNORMAL_BIT -#undef NEG_ZERO_BIT -#undef POS_INFINITY_BIT -#undef POS_NORMAL_BIT -#undef POS_SUBNORMAL_BIT -#undef POS_ZERO_BIT - case FTRUNC_S: { - T_fp element = base::bit_cast(src); - const T_int max_int = std::numeric_limits::max(); - const T_int min_int = std::numeric_limits::min(); - if (std::isnan(element)) { - *dst = 0; - } else if (element >= static_cast(max_int) || element <= min_int) { - *dst = element >= static_cast(max_int) ? max_int : min_int; - } else { - *dst = static_cast(std::trunc(element)); - } - break; - } - case FTRUNC_U: { - T_fp element = base::bit_cast(src); - const T_uint max_int = std::numeric_limits::max(); - if (std::isnan(element)) { - *dst = 0; - } else if (element >= static_cast(max_int) || element <= 0) { - *dst = element >= static_cast(max_int) ? max_int : 0; - } else { - *dst = static_cast(std::trunc(element)); - } - break; - } - case FSQRT: { - T_fp element = base::bit_cast(src); - if (element < 0 || std::isnan(element)) { - *dst = base::bit_cast(std::numeric_limits::quiet_NaN()); - } else { - *dst = base::bit_cast(std::sqrt(element)); - } - break; - } - case FRSQRT: { - T_fp element = base::bit_cast(src); - if (element < 0 || std::isnan(element)) { - *dst = base::bit_cast(std::numeric_limits::quiet_NaN()); - } else { - *dst = base::bit_cast(1 / std::sqrt(element)); - } - break; - } - case FRCP: { - T_fp element = base::bit_cast(src); - if (std::isnan(element)) { - *dst = base::bit_cast(std::numeric_limits::quiet_NaN()); - } else { - *dst = base::bit_cast(1 / element); - } - break; - } - case FRINT: { - T_fp element = base::bit_cast(src); - if (std::isnan(element)) { - *dst = base::bit_cast(std::numeric_limits::quiet_NaN()); - } else { - T_int dummy; - sim->round_according_to_msacsr(element, &element, &dummy); - *dst = base::bit_cast(element); - } - break; - } - case FLOG2: { - T_fp element = base::bit_cast(src); - switch (std::fpclassify(element)) { - case FP_NORMAL: - case FP_SUBNORMAL: - *dst = base::bit_cast(std::logb(element)); - break; - case FP_ZERO: - *dst = base::bit_cast(-std::numeric_limits::infinity()); - break; - case FP_NAN: - *dst = base::bit_cast(std::numeric_limits::quiet_NaN()); - break; - case FP_INFINITE: - if (element < 0) { - *dst = - base::bit_cast(std::numeric_limits::quiet_NaN()); - } else { - *dst = base::bit_cast(std::numeric_limits::infinity()); - } - break; - default: - UNREACHABLE(); - } - break; - } - case FTINT_S: { - T_fp element = base::bit_cast(src); - const T_int max_int = std::numeric_limits::max(); - const T_int min_int = std::numeric_limits::min(); - if (std::isnan(element)) { - *dst = 0; - } else if (element < min_int || element > static_cast(max_int)) { - *dst = element > static_cast(max_int) ? max_int : min_int; - } else { - sim->round_according_to_msacsr(element, &element, dst); - } - break; - } - case FTINT_U: { - T_fp element = base::bit_cast(src); - const T_uint max_uint = std::numeric_limits::max(); - if (std::isnan(element)) { - *dst = 0; - } else if (element < 0 || element > static_cast(max_uint)) { - *dst = element > static_cast(max_uint) ? 
max_uint : 0; - } else { - T_uint res; - sim->round_according_to_msacsr(element, &element, &res); - *dst = *reinterpret_cast(&res); - } - break; - } - case FFINT_S: - *dst = base::bit_cast(static_cast(src)); - break; - case FFINT_U: - using uT_src = typename std::make_unsigned::type; - *dst = - base::bit_cast(static_cast(base::bit_cast(src))); - break; - default: - UNREACHABLE(); - } - return 0; -} - -template -T_int Msa2RFInstrHelper2(uint32_t opcode, T_reg ws, int i) { - switch (opcode) { -#define EXTRACT_FLOAT16_SIGN(fp16) (fp16 >> 15) -#define EXTRACT_FLOAT16_EXP(fp16) (fp16 >> 10 & 0x1F) -#define EXTRACT_FLOAT16_FRAC(fp16) (fp16 & 0x3FF) -#define PACK_FLOAT32(sign, exp, frac) \ - static_cast(((sign) << 31) + ((exp) << 23) + (frac)) -#define FEXUP_DF(src_index) \ - uint_fast16_t element = ws.uh[src_index]; \ - uint_fast32_t aSign, aFrac; \ - int_fast32_t aExp; \ - aSign = EXTRACT_FLOAT16_SIGN(element); \ - aExp = EXTRACT_FLOAT16_EXP(element); \ - aFrac = EXTRACT_FLOAT16_FRAC(element); \ - if (V8_LIKELY(aExp && aExp != 0x1F)) { \ - return PACK_FLOAT32(aSign, aExp + 0x70, aFrac << 13); \ - } else if (aExp == 0x1F) { \ - if (aFrac) { \ - return base::bit_cast(std::numeric_limits::quiet_NaN()); \ - } else { \ - return base::bit_cast( \ - std::numeric_limits::infinity()) | \ - static_cast(aSign) << 31; \ - } \ - } else { \ - if (aFrac == 0) { \ - return PACK_FLOAT32(aSign, 0, 0); \ - } else { \ - int_fast16_t shiftCount = \ - base::bits::CountLeadingZeros32(static_cast(aFrac)) - 21; \ - aFrac <<= shiftCount; \ - aExp = -shiftCount; \ - return PACK_FLOAT32(aSign, aExp + 0x70, aFrac << 13); \ - } \ - } - case FEXUPL: - if (std::is_same::value) { - FEXUP_DF(i + kMSALanesWord) - } else { - return base::bit_cast(static_cast( - base::bit_cast(ws.w[i + kMSALanesDword]))); - } - case FEXUPR: - if (std::is_same::value) { - FEXUP_DF(i) - } else { - return base::bit_cast( - static_cast(base::bit_cast(ws.w[i]))); - } - case FFQL: { - if (std::is_same::value) { - return base::bit_cast( - static_cast(ws.h[i + kMSALanesWord]) / (1U << 15)); - } else { - return base::bit_cast( - static_cast(ws.w[i + kMSALanesDword]) / (1U << 31)); - } - break; - } - case FFQR: { - if (std::is_same::value) { - return base::bit_cast(static_cast(ws.h[i]) / - (1U << 15)); - } else { - return base::bit_cast(static_cast(ws.w[i]) / - (1U << 31)); - } - break; - default: - UNREACHABLE(); - } - } -#undef EXTRACT_FLOAT16_SIGN -#undef EXTRACT_FLOAT16_EXP -#undef EXTRACT_FLOAT16_FRAC -#undef PACK_FLOAT32 -#undef FEXUP_DF -} - -void Simulator::DecodeTypeMsa2RF() { - DCHECK(IsMipsArchVariant(kMips32r6)); - DCHECK(CpuFeatures::IsSupported(MIPS_SIMD)); - uint32_t opcode = instr_.InstructionBits() & kMsa2RFMask; - msa_reg_t wd, ws; - get_msa_register(ws_reg(), &ws); - if (opcode == FEXUPL || opcode == FEXUPR || opcode == FFQL || - opcode == FFQR) { - switch (DecodeMsaDataFormat()) { - case MSA_WORD: - for (int i = 0; i < kMSALanesWord; i++) { - wd.w[i] = Msa2RFInstrHelper2(opcode, ws, i); - } - break; - case MSA_DWORD: - for (int i = 0; i < kMSALanesDword; i++) { - wd.d[i] = Msa2RFInstrHelper2(opcode, ws, i); - } - break; - default: - UNREACHABLE(); - } - } else { - switch (DecodeMsaDataFormat()) { - case MSA_WORD: - for (int i = 0; i < kMSALanesWord; i++) { - Msa2RFInstrHelper(opcode, ws.w[i], &wd.w[i], this); - } - break; - case MSA_DWORD: - for (int i = 0; i < kMSALanesDword; i++) { - Msa2RFInstrHelper(opcode, ws.d[i], &wd.d[i], this); - } - break; - default: - UNREACHABLE(); - } - } - set_msa_register(wd_reg(), &wd); - 
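
// [Illustrative aside -- not part of the original simulator source.] FFQL and
// FFQR above convert Q15/Q31 fixed-point lanes to floating point simply by
// dividing by 2^15 or 2^31. For one Q15 lane:

    #include <cstdint>

    // A Q15 value h represents h / 32768.0; e.g. 0x4000 -> 0.5f.
    float Q15ToFloat(int16_t h) { return static_cast<float>(h) / (1U << 15); }
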
-  TraceMSARegWr(&wd);
-}
-
-void Simulator::DecodeTypeRegister() {
-  // ---------- Execution.
-  switch (instr_.OpcodeFieldRaw()) {
-    case COP1:
-      DecodeTypeRegisterCOP1();
-      break;
-    case COP1X:
-      DecodeTypeRegisterCOP1X();
-      break;
-    case SPECIAL:
-      DecodeTypeRegisterSPECIAL();
-      break;
-    case SPECIAL2:
-      DecodeTypeRegisterSPECIAL2();
-      break;
-    case SPECIAL3:
-      DecodeTypeRegisterSPECIAL3();
-      break;
-    case MSA:
-      switch (instr_.MSAMinorOpcodeField()) {
-        case kMsaMinor3R:
-          DecodeTypeMsa3R();
-          break;
-        case kMsaMinor3RF:
-          DecodeTypeMsa3RF();
-          break;
-        case kMsaMinorVEC:
-          DecodeTypeMsaVec();
-          break;
-        case kMsaMinor2R:
-          DecodeTypeMsa2R();
-          break;
-        case kMsaMinor2RF:
-          DecodeTypeMsa2RF();
-          break;
-        case kMsaMinorELM:
-          DecodeTypeMsaELM();
-          break;
-        default:
-          UNREACHABLE();
-      }
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-// Type 2: instructions using a 16-, 21- or 26-bit immediate (e.g. beq, beqc).
-void Simulator::DecodeTypeImmediate() {
-  // Instruction fields.
-  Opcode op = instr_.OpcodeFieldRaw();
-  int32_t rs_reg = instr_.RsValue();
-  int32_t rs = get_register(instr_.RsValue());
-  uint32_t rs_u = static_cast<uint32_t>(rs);
-  int32_t rt_reg = instr_.RtValue();  // Destination register.
-  int32_t rt = get_register(rt_reg);
-  int16_t imm16 = instr_.Imm16Value();
-
-  int32_t ft_reg = instr_.FtValue();  // Destination register.
-
-  // Zero extended immediate.
-  uint32_t oe_imm16 = 0xFFFF & imm16;
-  // Sign extended immediate.
-  int32_t se_imm16 = imm16;
-
-  // Next pc.
-  int32_t next_pc = bad_ra;
-
-  // Used for conditional branch instructions.
-  bool execute_branch_delay_instruction = false;
-
-  // Used for arithmetic instructions.
-  int32_t alu_out = 0;
-
-  // Used for memory instructions.
-  int32_t addr = 0x0;
-
-  // Branch instructions common part.
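
// [Illustrative aside -- not part of the original simulator source.] The
// helpers defined just below share the classic MIPS branch arithmetic: the
// 16-bit immediate is a signed *word* offset relative to the instruction
// after the branch, and branch-and-link must return past the delay slot too
// (assuming kInstrSize == 4, as on MIPS):

    #include <cstdint>

    // Target of a taken MIPS I branch at `pc` with immediate `imm16`.
    int32_t BranchTarget(int32_t pc, int16_t imm16) {
      return pc + (static_cast<int32_t>(imm16) << 2) + 4;
    }
    // Return address written to $ra: skip the branch and its delay slot.
    int32_t LinkAddress(int32_t pc) { return pc + 2 * 4; }
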
- auto BranchAndLinkHelper = - [this, &next_pc, &execute_branch_delay_instruction](bool do_branch) { - execute_branch_delay_instruction = true; - int32_t current_pc = get_pc(); - set_register(31, current_pc + 2 * kInstrSize); - if (do_branch) { - int16_t imm16 = this->instr_.Imm16Value(); - next_pc = current_pc + (imm16 << 2) + kInstrSize; - } else { - next_pc = current_pc + 2 * kInstrSize; - } - }; - - auto BranchHelper = [this, &next_pc, - &execute_branch_delay_instruction](bool do_branch) { - execute_branch_delay_instruction = true; - int32_t current_pc = get_pc(); - if (do_branch) { - int16_t imm16 = this->instr_.Imm16Value(); - next_pc = current_pc + (imm16 << 2) + kInstrSize; - } else { - next_pc = current_pc + 2 * kInstrSize; - } - }; - - auto BranchHelper_MSA = [this, &next_pc, imm16, - &execute_branch_delay_instruction](bool do_branch) { - execute_branch_delay_instruction = true; - int32_t current_pc = get_pc(); - const int32_t bitsIn16Int = sizeof(int16_t) * kBitsPerByte; - if (do_branch) { - if (v8_flags.debug_code) { - int16_t bits = imm16 & 0xFC; - if (imm16 >= 0) { - CHECK_EQ(bits, 0); - } else { - CHECK_EQ(bits ^ 0xFC, 0); - } - } - // jump range :[pc + kInstrSize - 512 * kInstrSize, - // pc + kInstrSize + 511 * kInstrSize] - int16_t offset = static_cast(imm16 << (bitsIn16Int - 10)) >> - (bitsIn16Int - 12); - next_pc = current_pc + offset + kInstrSize; - } else { - next_pc = current_pc + 2 * kInstrSize; - } - }; - - auto BranchAndLinkCompactHelper = [this, &next_pc](bool do_branch, int bits) { - int32_t current_pc = get_pc(); - CheckForbiddenSlot(current_pc); - if (do_branch) { - int32_t imm = this->instr_.ImmValue(bits); - imm <<= 32 - bits; - imm >>= 32 - bits; - next_pc = current_pc + (imm << 2) + kInstrSize; - set_register(31, current_pc + kInstrSize); - } - }; - - auto BranchCompactHelper = [this, &next_pc](bool do_branch, int bits) { - int32_t current_pc = get_pc(); - CheckForbiddenSlot(current_pc); - if (do_branch) { - int32_t imm = this->instr_.ImmValue(bits); - imm <<= 32 - bits; - imm >>= 32 - bits; - next_pc = get_pc() + (imm << 2) + kInstrSize; - } - }; - - switch (op) { - // ------------- COP1. Coprocessor instructions. - case COP1: - switch (instr_.RsFieldRaw()) { - case BC1: { // Branch on coprocessor condition. - // Floating point. - uint32_t cc = instr_.FBccValue(); - uint32_t fcsr_cc = get_fcsr_condition_bit(cc); - uint32_t cc_value = test_fcsr_bit(fcsr_cc); - bool do_branch = (instr_.FBtrueValue()) ? 
cc_value : !cc_value; - BranchHelper(do_branch); - break; - } - case BC1EQZ: - BranchHelper(!(get_fpu_register(ft_reg) & 0x1)); - break; - case BC1NEZ: - BranchHelper(get_fpu_register(ft_reg) & 0x1); - break; - case BZ_V: { - msa_reg_t wt; - get_msa_register(wt_reg(), &wt); - BranchHelper_MSA(wt.d[0] == 0 && wt.d[1] == 0); - } break; -#define BZ_DF(witdh, lanes) \ - { \ - msa_reg_t wt; \ - get_msa_register(wt_reg(), &wt); \ - int i; \ - for (i = 0; i < lanes; ++i) { \ - if (wt.witdh[i] == 0) { \ - break; \ - } \ - } \ - BranchHelper_MSA(i != lanes); \ - } - case BZ_B: - BZ_DF(b, kMSALanesByte) - break; - case BZ_H: - BZ_DF(h, kMSALanesHalf) - break; - case BZ_W: - BZ_DF(w, kMSALanesWord) - break; - case BZ_D: - BZ_DF(d, kMSALanesDword) - break; -#undef BZ_DF - case BNZ_V: { - msa_reg_t wt; - get_msa_register(wt_reg(), &wt); - BranchHelper_MSA(wt.d[0] != 0 || wt.d[1] != 0); - } break; -#define BNZ_DF(witdh, lanes) \ - { \ - msa_reg_t wt; \ - get_msa_register(wt_reg(), &wt); \ - int i; \ - for (i = 0; i < lanes; ++i) { \ - if (wt.witdh[i] == 0) { \ - break; \ - } \ - } \ - BranchHelper_MSA(i == lanes); \ - } - case BNZ_B: - BNZ_DF(b, kMSALanesByte) - break; - case BNZ_H: - BNZ_DF(h, kMSALanesHalf) - break; - case BNZ_W: - BNZ_DF(w, kMSALanesWord) - break; - case BNZ_D: - BNZ_DF(d, kMSALanesDword) - break; -#undef BNZ_DF - default: - UNREACHABLE(); - } - break; - // ------------- REGIMM class. - case REGIMM: - switch (instr_.RtFieldRaw()) { - case BLTZ: - BranchHelper(rs < 0); - break; - case BGEZ: - BranchHelper(rs >= 0); - break; - case BLTZAL: - BranchAndLinkHelper(rs < 0); - break; - case BGEZAL: - BranchAndLinkHelper(rs >= 0); - break; - default: - UNREACHABLE(); - } - break; // case REGIMM. - // ------------- Branch instructions. - // When comparing to zero, the encoding of rt field is always 0, so we don't - // need to replace rt with zero. 
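
// [Illustrative aside -- not part of the original simulator source.] The
// compact-branch helpers above sign-extend a `bits`-wide immediate (16, 21 or
// 26 bits) with a shift pair: shifting the field up to the sign position and
// arithmetic-shifting it back replicates the sign bit, as in the quoted code:

    #include <cstdint>

    int32_t SignExtend(int32_t imm, int bits) {
      imm <<= 32 - bits;
      imm >>= 32 - bits;  // arithmetic shift, since imm is signed
      return imm;         // e.g. SignExtend(0x1FFFFF, 21) == -1
    }
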
- case BEQ: - BranchHelper(rs == rt); - break; - case BNE: - BranchHelper(rs != rt); - break; - case POP06: // BLEZALC, BGEZALC, BGEUC, BLEZ (pre-r6) - if (IsMipsArchVariant(kMips32r6)) { - if (rt_reg != 0) { - if (rs_reg == 0) { // BLEZALC - BranchAndLinkCompactHelper(rt <= 0, 16); - } else { - if (rs_reg == rt_reg) { // BGEZALC - BranchAndLinkCompactHelper(rt >= 0, 16); - } else { // BGEUC - BranchCompactHelper( - static_cast(rs) >= static_cast(rt), 16); - } - } - } else { // BLEZ - BranchHelper(rs <= 0); - } - } else { // BLEZ - BranchHelper(rs <= 0); - } - break; - case POP07: // BGTZALC, BLTZALC, BLTUC, BGTZ (pre-r6) - if (IsMipsArchVariant(kMips32r6)) { - if (rt_reg != 0) { - if (rs_reg == 0) { // BGTZALC - BranchAndLinkCompactHelper(rt > 0, 16); - } else { - if (rt_reg == rs_reg) { // BLTZALC - BranchAndLinkCompactHelper(rt < 0, 16); - } else { // BLTUC - BranchCompactHelper( - static_cast(rs) < static_cast(rt), 16); - } - } - } else { // BGTZ - BranchHelper(rs > 0); - } - } else { // BGTZ - BranchHelper(rs > 0); - } - break; - case POP26: // BLEZC, BGEZC, BGEC/BLEC / BLEZL (pre-r6) - if (IsMipsArchVariant(kMips32r6)) { - if (rt_reg != 0) { - if (rs_reg == 0) { // BLEZC - BranchCompactHelper(rt <= 0, 16); - } else { - if (rs_reg == rt_reg) { // BGEZC - BranchCompactHelper(rt >= 0, 16); - } else { // BGEC/BLEC - BranchCompactHelper(rs >= rt, 16); - } - } - } - } else { // BLEZL - BranchAndLinkHelper(rs <= 0); - } - break; - case POP27: // BGTZC, BLTZC, BLTC/BGTC / BGTZL (pre-r6) - if (IsMipsArchVariant(kMips32r6)) { - if (rt_reg != 0) { - if (rs_reg == 0) { // BGTZC - BranchCompactHelper(rt > 0, 16); - } else { - if (rs_reg == rt_reg) { // BLTZC - BranchCompactHelper(rt < 0, 16); - } else { // BLTC/BGTC - BranchCompactHelper(rs < rt, 16); - } - } - } - } else { // BGTZL - BranchAndLinkHelper(rs > 0); - } - break; - case POP66: // BEQZC, JIC - if (rs_reg != 0) { // BEQZC - BranchCompactHelper(rs == 0, 21); - } else { // JIC - next_pc = rt + imm16; - } - break; - case POP76: // BNEZC, JIALC - if (rs_reg != 0) { // BNEZC - BranchCompactHelper(rs != 0, 21); - } else { // JIALC - set_register(31, get_pc() + kInstrSize); - next_pc = rt + imm16; - } - break; - case BC: - BranchCompactHelper(true, 26); - break; - case BALC: - BranchAndLinkCompactHelper(true, 26); - break; - case POP10: // BOVC, BEQZALC, BEQC / ADDI (pre-r6) - if (IsMipsArchVariant(kMips32r6)) { - if (rs_reg >= rt_reg) { // BOVC - if (HaveSameSign(rs, rt)) { - if (rs > 0) { - BranchCompactHelper(rs > Registers::kMaxValue - rt, 16); - } else if (rs < 0) { - BranchCompactHelper(rs < Registers::kMinValue - rt, 16); - } - } - } else { - if (rs_reg == 0) { // BEQZALC - BranchAndLinkCompactHelper(rt == 0, 16); - } else { // BEQC - BranchCompactHelper(rt == rs, 16); - } - } - } else { // ADDI - if (HaveSameSign(rs, se_imm16)) { - if (rs > 0) { - if (rs <= Registers::kMaxValue - se_imm16) { - SignalException(kIntegerOverflow); - } - } else if (rs < 0) { - if (rs >= Registers::kMinValue - se_imm16) { - SignalException(kIntegerUnderflow); - } - } - } - SetResult(rt_reg, rs + se_imm16); - } - break; - case POP30: // BNVC, BNEZALC, BNEC / DADDI (pre-r6) - if (IsMipsArchVariant(kMips32r6)) { - if (rs_reg >= rt_reg) { // BNVC - if (!HaveSameSign(rs, rt) || rs == 0 || rt == 0) { - BranchCompactHelper(true, 16); - } else { - if (rs > 0) { - BranchCompactHelper(rs <= Registers::kMaxValue - rt, 16); - } else if (rs < 0) { - BranchCompactHelper(rs >= Registers::kMinValue - rt, 16); - } - } - } else { - if (rs_reg == 0) { // BNEZALC - 
-            BranchAndLinkCompactHelper(rt != 0, 16);
-          } else {  // BNEC
-            BranchCompactHelper(rt != rs, 16);
-          }
-        }
-      }
-      break;
-    // ------------- Arithmetic instructions.
-    case ADDIU:
-      SetResult(rt_reg, rs + se_imm16);
-      break;
-    case SLTI:
-      SetResult(rt_reg, rs < se_imm16 ? 1 : 0);
-      break;
-    case SLTIU:
-      SetResult(rt_reg, rs_u < static_cast<uint32_t>(se_imm16) ? 1 : 0);
-      break;
-    case ANDI:
-      SetResult(rt_reg, rs & oe_imm16);
-      break;
-    case ORI:
-      SetResult(rt_reg, rs | oe_imm16);
-      break;
-    case XORI:
-      SetResult(rt_reg, rs ^ oe_imm16);
-      break;
-    case LUI:
-      if (rs_reg != 0) {
-        // AUI
-        DCHECK(IsMipsArchVariant(kMips32r6));
-        SetResult(rt_reg, rs + (se_imm16 << 16));
-      } else {
-        // LUI
-        SetResult(rt_reg, oe_imm16 << 16);
-      }
-      break;
-    // ------------- Memory instructions.
-    case LB:
-      set_register(rt_reg, ReadB(rs + se_imm16));
-      break;
-    case LH:
-      set_register(rt_reg, ReadH(rs + se_imm16, instr_.instr()));
-      break;
-    case LWL: {
-      // al_offset is offset of the effective address within an aligned word.
-      uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
-      uint8_t byte_shift = kPointerAlignmentMask - al_offset;
-      uint32_t mask = (1 << byte_shift * 8) - 1;
-      addr = rs + se_imm16 - al_offset;
-      alu_out = ReadW(addr, instr_.instr());
-      alu_out <<= byte_shift * 8;
-      alu_out |= rt & mask;
-      set_register(rt_reg, alu_out);
-      break;
-    }
-    case LW:
-      set_register(rt_reg, ReadW(rs + se_imm16, instr_.instr()));
-      break;
-    case LBU:
-      set_register(rt_reg, ReadBU(rs + se_imm16));
-      break;
-    case LHU:
-      set_register(rt_reg, ReadHU(rs + se_imm16, instr_.instr()));
-      break;
-    case LWR: {
-      // al_offset is offset of the effective address within an aligned word.
-      uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
-      uint8_t byte_shift = kPointerAlignmentMask - al_offset;
-      uint32_t mask = al_offset ? (~0 << (byte_shift + 1) * 8) : 0;
-      addr = rs + se_imm16 - al_offset;
-      alu_out = ReadW(addr, instr_.instr());
-      alu_out = static_cast<uint32_t>(alu_out) >> al_offset * 8;
-      alu_out |= rt & mask;
-      set_register(rt_reg, alu_out);
-      break;
-    }
-    case SB:
-      WriteB(rs + se_imm16, static_cast<int8_t>(rt));
-      break;
-    case SH:
-      WriteH(rs + se_imm16, static_cast<uint16_t>(rt), instr_.instr());
-      break;
-    case SWL: {
-      uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
-      uint8_t byte_shift = kPointerAlignmentMask - al_offset;
-      uint32_t mask = byte_shift ? (~0 << (al_offset + 1) * 8) : 0;
-      addr = rs + se_imm16 - al_offset;
-      // Value to be written in memory.
-      uint32_t mem_value = ReadW(addr, instr_.instr()) & mask;
-      mem_value |= static_cast<uint32_t>(rt) >> byte_shift * 8;
-      WriteW(addr, mem_value, instr_.instr());
-      break;
-    }
-    case SW:
-      WriteW(rs + se_imm16, rt, instr_.instr());
-      break;
-    case SWR: {
-      uint8_t al_offset = (rs + se_imm16) & kPointerAlignmentMask;
-      uint32_t mask = (1 << al_offset * 8) - 1;
-      addr = rs + se_imm16 - al_offset;
-      uint32_t mem_value = ReadW(addr, instr_.instr());
-      mem_value = (rt << al_offset * 8) | (mem_value & mask);
-      WriteW(addr, mem_value, instr_.instr());
-      break;
-    }
-    case LL: {
-      DCHECK(!IsMipsArchVariant(kMips32r6));
-      base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
-      addr = rs + se_imm16;
-      set_register(rt_reg, ReadW(addr, instr_.instr()));
-      local_monitor_.NotifyLoadLinked(addr, TransactionSize::Word);
-      GlobalMonitor::Get()->NotifyLoadLinked_Locked(addr,
-                                                    &global_monitor_thread_);
-      break;
-    }
-    case SC: {
-      DCHECK(!IsMipsArchVariant(kMips32r6));
-      addr = rs + se_imm16;
-      WriteConditionalW(addr, rt, instr_.instr(), rt_reg);
-      break;
-    }
-    case LWC1:
-      set_fpu_register_hi_word(ft_reg, 0);
-      set_fpu_register_word(ft_reg,
-                            ReadW(rs + se_imm16, instr_.instr(), FLOAT));
-      if (ft_reg % 2) {
-        TraceMemRd(rs + se_imm16, get_fpu_register(ft_reg - 1), FLOAT_DOUBLE);
-      } else {
-        TraceMemRd(rs + se_imm16, get_fpu_register_word(ft_reg), FLOAT);
-      }
-      break;
-    case LDC1:
-      set_fpu_register_double(ft_reg, ReadD(rs + se_imm16, instr_.instr()));
-      TraceMemRd(rs + se_imm16, get_fpu_register(ft_reg), DOUBLE);
-      break;
-    case SWC1:
-      WriteW(rs + se_imm16, get_fpu_register_word(ft_reg), instr_.instr());
-      TraceMemWr(rs + se_imm16, get_fpu_register_word(ft_reg));
-      break;
-    case SDC1:
-      WriteD(rs + se_imm16, get_fpu_register_double(ft_reg), instr_.instr());
-      TraceMemWr(rs + se_imm16, get_fpu_register(ft_reg));
-      break;
-    // ------------- PC-Relative instructions.
-    case PCREL: {
-      // rt field: checking 5-bits.
-      int32_t imm21 = instr_.Imm21Value();
-      int32_t current_pc = get_pc();
-      uint8_t rt = (imm21 >> kImm16Bits);
-      switch (rt) {
-        case ALUIPC:
-          addr = current_pc + (se_imm16 << 16);
-          alu_out = static_cast<int32_t>(~0x0FFFF) & addr;
-          break;
-        case AUIPC:
-          alu_out = current_pc + (se_imm16 << 16);
-          break;
-        default: {
-          int32_t imm19 = instr_.Imm19Value();
-          // rt field: checking the most significant 2-bits.
-          rt = (imm21 >> kImm19Bits);
-          switch (rt) {
-            case LWPC: {
-              // Set sign.
-              imm19 <<= (kOpcodeBits + kRsBits + 2);
-              imm19 >>= (kOpcodeBits + kRsBits + 2);
-              addr = current_pc + (imm19 << 2);
-              uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
-              alu_out = *ptr;
-              break;
-            }
-            case ADDIUPC: {
-              int32_t se_imm19 = imm19 | ((imm19 & 0x40000) ?
-                                                             0xFFF80000 : 0);
-              alu_out = current_pc + (se_imm19 << 2);
-              break;
-            }
-            default:
-              UNREACHABLE();
-          }
-        }
-      }
-      SetResult(rs_reg, alu_out);
-      break;
-    }
-    case SPECIAL3: {
-      switch (instr_.FunctionFieldRaw()) {
-        case LL_R6: {
-          DCHECK(IsMipsArchVariant(kMips32r6));
-          base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
-          int32_t base = get_register(instr_.BaseValue());
-          int32_t offset9 = instr_.Imm9Value();
-          addr = base + offset9;
-          DCHECK_EQ(addr & kPointerAlignmentMask, 0);
-          set_register(rt_reg, ReadW(base + offset9, instr_.instr()));
-          local_monitor_.NotifyLoadLinked(addr, TransactionSize::Word);
-          GlobalMonitor::Get()->NotifyLoadLinked_Locked(
-              addr, &global_monitor_thread_);
-          break;
-        }
-        case SC_R6: {
-          DCHECK(IsMipsArchVariant(kMips32r6));
-          int32_t base = get_register(instr_.BaseValue());
-          int32_t offset9 = instr_.Imm9Value();
-          addr = base + offset9;
-          DCHECK_EQ(addr & kPointerAlignmentMask, 0);
-          WriteConditionalW(addr, rt, instr_.instr(), rt_reg);
-          break;
-        }
-        default:
-          UNREACHABLE();
-      }
-      break;
-    }
-    case MSA:
-      switch (instr_.MSAMinorOpcodeField()) {
-        case kMsaMinorI8:
-          DecodeTypeMsaI8();
-          break;
-        case kMsaMinorI5:
-          DecodeTypeMsaI5();
-          break;
-        case kMsaMinorI10:
-          DecodeTypeMsaI10();
-          break;
-        case kMsaMinorELM:
-          DecodeTypeMsaELM();
-          break;
-        case kMsaMinorBIT:
-          DecodeTypeMsaBIT();
-          break;
-        case kMsaMinorMI10:
-          DecodeTypeMsaMI10();
-          break;
-        default:
-          UNREACHABLE();
-      }
-      break;
-    default:
-      UNREACHABLE();
-  }
-
-  if (execute_branch_delay_instruction) {
-    // Execute branch delay slot.
-    // We don't check for end_sim_pc. First it should not be met as the current
-    // pc is valid. Secondly a jump should always execute its branch delay
-    // slot.
-    Instruction* branch_delay_instr =
-        reinterpret_cast<Instruction*>(get_pc() + kInstrSize);
-    BranchDelayInstructionDecode(branch_delay_instr);
-  }
-
-  // If needed update pc after the branch delay execution.
-  if (next_pc != bad_ra) {
-    set_pc(next_pc);
-  }
-}
-
-// Type 3: instructions using a 26-bit immediate (e.g. j, jal).
-void Simulator::DecodeTypeJump() {
-  // instr_ will be overwritten by BranchDelayInstructionDecode(), so we save
-  // the result of IsLinkingInstruction now.
-  bool isLinkingInstr = instr_.IsLinkingInstruction();
-  // Get current pc.
-  int32_t current_pc = get_pc();
-  // Get unchanged bits of pc.
-  int32_t pc_high_bits = current_pc & 0xF0000000;
-  // Next pc.
-  int32_t next_pc = pc_high_bits | (instr_.Imm26Value() << 2);
-
-  // Execute branch delay slot.
-  // We don't check for end_sim_pc. First it should not be met as the current
-  // pc is valid. Secondly a jump should always execute its branch delay slot.
-  Instruction* branch_delay_instr =
-      reinterpret_cast<Instruction*>(current_pc + kInstrSize);
-  BranchDelayInstructionDecode(branch_delay_instr);
-
-  // Update pc and ra if necessary.
-  // Do this after the branch delay execution.
-  if (isLinkingInstr) {
-    set_register(31, current_pc + 2 * kInstrSize);
-  }
-  set_pc(next_pc);
-  pc_modified_ = true;
-}
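
// [Illustrative aside -- not part of the original simulator source.]
// DecodeTypeJump above shows how J/JAL form their target: the 26-bit index
// selects a word inside the current 256 MB region, whose top four PC bits are
// carried over unchanged:

    #include <cstdint>

    uint32_t JumpTarget(uint32_t pc, uint32_t imm26) {
      return (pc & 0xF0000000u) | (imm26 << 2);  // stays in the same segment
    }
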
-// Executes the current instruction.
-void Simulator::InstructionDecode(Instruction* instr) {
-  if (v8_flags.check_icache) {
-    CheckICache(i_cache(), instr);
-  }
-  pc_modified_ = false;
-  v8::base::EmbeddedVector<char, 256> buffer;
-  if (v8_flags.trace_sim) {
-    SNPrintF(trace_buf_, "%s", "");
-    disasm::NameConverter converter;
-    disasm::Disassembler dasm(converter);
-    dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
-  }
-
-  instr_ = instr;
-  switch (instr_.InstructionType()) {
-    case Instruction::kRegisterType:
-      DecodeTypeRegister();
-      break;
-    case Instruction::kImmediateType:
-      DecodeTypeImmediate();
-      break;
-    case Instruction::kJumpType:
-      DecodeTypeJump();
-      break;
-    default:
-      UNSUPPORTED();
-  }
-  if (v8_flags.trace_sim) {
-    PrintF("  0x%08" PRIxPTR "  %-44s   %s\n",
-           reinterpret_cast<intptr_t>(instr), buffer.begin(),
-           trace_buf_.begin());
-  }
-  if (!pc_modified_) {
-    set_register(pc, reinterpret_cast<int32_t>(instr) + kInstrSize);
-  }
-}
-
-void Simulator::Execute() {
-  // Get the PC to simulate. Cannot use the accessor here as we need the
-  // raw PC value and not the one used as input to arithmetic instructions.
-  int program_counter = get_pc();
-  if (v8_flags.stop_sim_at == 0) {
-    // Fast version of the dispatch loop without checking whether the simulator
-    // should be stopping at a particular executed instruction.
-    while (program_counter != end_sim_pc) {
-      Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
-      icount_++;
-      InstructionDecode(instr);
-      program_counter = get_pc();
-    }
-  } else {
-    // v8_flags.stop_sim_at is at the non-default value. Stop in the debugger
-    // when we reach the particular instruction count.
-    while (program_counter != end_sim_pc) {
-      Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
-      icount_++;
-      if (icount_ == static_cast<int64_t>(v8_flags.stop_sim_at)) {
-        MipsDebugger dbg(this);
-        dbg.Debug();
-      } else {
-        InstructionDecode(instr);
-      }
-      program_counter = get_pc();
-    }
-  }
-}
-
-void Simulator::CallInternal(Address entry) {
-  // Adjust JS-based stack limit to C-based stack limit.
-  isolate_->stack_guard()->AdjustStackLimitForSimulator();
-
-  // Prepare to execute the code at entry.
-  set_register(pc, static_cast<int32_t>(entry));
-  // Put down marker for end of simulation. The simulator will stop simulation
-  // when the PC reaches this value. By saving the "end simulation" value into
-  // the LR the simulation stops when returning to this call point.
-  set_register(ra, end_sim_pc);
-
-  // Remember the values of callee-saved registers.
-  // The code below assumes that r9 is not used as sb (static base) in
-  // simulator code and therefore is regarded as a callee-saved register.
-  int32_t s0_val = get_register(s0);
-  int32_t s1_val = get_register(s1);
-  int32_t s2_val = get_register(s2);
-  int32_t s3_val = get_register(s3);
-  int32_t s4_val = get_register(s4);
-  int32_t s5_val = get_register(s5);
-  int32_t s6_val = get_register(s6);
-  int32_t s7_val = get_register(s7);
-  int32_t gp_val = get_register(gp);
-  int32_t sp_val = get_register(sp);
-  int32_t fp_val = get_register(fp);
-
-  // Set up the callee-saved registers with a known value. To be able to check
-  // that they are preserved properly across JS execution.
-  int32_t callee_saved_value = static_cast<int32_t>(icount_);
-  set_register(s0, callee_saved_value);
-  set_register(s1, callee_saved_value);
-  set_register(s2, callee_saved_value);
-  set_register(s3, callee_saved_value);
-  set_register(s4, callee_saved_value);
-  set_register(s5, callee_saved_value);
-  set_register(s6, callee_saved_value);
-  set_register(s7, callee_saved_value);
-  set_register(gp, callee_saved_value);
-  set_register(fp, callee_saved_value);
-
-  // Start the simulation.
-  Execute();
-
-  // Check that the callee-saved registers have been preserved.
-  CHECK_EQ(callee_saved_value, get_register(s0));
-  CHECK_EQ(callee_saved_value, get_register(s1));
-  CHECK_EQ(callee_saved_value, get_register(s2));
-  CHECK_EQ(callee_saved_value, get_register(s3));
-  CHECK_EQ(callee_saved_value, get_register(s4));
-  CHECK_EQ(callee_saved_value, get_register(s5));
-  CHECK_EQ(callee_saved_value, get_register(s6));
-  CHECK_EQ(callee_saved_value, get_register(s7));
-  CHECK_EQ(callee_saved_value, get_register(gp));
-  CHECK_EQ(callee_saved_value, get_register(fp));
-
-  // Restore callee-saved registers with the original value.
-  set_register(s0, s0_val);
-  set_register(s1, s1_val);
-  set_register(s2, s2_val);
-  set_register(s3, s3_val);
-  set_register(s4, s4_val);
-  set_register(s5, s5_val);
-  set_register(s6, s6_val);
-  set_register(s7, s7_val);
-  set_register(gp, gp_val);
-  set_register(sp, sp_val);
-  set_register(fp, fp_val);
-}
-
-intptr_t Simulator::CallImpl(Address entry, int argument_count,
-                             const intptr_t* arguments) {
-  // Set up arguments.
-
-  // First four arguments passed in registers.
-  int reg_arg_count = std::min(4, argument_count);
-  if (reg_arg_count > 0) set_register(a0, arguments[0]);
-  if (reg_arg_count > 1) set_register(a1, arguments[1]);
-  if (reg_arg_count > 2) set_register(a2, arguments[2]);
-  if (reg_arg_count > 3) set_register(a3, arguments[3]);
-
-  // Remaining arguments passed on stack.
-  int original_stack = get_register(sp);
-  // Compute position of stack on entry to generated code.
-  int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t) -
-                     kCArgsSlotsSize);
-  if (base::OS::ActivationFrameAlignment() != 0) {
-    entry_stack &= -base::OS::ActivationFrameAlignment();
-  }
-  // Store remaining arguments on stack, from low to high memory.
-  intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
-  memcpy(stack_argument + kCArgSlotCount, arguments + reg_arg_count,
-         (argument_count - reg_arg_count) * sizeof(*arguments));
-  set_register(sp, entry_stack);
-
-  CallInternal(entry);
-
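
// [Illustrative aside -- not part of the original simulator source.] CallImpl
// above follows the MIPS o32 ABI: the first four integer arguments travel in
// a0..a3, the rest go on the stack, and the caller still reserves four "home"
// argument slots below the stack arguments. A sketch of the stack math,
// assuming 4-byte slots (kCArgsSlotsSize == 16) and 8-byte frame alignment:

    #include <cstdint>

    int32_t EntryStack(int32_t original_stack, int argument_count) {
      int32_t stack_args = argument_count > 4 ? argument_count - 4 : 0;
      int32_t entry_stack = original_stack - stack_args * 4 - 16;
      return entry_stack & -8;  // align down to the activation frame alignment
    }
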
-  // Pop stack passed arguments.
-  CHECK_EQ(entry_stack, get_register(sp));
-  set_register(sp, original_stack);
-
-  return get_register(v0);
-}
-
-double Simulator::CallFP(Address entry, double d0, double d1) {
-  if (!IsMipsSoftFloatABI) {
-    set_fpu_register_double(f12, d0);
-    set_fpu_register_double(f14, d1);
-  } else {
-    int buffer[2];
-    DCHECK(sizeof(buffer[0]) * 2 == sizeof(d0));
-    memcpy(buffer, &d0, sizeof(d0));
-    set_dw_register(a0, buffer);
-    memcpy(buffer, &d1, sizeof(d1));
-    set_dw_register(a2, buffer);
-  }
-  CallInternal(entry);
-  if (!IsMipsSoftFloatABI) {
-    return get_fpu_register_double(f0);
-  } else {
-    return get_double_from_register_pair(v0);
-  }
-}
-
-uintptr_t Simulator::PushAddress(uintptr_t address) {
-  int new_sp = get_register(sp) - sizeof(uintptr_t);
-  uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
-  *stack_slot = address;
-  set_register(sp, new_sp);
-  return new_sp;
-}
-
-uintptr_t Simulator::PopAddress() {
-  int current_sp = get_register(sp);
-  uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
-  uintptr_t address = *stack_slot;
-  set_register(sp, current_sp + sizeof(uintptr_t));
-  return address;
-}
-
-Simulator::LocalMonitor::LocalMonitor()
-    : access_state_(MonitorAccess::Open),
-      tagged_addr_(0),
-      size_(TransactionSize::None) {}
-
-void Simulator::LocalMonitor::Clear() {
-  access_state_ = MonitorAccess::Open;
-  tagged_addr_ = 0;
-  size_ = TransactionSize::None;
-}
-
-void Simulator::LocalMonitor::NotifyLoad() {
-  if (access_state_ == MonitorAccess::RMW) {
-    // A non linked load could clear the local monitor. As a result, it's
-    // most strict to unconditionally clear the local monitor on load.
-    Clear();
-  }
-}
-
-void Simulator::LocalMonitor::NotifyLoadLinked(uintptr_t addr,
-                                               TransactionSize size) {
-  access_state_ = MonitorAccess::RMW;
-  tagged_addr_ = addr;
-  size_ = size;
-}
-
-void Simulator::LocalMonitor::NotifyStore() {
-  if (access_state_ == MonitorAccess::RMW) {
-    // A non exclusive store could clear the local monitor. As a result, it's
-    // most strict to unconditionally clear the local monitor on store.
-    Clear();
-  }
-}
-
-bool Simulator::LocalMonitor::NotifyStoreConditional(uintptr_t addr,
-                                                     TransactionSize size) {
-  if (access_state_ == MonitorAccess::RMW) {
-    if (addr == tagged_addr_ && size_ == size) {
-      Clear();
-      return true;
-    } else {
-      return false;
-    }
-  } else {
-    DCHECK(access_state_ == MonitorAccess::Open);
-    return false;
-  }
-}
-
-Simulator::GlobalMonitor::LinkedAddress::LinkedAddress()
-    : access_state_(MonitorAccess::Open),
-      tagged_addr_(0),
-      next_(nullptr),
-      prev_(nullptr),
-      failure_counter_(0) {}
-
-void Simulator::GlobalMonitor::LinkedAddress::Clear_Locked() {
-  access_state_ = MonitorAccess::Open;
-  tagged_addr_ = 0;
-}
-
-void Simulator::GlobalMonitor::LinkedAddress::NotifyLoadLinked_Locked(
-    uintptr_t addr) {
-  access_state_ = MonitorAccess::RMW;
-  tagged_addr_ = addr;
-}
-
-void Simulator::GlobalMonitor::LinkedAddress::NotifyStore_Locked() {
-  if (access_state_ == MonitorAccess::RMW) {
-    // A non exclusive store could clear the global monitor. As a result, it's
-    // most strict to unconditionally clear global monitors on store.
-    Clear_Locked();
-  }
-}
-
-bool Simulator::GlobalMonitor::LinkedAddress::NotifyStoreConditional_Locked(
-    uintptr_t addr, bool is_requesting_processor) {
-  if (access_state_ == MonitorAccess::RMW) {
-    if (is_requesting_processor) {
-      if (addr == tagged_addr_) {
-        Clear_Locked();
-        // Introduce occasional sc/scd failures.
This is to simulate the - // behavior of hardware, which can randomly fail due to background - // cache evictions. - if (failure_counter_++ >= kMaxFailureCounter) { - failure_counter_ = 0; - return false; - } else { - return true; - } - } - } else if ((addr & kExclusiveTaggedAddrMask) == - (tagged_addr_ & kExclusiveTaggedAddrMask)) { - // Check the masked addresses when responding to a successful lock by - // another thread so the implementation is more conservative (i.e. the - // granularity of locking is as large as possible.) - Clear_Locked(); - return false; - } - } - return false; -} - -void Simulator::GlobalMonitor::NotifyLoadLinked_Locked( - uintptr_t addr, LinkedAddress* linked_address) { - linked_address->NotifyLoadLinked_Locked(addr); - PrependProcessor_Locked(linked_address); -} - -void Simulator::GlobalMonitor::NotifyStore_Locked( - LinkedAddress* linked_address) { - // Notify each thread of the store operation. - for (LinkedAddress* iter = head_; iter; iter = iter->next_) { - iter->NotifyStore_Locked(); - } -} - -bool Simulator::GlobalMonitor::NotifyStoreConditional_Locked( - uintptr_t addr, LinkedAddress* linked_address) { - DCHECK(IsProcessorInLinkedList_Locked(linked_address)); - if (linked_address->NotifyStoreConditional_Locked(addr, true)) { - // Notify the other processors that this StoreConditional succeeded. - for (LinkedAddress* iter = head_; iter; iter = iter->next_) { - if (iter != linked_address) { - iter->NotifyStoreConditional_Locked(addr, false); - } - } - return true; - } else { - return false; - } -} - -bool Simulator::GlobalMonitor::IsProcessorInLinkedList_Locked( - LinkedAddress* linked_address) const { - return head_ == linked_address || linked_address->next_ || - linked_address->prev_; -} - -void Simulator::GlobalMonitor::PrependProcessor_Locked( - LinkedAddress* linked_address) { - if (IsProcessorInLinkedList_Locked(linked_address)) { - return; - } - - if (head_) { - head_->prev_ = linked_address; - } - linked_address->prev_ = nullptr; - linked_address->next_ = head_; - head_ = linked_address; -} - -void Simulator::GlobalMonitor::RemoveLinkedAddress( - LinkedAddress* linked_address) { - base::MutexGuard lock_guard(&mutex); - if (!IsProcessorInLinkedList_Locked(linked_address)) { - return; - } - - if (linked_address->prev_) { - linked_address->prev_->next_ = linked_address->next_; - } else { - head_ = linked_address->next_; - } - if (linked_address->next_) { - linked_address->next_->prev_ = linked_address->prev_; - } - linked_address->prev_ = nullptr; - linked_address->next_ = nullptr; -} - -#undef UNSUPPORTED -#undef SScanF - -} // namespace internal -} // namespace v8 - -#endif // USE_SIMULATOR diff --git a/src/execution/mips/simulator-mips.h b/src/execution/mips/simulator-mips.h deleted file mode 100644 index d49cd9a5d5..0000000000 --- a/src/execution/mips/simulator-mips.h +++ /dev/null @@ -1,719 +0,0 @@ -// Copyright 2011 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// Declares a Simulator for MIPS instructions if we are not generating a native -// MIPS binary. This Simulator allows us to run and debug MIPS code generation -// on regular desktop machines. -// V8 calls into generated code via the GeneratedCode wrapper, -// which will start execution in the Simulator or forwards to the real entry -// on a MIPS HW platform. 
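
// [Illustrative aside -- not part of the original simulator source.] The
// LocalMonitor/GlobalMonitor machinery deleted above emulates MIPS LL/SC:
// a linked load arms a reservation on an address, any intervening store (or a
// competing SC elsewhere) clears it, and SC succeeds only if the reservation
// survived. Reduced to a single thread, ignoring transaction sizes:

    #include <cstdint>

    struct LLSCMonitor {
      bool armed = false;
      uintptr_t tagged = 0;
      void NotifyLoadLinked(uintptr_t addr) { armed = true; tagged = addr; }
      void NotifyStore() { armed = false; }  // conservative: any store clears
      bool TryStoreConditional(uintptr_t addr) {
        const bool ok = armed && addr == tagged;
        armed = false;  // an SC attempt always consumes the reservation
        return ok;
      }
    };
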
-
-#ifndef V8_EXECUTION_MIPS_SIMULATOR_MIPS_H_
-#define V8_EXECUTION_MIPS_SIMULATOR_MIPS_H_
-
-// globals.h defines USE_SIMULATOR.
-#include "src/common/globals.h"
-
-template <typename T>
-int Compare(const T& a, const T& b) {
-  if (a == b)
-    return 0;
-  else if (a < b)
-    return -1;
-  else
-    return 1;
-}
-
-// Returns the negative absolute value of its argument.
-template <typename T,
-          typename = typename std::enable_if<std::is_signed<T>::value>::type>
-T Nabs(T a) {
-  return a < 0 ? a : -a;
-}
-
-#if defined(USE_SIMULATOR)
-// Running with a simulator.
-
-#include "src/base/hashmap.h"
-#include "src/base/strings.h"
-#include "src/codegen/assembler.h"
-#include "src/codegen/mips/constants-mips.h"
-#include "src/execution/simulator-base.h"
-#include "src/utils/allocation.h"
-
-namespace v8 {
-namespace internal {
-
-// -----------------------------------------------------------------------------
-// Utility functions
-
-class CachePage {
- public:
-  static const int LINE_VALID = 0;
-  static const int LINE_INVALID = 1;
-
-  static const int kPageShift = 12;
-  static const int kPageSize = 1 << kPageShift;
-  static const int kPageMask = kPageSize - 1;
-  static const int kLineShift = 2;  // The cache line is only 4 bytes right now.
-  static const int kLineLength = 1 << kLineShift;
-  static const int kLineMask = kLineLength - 1;
-
-  CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }
-
-  char* ValidityByte(int offset) {
-    return &validity_map_[offset >> kLineShift];
-  }
-
-  char* CachedData(int offset) { return &data_[offset]; }
-
- private:
-  char data_[kPageSize];  // The cached data.
-  static const int kValidityMapSize = kPageSize >> kLineShift;
-  char validity_map_[kValidityMapSize];  // One byte per line.
-};
-
-class SimInstructionBase : public InstructionBase {
- public:
-  Type InstructionType() const { return type_; }
-  inline Instruction* instr() const { return instr_; }
-  inline int32_t operand() const { return operand_; }
-
- protected:
-  SimInstructionBase() : operand_(-1), instr_(nullptr), type_(kUnsupported) {}
-  explicit SimInstructionBase(Instruction* instr) {}
-
-  int32_t operand_;
-  Instruction* instr_;
-  Type type_;
-
- private:
-  DISALLOW_ASSIGN(SimInstructionBase);
-};
-
-class SimInstruction : public InstructionGetters<SimInstructionBase> {
- public:
-  SimInstruction() {}
-
-  explicit SimInstruction(Instruction* instr) { *this = instr; }
-
-  SimInstruction& operator=(Instruction* instr) {
-    operand_ = *reinterpret_cast<const int32_t*>(instr);
-    instr_ = instr;
-    type_ = InstructionBase::InstructionType();
-    DCHECK(reinterpret_cast<void*>(&operand_) == this);
-    return *this;
-  }
-};
-
-class Simulator : public SimulatorBase {
- public:
-  friend class MipsDebugger;
-
-  // Registers are declared in order. See SMRL chapter 2.
-  enum Register {
-    no_reg = -1,
-    zero_reg = 0,
-    at,
-    v0,
-    v1,
-    a0,
-    a1,
-    a2,
-    a3,
-    t0,
-    t1,
-    t2,
-    t3,
-    t4,
-    t5,
-    t6,
-    t7,
-    s0,
-    s1,
-    s2,
-    s3,
-    s4,
-    s5,
-    s6,
-    s7,
-    t8,
-    t9,
-    k0,
-    k1,
-    gp,
-    sp,
-    s8,
-    ra,
-    // LO, HI, and pc.
-    LO,
-    HI,
-    pc,  // pc must be the last register.
-    kNumSimuRegisters,
-    // aliases
-    fp = s8
-  };
-
-  // Coprocessor registers.
-  // Generated code will always use doubles. So we will only use even registers.
-  enum FPURegister {
-    f0,
-    f1,
-    f2,
-    f3,
-    f4,
-    f5,
-    f6,
-    f7,
-    f8,
-    f9,
-    f10,
-    f11,
-    f12,
-    f13,
-    f14,
-    f15,  // f12 and f14 are arguments FPURegisters.
- f16, - f17, - f18, - f19, - f20, - f21, - f22, - f23, - f24, - f25, - f26, - f27, - f28, - f29, - f30, - f31, - kNumFPURegisters - }; - - // MSA registers - enum MSARegister { - w0, - w1, - w2, - w3, - w4, - w5, - w6, - w7, - w8, - w9, - w10, - w11, - w12, - w13, - w14, - w15, - w16, - w17, - w18, - w19, - w20, - w21, - w22, - w23, - w24, - w25, - w26, - w27, - w28, - w29, - w30, - w31, - kNumMSARegisters - }; - - explicit Simulator(Isolate* isolate); - ~Simulator(); - - // The currently executing Simulator instance. Potentially there can be one - // for each native thread. - V8_EXPORT_PRIVATE static Simulator* current(v8::internal::Isolate* isolate); - - // Accessors for register state. Reading the pc value adheres to the MIPS - // architecture specification and is off by a 8 from the currently executing - // instruction. - void set_register(int reg, int32_t value); - void set_dw_register(int dreg, const int* dbl); - int32_t get_register(int reg) const; - double get_double_from_register_pair(int reg); - // Same for FPURegisters. - void set_fpu_register(int fpureg, int64_t value); - void set_fpu_register_word(int fpureg, int32_t value); - void set_fpu_register_hi_word(int fpureg, int32_t value); - void set_fpu_register_float(int fpureg, float value); - void set_fpu_register_double(int fpureg, double value); - void set_fpu_register_invalid_result64(float original, float rounded); - void set_fpu_register_invalid_result(float original, float rounded); - void set_fpu_register_word_invalid_result(float original, float rounded); - void set_fpu_register_invalid_result64(double original, double rounded); - void set_fpu_register_invalid_result(double original, double rounded); - void set_fpu_register_word_invalid_result(double original, double rounded); - int64_t get_fpu_register(int fpureg) const; - int32_t get_fpu_register_word(int fpureg) const; - int32_t get_fpu_register_signed_word(int fpureg) const; - int32_t get_fpu_register_hi_word(int fpureg) const; - float get_fpu_register_float(int fpureg) const; - double get_fpu_register_double(int fpureg) const; - template - void get_msa_register(int wreg, T* value); - template - void set_msa_register(int wreg, const T* value); - void set_fcsr_bit(uint32_t cc, bool value); - bool test_fcsr_bit(uint32_t cc); - void clear_fcsr_cause(); - void set_fcsr_rounding_mode(FPURoundingMode mode); - void set_msacsr_rounding_mode(FPURoundingMode mode); - unsigned int get_fcsr_rounding_mode(); - unsigned int get_msacsr_rounding_mode(); - bool set_fcsr_round_error(double original, double rounded); - bool set_fcsr_round_error(float original, float rounded); - bool set_fcsr_round64_error(double original, double rounded); - bool set_fcsr_round64_error(float original, float rounded); - void round_according_to_fcsr(double toRound, double* rounded, - int32_t* rounded_int, double fs); - void round_according_to_fcsr(float toRound, float* rounded, - int32_t* rounded_int, float fs); - template - void round_according_to_msacsr(Tfp toRound, Tfp* rounded, Tint* rounded_int); - void round64_according_to_fcsr(double toRound, double* rounded, - int64_t* rounded_int, double fs); - void round64_according_to_fcsr(float toRound, float* rounded, - int64_t* rounded_int, float fs); - // Special case of set_register and get_register to access the raw PC value. - void set_pc(int32_t value); - int32_t get_pc() const; - - Address get_sp() const { return static_cast
(get_register(sp)); } - - // Accessor to the internal simulator stack area. - uintptr_t StackLimit(uintptr_t c_limit) const; - - // Executes MIPS instructions until the PC reaches end_sim_pc. - void Execute(); - - template - Return Call(Address entry, Args... args) { - return VariadicCall(this, &Simulator::CallImpl, entry, args...); - } - - // Alternative: call a 2-argument double function. - double CallFP(Address entry, double d0, double d1); - - // Push an address onto the JS stack. - uintptr_t PushAddress(uintptr_t address); - - // Pop an address from the JS stack. - uintptr_t PopAddress(); - - // Debugger input. - void set_last_debugger_input(char* input); - char* last_debugger_input() { return last_debugger_input_; } - - // Redirection support. - static void SetRedirectInstruction(Instruction* instruction); - - // ICache checking. - static bool ICacheMatch(void* one, void* two); - static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start, - size_t size); - - // Returns true if pc register contains one of the 'special_values' defined - // below (bad_ra, end_sim_pc). - bool has_bad_pc() const; - - private: - enum special_values { - // Known bad pc value to ensure that the simulator does not execute - // without being properly setup. - bad_ra = -1, - // A pc value used to signal the simulator to stop execution. Generally - // the ra is set to this value on transition from native C code to - // simulated execution, so that the simulator can "return" to the native - // C code. - end_sim_pc = -2, - // Unpredictable value. - Unpredictable = 0xbadbeaf - }; - - V8_EXPORT_PRIVATE intptr_t CallImpl(Address entry, int argument_count, - const intptr_t* arguments); - - // Unsupported instructions use Format to print an error and stop execution. - void Format(Instruction* instr, const char* format); - - // Helpers for data value tracing. - enum TraceType { BYTE, HALF, WORD, DWORD, FLOAT, DOUBLE, FLOAT_DOUBLE }; - - // MSA Data Format - enum MSADataFormat { MSA_VECT = 0, MSA_BYTE, MSA_HALF, MSA_WORD, MSA_DWORD }; - union msa_reg_t { - int8_t b[kMSALanesByte]; - uint8_t ub[kMSALanesByte]; - int16_t h[kMSALanesHalf]; - uint16_t uh[kMSALanesHalf]; - int32_t w[kMSALanesWord]; - uint32_t uw[kMSALanesWord]; - int64_t d[kMSALanesDword]; - uint64_t ud[kMSALanesDword]; - }; - - // Read and write memory. - inline uint32_t ReadBU(int32_t addr); - inline int32_t ReadB(int32_t addr); - inline void WriteB(int32_t addr, uint8_t value); - inline void WriteB(int32_t addr, int8_t value); - - inline uint16_t ReadHU(int32_t addr, Instruction* instr); - inline int16_t ReadH(int32_t addr, Instruction* instr); - // Note: Overloaded on the sign of the value. 
- inline void WriteH(int32_t addr, uint16_t value, Instruction* instr); - inline void WriteH(int32_t addr, int16_t value, Instruction* instr); - - inline int ReadW(int32_t addr, Instruction* instr, TraceType t = WORD); - inline void WriteW(int32_t addr, int value, Instruction* instr); - void WriteConditionalW(int32_t addr, int32_t value, Instruction* instr, - int32_t rt_reg); - - inline double ReadD(int32_t addr, Instruction* instr); - inline void WriteD(int32_t addr, double value, Instruction* instr); - - template - T ReadMem(int32_t addr, Instruction* instr); - - template - void WriteMem(int32_t addr, T value, Instruction* instr); - - void TraceRegWr(int32_t value, TraceType t = WORD); - void TraceRegWr(int64_t value, TraceType t = DWORD); - template - void TraceMSARegWr(T* value, TraceType t); - template - void TraceMSARegWr(T* value); - void TraceMemWr(int32_t addr, int32_t value, TraceType t = WORD); - void TraceMemRd(int32_t addr, int32_t value, TraceType t = WORD); - void TraceMemWr(int32_t addr, int64_t value, TraceType t = DWORD); - void TraceMemRd(int32_t addr, int64_t value, TraceType t = DWORD); - template - void TraceMemRd(int32_t addr, T value); - template - void TraceMemWr(int32_t addr, T value); - base::EmbeddedVector trace_buf_; - - // Operations depending on endianness. - // Get Double Higher / Lower word. - inline int32_t GetDoubleHIW(double* addr); - inline int32_t GetDoubleLOW(double* addr); - // Set Double Higher / Lower word. - inline int32_t SetDoubleHIW(double* addr); - inline int32_t SetDoubleLOW(double* addr); - - SimInstruction instr_; - - // Executing is handled based on the instruction type. - void DecodeTypeRegister(); - - // Functions called from DecodeTypeRegister. - void DecodeTypeRegisterCOP1(); - - void DecodeTypeRegisterCOP1X(); - - void DecodeTypeRegisterSPECIAL(); - - void DecodeTypeRegisterSPECIAL2(); - - void DecodeTypeRegisterSPECIAL3(); - - // Called from DecodeTypeRegisterCOP1. 
- void DecodeTypeRegisterSRsType(); - - void DecodeTypeRegisterDRsType(); - - void DecodeTypeRegisterWRsType(); - - void DecodeTypeRegisterLRsType(); - - int DecodeMsaDataFormat(); - void DecodeTypeMsaI8(); - void DecodeTypeMsaI5(); - void DecodeTypeMsaI10(); - void DecodeTypeMsaELM(); - void DecodeTypeMsaBIT(); - void DecodeTypeMsaMI10(); - void DecodeTypeMsa3R(); - void DecodeTypeMsa3RF(); - void DecodeTypeMsaVec(); - void DecodeTypeMsa2R(); - void DecodeTypeMsa2RF(); - template - T MsaI5InstrHelper(uint32_t opcode, T ws, int32_t i5); - template - T MsaBitInstrHelper(uint32_t opcode, T wd, T ws, int32_t m); - template - T Msa3RInstrHelper(uint32_t opcode, T wd, T ws, T wt); - - inline int32_t rs_reg() const { return instr_.RsValue(); } - inline int32_t rs() const { return get_register(rs_reg()); } - inline uint32_t rs_u() const { - return static_cast(get_register(rs_reg())); - } - inline int32_t rt_reg() const { return instr_.RtValue(); } - inline int32_t rt() const { return get_register(rt_reg()); } - inline uint32_t rt_u() const { - return static_cast(get_register(rt_reg())); - } - inline int32_t rd_reg() const { return instr_.RdValue(); } - inline int32_t fr_reg() const { return instr_.FrValue(); } - inline int32_t fs_reg() const { return instr_.FsValue(); } - inline int32_t ft_reg() const { return instr_.FtValue(); } - inline int32_t fd_reg() const { return instr_.FdValue(); } - inline int32_t sa() const { return instr_.SaValue(); } - inline int32_t lsa_sa() const { return instr_.LsaSaValue(); } - inline int32_t ws_reg() const { return instr_.WsValue(); } - inline int32_t wt_reg() const { return instr_.WtValue(); } - inline int32_t wd_reg() const { return instr_.WdValue(); } - - inline void SetResult(int32_t rd_reg, int32_t alu_out) { - set_register(rd_reg, alu_out); - TraceRegWr(alu_out); - } - - inline void SetFPUWordResult(int32_t fd_reg, int32_t alu_out) { - set_fpu_register_word(fd_reg, alu_out); - TraceRegWr(get_fpu_register_word(fd_reg)); - } - - inline void SetFPUResult(int32_t fd_reg, int64_t alu_out) { - set_fpu_register(fd_reg, alu_out); - TraceRegWr(get_fpu_register(fd_reg)); - } - - inline void SetFPUFloatResult(int32_t fd_reg, float alu_out) { - set_fpu_register_float(fd_reg, alu_out); - TraceRegWr(get_fpu_register_word(fd_reg), FLOAT); - } - - inline void SetFPUDoubleResult(int32_t fd_reg, double alu_out) { - set_fpu_register_double(fd_reg, alu_out); - TraceRegWr(get_fpu_register(fd_reg), DOUBLE); - } - - void DecodeTypeImmediate(); - void DecodeTypeJump(); - - // Used for breakpoints and traps. - void SoftwareInterrupt(); - - // Compact branch guard. - void CheckForbiddenSlot(int32_t current_pc) { - Instruction* instr_after_compact_branch = - reinterpret_cast(current_pc + kInstrSize); - if (instr_after_compact_branch->IsForbiddenAfterBranch()) { - FATAL( - "Error: Unexpected instruction 0x%08x immediately after a " - "compact branch instruction.", - *reinterpret_cast(instr_after_compact_branch)); - } - } - - // Stop helper functions. - bool IsWatchpoint(uint32_t code); - void PrintWatchpoint(uint32_t code); - void HandleStop(uint32_t code, Instruction* instr); - bool IsStopInstruction(Instruction* instr); - bool IsEnabledStop(uint32_t code); - void EnableStop(uint32_t code); - void DisableStop(uint32_t code); - void IncreaseStopCounter(uint32_t code); - void PrintStopInfo(uint32_t code); - - // Executes one instruction. - void InstructionDecode(Instruction* instr); - // Execute one instruction placed in a branch delay slot. 
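- // (Background, added for clarity; not part of the original header: on
- // MIPS the instruction immediately after a branch executes before the
- // branch takes effect, e.g.
- //   beq   $t0, $t1, target
- //   addiu $v0, $v0, 1   // delay slot: executed on both paths
- // which is why decoding the delay-slot instruction is a separate step.)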
- void BranchDelayInstructionDecode(Instruction* instr) {
- if (instr->InstructionBits() == nopInstr) {
- // Short-cut generic nop instructions. They are always valid and they
- // never change the simulator state.
- return;
- }
-
- if (instr->IsForbiddenInBranchDelay()) {
- FATAL("Error: Unexpected %i opcode in a branch delay slot.",
- instr->OpcodeValue());
- }
- InstructionDecode(instr);
- base::SNPrintF(trace_buf_, " ");
- }
-
- // ICache.
- static void CheckICache(base::CustomMatcherHashMap* i_cache,
- Instruction* instr);
- static void FlushOnePage(base::CustomMatcherHashMap* i_cache, intptr_t start,
- int size);
- static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
- void* page);
-
- enum Exception {
- none,
- kIntegerOverflow,
- kIntegerUnderflow,
- kDivideByZero,
- kNumExceptions
- };
-
- // Exceptions.
- void SignalException(Exception e);
-
- // Handle arguments and return value for runtime FP functions.
- void GetFpArgs(double* x, double* y, int32_t* z);
- void SetFpResult(const double& result);
-
- void CallInternal(Address entry);
-
- // Architecture state.
- // Registers.
- int32_t registers_[kNumSimuRegisters];
- // Coprocessor Registers.
- // Note: FP32 mode uses only the lower 32-bit part of each element,
- // the upper 32-bit is unpredictable.
- // Note: FPUregisters_[] array is increased to 64 * 8B = 32 * 16B in
- // order to support MSA registers
- int64_t FPUregisters_[kNumFPURegisters * 2];
- // FPU control register.
- uint32_t FCSR_;
- // MSA control register.
- uint32_t MSACSR_;
-
- // Simulator support.
- size_t stack_size_;
- char* stack_;
- bool pc_modified_;
- uint64_t icount_;
- int break_count_;
-
- // Debugger input.
- char* last_debugger_input_;
-
- v8::internal::Isolate* isolate_;
-
- // Registered breakpoints.
- Instruction* break_pc_;
- Instr break_instr_;
-
- // Stop is disabled if bit 31 is set.
- static const uint32_t kStopDisabledBit = 1 << 31;
-
- // A stop is enabled, meaning the simulator will stop when meeting the
- // instruction, if bit 31 of watched_stops_[code].count is unset.
- // The value watched_stops_[code].count & ~(1 << 31) indicates how many times
- // the breakpoint was hit or gone through.
- struct StopCountAndDesc {
- uint32_t count;
- char* desc;
- };
- StopCountAndDesc watched_stops_[kMaxStopCode + 1];
-
- // Synchronization primitives.
- enum class MonitorAccess {
- Open,
- RMW,
- };
-
- enum class TransactionSize {
- None = 0,
- Word = 4,
- };
-
- // The least-significant bits of the address are ignored. The number of bits
- // is implementation-defined, between 3 and minimum page size.
- static const uintptr_t kExclusiveTaggedAddrMask = ~((1 << 3) - 1);
-
- class LocalMonitor {
- public:
- LocalMonitor();
-
- // These functions manage the state machine for the local monitor, but do
- // not actually perform loads and stores. NotifyStoreConditional only
- // returns true if the store conditional is allowed; the global monitor will
- // still have to be checked to see whether the memory should be updated.
- void NotifyLoad();
- void NotifyLoadLinked(uintptr_t addr, TransactionSize size);
- void NotifyStore();
- bool NotifyStoreConditional(uintptr_t addr, TransactionSize size);
-
- private:
- void Clear();
-
- MonitorAccess access_state_;
- uintptr_t tagged_addr_;
- TransactionSize size_;
- };
-
- class GlobalMonitor {
- public:
- class LinkedAddress {
- public:
- LinkedAddress();
-
- private:
- friend class GlobalMonitor;
- // These functions manage the state machine for the global monitor, but do
- // not actually perform loads and stores.
- void Clear_Locked();
- void NotifyLoadLinked_Locked(uintptr_t addr);
- void NotifyStore_Locked();
- bool NotifyStoreConditional_Locked(uintptr_t addr,
- bool is_requesting_thread);
-
- MonitorAccess access_state_;
- uintptr_t tagged_addr_;
- LinkedAddress* next_;
- LinkedAddress* prev_;
- // A store conditional (sc) can fail due to background cache evictions.
- // Rather than simulating this, we'll just occasionally introduce cases
- // where a store conditional fails. This will happen once after every
- // kMaxFailureCounter exclusive stores.
- static const int kMaxFailureCounter = 5;
- int failure_counter_;
- };
-
- // Exposed so it can be accessed by Simulator::{Read,Write}Ex*.
- base::Mutex mutex;
-
- void NotifyLoadLinked_Locked(uintptr_t addr, LinkedAddress* linked_address);
- void NotifyStore_Locked(LinkedAddress* linked_address);
- bool NotifyStoreConditional_Locked(uintptr_t addr,
- LinkedAddress* linked_address);
-
- // Called when the simulator is destroyed.
- void RemoveLinkedAddress(LinkedAddress* linked_address);
-
- static GlobalMonitor* Get();
-
- private:
- // Private constructor. Call {GlobalMonitor::Get()} to get the singleton.
- GlobalMonitor() = default;
- friend class base::LeakyObject<GlobalMonitor>;
-
- bool IsProcessorInLinkedList_Locked(LinkedAddress* linked_address) const;
- void PrependProcessor_Locked(LinkedAddress* linked_address);
-
- LinkedAddress* head_ = nullptr;
- };
-
- LocalMonitor local_monitor_;
- GlobalMonitor::LinkedAddress global_monitor_thread_;
-};
-
-} // namespace internal
-} // namespace v8
-
-#endif // defined(USE_SIMULATOR)
-#endif // V8_EXECUTION_MIPS_SIMULATOR_MIPS_H_
diff --git a/src/execution/simulator-base.h b/src/execution/simulator-base.h
index 50ea722f67..e974d5151b 100644
--- a/src/execution/simulator-base.h
+++ b/src/execution/simulator-base.h
@@ -146,7 +146,6 @@ class SimulatorBase {
 // The following are trapping instructions used for various architectures:
 // - V8_TARGET_ARCH_ARM: svc (Supervisor Call)
 // - V8_TARGET_ARCH_ARM64: svc (Supervisor Call)
-// - V8_TARGET_ARCH_MIPS: swi (software-interrupt)
 // - V8_TARGET_ARCH_MIPS64: swi (software-interrupt)
 // - V8_TARGET_ARCH_PPC: svc (Supervisor Call)
 // - V8_TARGET_ARCH_PPC64: svc (Supervisor Call)
diff --git a/src/execution/simulator.h b/src/execution/simulator.h
index 4472ad8bd4..11887e6d9a 100644
--- a/src/execution/simulator.h
+++ b/src/execution/simulator.h
@@ -20,8 +20,6 @@
 #include "src/execution/arm/simulator-arm.h"
 #elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
 #include "src/execution/ppc/simulator-ppc.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "src/execution/mips/simulator-mips.h"
 #elif V8_TARGET_ARCH_MIPS64
 #include "src/execution/mips64/simulator-mips64.h"
 #elif V8_TARGET_ARCH_LOONG64
diff --git a/src/flags/flag-definitions.h b/src/flags/flag-definitions.h
index c5fe104876..16f796d43d 100644
--- a/src/flags/flag-definitions.h
+++ b/src/flags/flag-definitions.h
@@ -1777,7 +1777,7 @@ DEFINE_IMPLICATION(allow_natives_for_differential_fuzzing,
 allow_natives_syntax)
 DEFINE_IMPLICATION(allow_natives_for_differential_fuzzing, fuzzing)
 DEFINE_BOOL(parse_only, false, "only parse the sources")
-// simulator-arm.cc, simulator-arm64.cc and simulator-mips.cc
+// simulator-arm.cc and simulator-arm64.cc.
 #ifdef USE_SIMULATOR
 DEFINE_BOOL(trace_sim, false, "Trace simulator execution")
 DEFINE_BOOL(debug_sim, false, "Enable debugging the simulator")
@@ -1795,7 +1795,7 @@ DEFINE_INT(sim_stack_alignment, 8,
 "Stack alignment in bytes in simulator (4 or 8, 8 is default)")
 #endif
 DEFINE_INT(sim_stack_size, 2 * MB / KB,
-           "Stack size of the ARM64, MIPS, MIPS64 and PPC64 simulator "
+           "Stack size of the ARM64, MIPS64 and PPC64 simulator "
            "in kBytes (default is 2 MB)")
 DEFINE_BOOL(trace_sim_messages, false,
 "Trace simulator debug messages. Implied by --trace-sim.")
diff --git a/src/heap/base/asm/mips/push_registers_asm.cc b/src/heap/base/asm/mips/push_registers_asm.cc
deleted file mode 100644
index 4a46caa6c5..0000000000
--- a/src/heap/base/asm/mips/push_registers_asm.cc
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Push all callee-saved registers to get them on the stack for conservative
-// stack scanning.
-//
-// See asm/x64/push_registers_clang.cc for why the function is not generated
-// using clang.
-//
-// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
-// GN toolchain (e.g. ChromeOS) and not provide them.
-asm(".set noreorder \n"
- ".global PushAllRegistersAndIterateStack \n"
- ".type PushAllRegistersAndIterateStack, %function \n"
- ".hidden PushAllRegistersAndIterateStack \n"
- "PushAllRegistersAndIterateStack: \n"
- // Push all callee-saved registers and save return address.
- " addiu $sp, $sp, -48 \n"
- " sw $ra, 44($sp) \n"
- " sw $s8, 40($sp) \n"
- " sw $sp, 36($sp) \n"
- " sw $gp, 32($sp) \n"
- " sw $s7, 28($sp) \n"
- " sw $s6, 24($sp) \n"
- " sw $s5, 20($sp) \n"
- " sw $s4, 16($sp) \n"
- " sw $s3, 12($sp) \n"
- " sw $s2, 8($sp) \n"
- " sw $s1, 4($sp) \n"
- " sw $s0, 0($sp) \n"
- // Maintain frame pointer.
- " move $s8, $sp \n"
- // Pass 1st parameter (a0) unchanged (Stack*).
- // Pass 2nd parameter (a1) unchanged (StackVisitor*).
- // Save 3rd parameter (a2; IterateStackCallback).
- " move $a3, $a2 \n"
- // Call the callback.
- " jalr $a3 \n"
- // Delay slot: Pass 3rd parameter as sp (stack pointer).
- " move $a2, $sp \n"
- // Load return address.
- " lw $ra, 44($sp) \n"
- // Restore frame pointer.
- " lw $s8, 40($sp) \n"
- " jr $ra \n"
- // Delay slot: Discard all callee-saved registers.
- " addiu $sp, $sp, 48 \n"); diff --git a/src/interpreter/interpreter-assembler.cc b/src/interpreter/interpreter-assembler.cc index 6fa5ac1a04..0c10c69350 100644 --- a/src/interpreter/interpreter-assembler.cc +++ b/src/interpreter/interpreter-assembler.cc @@ -1457,8 +1457,7 @@ void InterpreterAssembler::TraceBytecodeDispatch(TNode target_bytecode) { // static bool InterpreterAssembler::TargetSupportsUnalignedAccess() { -#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_RISCV64 || \ - V8_TARGET_ARCH_RISCV32 +#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_RISCV32 return false; #elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390 || \ V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC || \ diff --git a/src/libsampler/sampler.cc b/src/libsampler/sampler.cc index f3516e3dd4..1a947dc0d5 100644 --- a/src/libsampler/sampler.cc +++ b/src/libsampler/sampler.cc @@ -422,10 +422,6 @@ void SignalHandler::FillRegisterState(void* context, RegisterState* state) { state->fp = reinterpret_cast(mcontext.regs[29]); // LR is an alias for x30. state->lr = reinterpret_cast(mcontext.regs[30]); -#elif V8_HOST_ARCH_MIPS - state->pc = reinterpret_cast(mcontext.pc); - state->sp = reinterpret_cast(mcontext.gregs[29]); - state->fp = reinterpret_cast(mcontext.gregs[30]); #elif V8_HOST_ARCH_MIPS64 state->pc = reinterpret_cast(mcontext.pc); state->sp = reinterpret_cast(mcontext.gregs[29]); diff --git a/src/logging/log.cc b/src/logging/log.cc index 99fb71f058..b406ab4a17 100644 --- a/src/logging/log.cc +++ b/src/logging/log.cc @@ -692,8 +692,6 @@ void LowLevelLogger::LogCodeInfo() { const char arch[] = "ppc"; #elif V8_TARGET_ARCH_PPC64 const char arch[] = "ppc64"; -#elif V8_TARGET_ARCH_MIPS - const char arch[] = "mips"; #elif V8_TARGET_ARCH_LOONG64 const char arch[] = "loong64"; #elif V8_TARGET_ARCH_ARM64 diff --git a/src/objects/code.cc b/src/objects/code.cc index aff0c05f5f..37092fd10b 100644 --- a/src/objects/code.cc +++ b/src/objects/code.cc @@ -381,11 +381,10 @@ bool Code::IsIsolateIndependent(Isolate* isolate) { #if defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64) || \ defined(V8_TARGET_ARCH_MIPS64) return RelocIterator(*this, kModeMask).done(); -#elif defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \ - defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) || \ - defined(V8_TARGET_ARCH_S390) || defined(V8_TARGET_ARCH_IA32) || \ - defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_LOONG64) || \ - defined(V8_TARGET_ARCH_RISCV32) +#elif defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \ + defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_S390) || \ + defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_RISCV64) || \ + defined(V8_TARGET_ARCH_LOONG64) || defined(V8_TARGET_ARCH_RISCV32) for (RelocIterator it(*this, kModeMask); !it.done(); it.next()) { // On these platforms we emit relative builtin-to-builtin // jumps for isolate independent builtins in the snapshot. 
They are later diff --git a/src/profiler/tick-sample.cc b/src/profiler/tick-sample.cc index 766b418835..5b27af707f 100644 --- a/src/profiler/tick-sample.cc +++ b/src/profiler/tick-sample.cc @@ -106,7 +106,7 @@ bool SimulatorHelper::FillRegisters(Isolate* isolate, state->sp = reinterpret_cast(simulator->sp()); state->fp = reinterpret_cast(simulator->fp()); state->lr = reinterpret_cast(simulator->lr()); -#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_LOONG64 +#elif V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_LOONG64 if (!simulator->has_bad_pc()) { state->pc = reinterpret_cast(simulator->get_pc()); } diff --git a/src/regexp/mips/regexp-macro-assembler-mips.cc b/src/regexp/mips/regexp-macro-assembler-mips.cc deleted file mode 100644 index d884d71e4c..0000000000 --- a/src/regexp/mips/regexp-macro-assembler-mips.cc +++ /dev/null @@ -1,1359 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#if V8_TARGET_ARCH_MIPS - -#include "src/regexp/mips/regexp-macro-assembler-mips.h" - -#include "src/codegen/macro-assembler.h" -#include "src/codegen/mips/assembler-mips-inl.h" -#include "src/logging/log.h" -#include "src/objects/code-inl.h" -#include "src/regexp/regexp-stack.h" -#include "src/snapshot/embedded/embedded-data-inl.h" - -namespace v8 { -namespace internal { - -/* - * This assembler uses the following register assignment convention - * - s0 : Unused. - * - s1 : Pointer to current Code object including heap object tag. - * - s2 : Current position in input, as negative offset from end of string. - * Please notice that this is the byte offset, not the character offset! - * - s5 : Currently loaded character. Must be loaded using - * LoadCurrentCharacter before using any of the dispatch methods. - * - s6 : Points to tip of backtrack stack - * - s7 : End of input (points to byte after last character in input). - * - fp : Frame pointer. Used to access arguments, local variables and - * RegExp registers. - * - sp : Points to tip of C stack. - * - * The remaining registers are free for computations. - * Each call to a public method should retain this convention. - * - * The stack will have the following structure: - * - * - fp[56] Isolate* isolate (address of the current isolate) - * - fp[52] direct_call (if 1, direct call from JavaScript code, - * if 0, call through the runtime system). - * - fp[48] capture array size (may fit multiple sets of matches) - * - fp[44] int* capture_array (int[num_saved_registers_], for output). - * --- sp when called --- - * - fp[40] return address (lr). - * - fp[36] old frame pointer (r11). - * - fp[0..32] backup of registers s0..s7. - * --- frame pointer ---- - * - fp[-4] end of input (address of end of string). - * - fp[-8] start of input (address of first character in string). - * - fp[-12] start index (character index of start). - * - fp[-16] void* input_string (location of a handle containing the string). - * - fp[-20] success counter (only for global regexps to count matches). - * - fp[-24] Offset of location before start of input (effectively character - * position -1). Used to initialize capture registers to a - * non-position. - * - fp[-28] At start (if 1, we are starting at the start of the - * string, otherwise 0) - * - fp[-32] register 0 (Only positions must be stored in the first - * - register 1 num_saved_registers_ registers) - * - ... 
- * - register num_registers-1 - * --- sp --- - * - * The first num_saved_registers_ registers are initialized to point to - * "character -1" in the string (i.e., char_size() bytes before the first - * character of the string). The remaining registers start out as garbage. - * - * The data up to the return address must be placed there by the calling - * code and the remaining arguments are passed in registers, e.g. by calling the - * code entry as cast to a function with the signature: - * int (*match)(String input_string, - * int start_index, - * Address start, - * Address end, - * int* capture_output_array, - * int num_capture_registers, - * bool direct_call = false, - * Isolate* isolate); - * The call is performed by NativeRegExpMacroAssembler::Execute() - * (in regexp-macro-assembler.cc) via the GeneratedCode wrapper. - */ - -#define __ ACCESS_MASM(masm_) - -const int RegExpMacroAssemblerMIPS::kRegExpCodeSize; - -RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(Isolate* isolate, Zone* zone, - Mode mode, - int registers_to_save) - : NativeRegExpMacroAssembler(isolate, zone), - masm_(std::make_unique( - isolate, CodeObjectRequired::kYes, - NewAssemblerBuffer(kRegExpCodeSize))), - no_root_array_scope_(masm_.get()), - mode_(mode), - num_registers_(registers_to_save), - num_saved_registers_(registers_to_save), - entry_label_(), - start_label_(), - success_label_(), - backtrack_label_(), - exit_label_(), - internal_failure_label_() { - DCHECK_EQ(0, registers_to_save % 2); - __ jmp(&entry_label_); // We'll write the entry code later. - // If the code gets too big or corrupted, an internal exception will be - // raised, and we will exit right away. - __ bind(&internal_failure_label_); - __ li(v0, Operand(FAILURE)); - __ Ret(); - __ bind(&start_label_); // And then continue from here. -} - -RegExpMacroAssemblerMIPS::~RegExpMacroAssemblerMIPS() { - // Unuse labels in case we throw away the assembler without calling GetCode. - entry_label_.Unuse(); - start_label_.Unuse(); - success_label_.Unuse(); - backtrack_label_.Unuse(); - exit_label_.Unuse(); - check_preempt_label_.Unuse(); - stack_overflow_label_.Unuse(); - internal_failure_label_.Unuse(); - fallback_label_.Unuse(); -} - - -int RegExpMacroAssemblerMIPS::stack_limit_slack() { - return RegExpStack::kStackLimitSlack; -} - - -void RegExpMacroAssemblerMIPS::AdvanceCurrentPosition(int by) { - if (by != 0) { - __ Addu(current_input_offset(), - current_input_offset(), Operand(by * char_size())); - } -} - - -void RegExpMacroAssemblerMIPS::AdvanceRegister(int reg, int by) { - DCHECK_LE(0, reg); - DCHECK_GT(num_registers_, reg); - if (by != 0) { - __ lw(a0, register_location(reg)); - __ Addu(a0, a0, Operand(by)); - __ sw(a0, register_location(reg)); - } -} - - -void RegExpMacroAssemblerMIPS::Backtrack() { - CheckPreemption(); - if (has_backtrack_limit()) { - Label next; - __ Lw(a0, MemOperand(frame_pointer(), kBacktrackCount)); - __ Addu(a0, a0, Operand(1)); - __ Sw(a0, MemOperand(frame_pointer(), kBacktrackCount)); - __ Branch(&next, ne, a0, Operand(backtrack_limit())); - - // Backtrack limit exceeded. - if (can_fallback()) { - __ jmp(&fallback_label_); - } else { - // Can't fallback, so we treat it as a failed match. - Fail(); - } - - __ bind(&next); - } - // Pop Code offset from backtrack stack, add Code and jump to location. 
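- // (Note, added for clarity: the backtrack stack holds offsets relative
- // to the Code object rather than absolute addresses, so entries stay
- // valid even if GC moves the code; see PushBacktrack below, which
- // pushes the offset.)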
- Pop(a0); - __ Addu(a0, a0, code_pointer()); - __ Jump(a0); -} - - -void RegExpMacroAssemblerMIPS::Bind(Label* label) { - __ bind(label); -} - - -void RegExpMacroAssemblerMIPS::CheckCharacter(uint32_t c, Label* on_equal) { - BranchOrBacktrack(on_equal, eq, current_character(), Operand(c)); -} - -void RegExpMacroAssemblerMIPS::CheckCharacterGT(base::uc16 limit, - Label* on_greater) { - BranchOrBacktrack(on_greater, gt, current_character(), Operand(limit)); -} - -void RegExpMacroAssemblerMIPS::CheckAtStart(int cp_offset, Label* on_at_start) { - __ lw(a1, MemOperand(frame_pointer(), kStringStartMinusOne)); - __ Addu(a0, current_input_offset(), - Operand(-char_size() + cp_offset * char_size())); - BranchOrBacktrack(on_at_start, eq, a0, Operand(a1)); -} - - -void RegExpMacroAssemblerMIPS::CheckNotAtStart(int cp_offset, - Label* on_not_at_start) { - __ lw(a1, MemOperand(frame_pointer(), kStringStartMinusOne)); - __ Addu(a0, current_input_offset(), - Operand(-char_size() + cp_offset * char_size())); - BranchOrBacktrack(on_not_at_start, ne, a0, Operand(a1)); -} - -void RegExpMacroAssemblerMIPS::CheckCharacterLT(base::uc16 limit, - Label* on_less) { - BranchOrBacktrack(on_less, lt, current_character(), Operand(limit)); -} - -void RegExpMacroAssemblerMIPS::CheckGreedyLoop(Label* on_equal) { - Label backtrack_non_equal; - __ lw(a0, MemOperand(backtrack_stackpointer(), 0)); - __ Branch(&backtrack_non_equal, ne, current_input_offset(), Operand(a0)); - __ Addu(backtrack_stackpointer(), - backtrack_stackpointer(), - Operand(kPointerSize)); - __ bind(&backtrack_non_equal); - BranchOrBacktrack(on_equal, eq, current_input_offset(), Operand(a0)); -} - -void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase( - int start_reg, bool read_backward, bool unicode, Label* on_no_match) { - Label fallthrough; - __ lw(a0, register_location(start_reg)); // Index of start of capture. - __ lw(a1, register_location(start_reg + 1)); // Index of end of capture. - __ Subu(a1, a1, a0); // Length of capture. - - // At this point, the capture registers are either both set or both cleared. - // If the capture length is zero, then the capture is either empty or cleared. - // Fall through in both cases. - __ Branch(&fallthrough, eq, a1, Operand(zero_reg)); - - if (read_backward) { - __ lw(t0, MemOperand(frame_pointer(), kStringStartMinusOne)); - __ Addu(t0, t0, a1); - BranchOrBacktrack(on_no_match, le, current_input_offset(), Operand(t0)); - } else { - __ Addu(t5, a1, current_input_offset()); - // Check that there are enough characters left in the input. - BranchOrBacktrack(on_no_match, gt, t5, Operand(zero_reg)); - } - - if (mode_ == LATIN1) { - Label success; - Label fail; - Label loop_check; - - // a0 - offset of start of capture. - // a1 - length of capture. - __ Addu(a0, a0, Operand(end_of_input_address())); - __ Addu(a2, end_of_input_address(), Operand(current_input_offset())); - if (read_backward) { - __ Subu(a2, a2, Operand(a1)); - } - __ Addu(a1, a0, Operand(a1)); - - // a0 - Address of start of capture. - // a1 - Address of end of capture. - // a2 - Address of current input position. - - Label loop; - __ bind(&loop); - __ lbu(a3, MemOperand(a0, 0)); - __ addiu(a0, a0, char_size()); - __ lbu(t0, MemOperand(a2, 0)); - __ addiu(a2, a2, char_size()); - - __ Branch(&loop_check, eq, t0, Operand(a3)); - - // Mismatch, try case-insensitive match (converting letters to lower-case). - __ Or(a3, a3, Operand(0x20)); // Convert capture character to lower-case. - __ Or(t0, t0, Operand(0x20)); // Also convert input character. 
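- // (Added for clarity: OR-ing with 0x20 folds ASCII case because upper-
- // and lower-case letters differ only in bit 5, e.g. 'A' 0x41 | 0x20 ==
- // 'a' 0x61; the range checks below reject folded values where that
- // mapping is not a valid case equivalence.)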
- __ Branch(&fail, ne, t0, Operand(a3)); - __ Subu(a3, a3, Operand('a')); - __ Branch(&loop_check, ls, a3, Operand('z' - 'a')); - // Latin-1: Check for values in range [224,254] but not 247. - __ Subu(a3, a3, Operand(224 - 'a')); - // Weren't Latin-1 letters. - __ Branch(&fail, hi, a3, Operand(254 - 224)); - // Check for 247. - __ Branch(&fail, eq, a3, Operand(247 - 224)); - - __ bind(&loop_check); - __ Branch(&loop, lt, a0, Operand(a1)); - __ jmp(&success); - - __ bind(&fail); - GoTo(on_no_match); - - __ bind(&success); - // Compute new value of character position after the matched part. - __ Subu(current_input_offset(), a2, end_of_input_address()); - if (read_backward) { - __ lw(t0, register_location(start_reg)); // Index of start of capture. - __ lw(t5, register_location(start_reg + 1)); // Index of end of capture. - __ Addu(current_input_offset(), current_input_offset(), Operand(t0)); - __ Subu(current_input_offset(), current_input_offset(), Operand(t5)); - } - } else { - DCHECK_EQ(UC16, mode_); - - int argument_count = 4; - __ PrepareCallCFunction(argument_count, a2); - - // a0 - offset of start of capture. - // a1 - length of capture. - - // Put arguments into arguments registers. - // Parameters are - // a0: Address byte_offset1 - Address captured substring's start. - // a1: Address byte_offset2 - Address of current character position. - // a2: size_t byte_length - length of capture in bytes(!). - // a3: Isolate* isolate. - - // Address of start of capture. - __ Addu(a0, a0, Operand(end_of_input_address())); - // Length of capture. - __ mov(a2, a1); - // Save length in callee-save register for use on return. - __ mov(s3, a1); - // Address of current input position. - __ Addu(a1, current_input_offset(), Operand(end_of_input_address())); - if (read_backward) { - __ Subu(a1, a1, Operand(s3)); - } - // Isolate. - __ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate()))); - - { - AllowExternalCallThatCantCauseGC scope(masm_.get()); - ExternalReference function = - unicode - ? ExternalReference::re_case_insensitive_compare_unicode() - : ExternalReference::re_case_insensitive_compare_non_unicode(); - __ CallCFunction(function, argument_count); - } - - // Check if function returned non-zero for success or zero for failure. - BranchOrBacktrack(on_no_match, eq, v0, Operand(zero_reg)); - // On success, advance position by length of capture. - if (read_backward) { - __ Subu(current_input_offset(), current_input_offset(), Operand(s3)); - } else { - __ Addu(current_input_offset(), current_input_offset(), Operand(s3)); - } - } - - __ bind(&fallthrough); -} - -void RegExpMacroAssemblerMIPS::CheckNotBackReference(int start_reg, - bool read_backward, - Label* on_no_match) { - Label fallthrough; - - // Find length of back-referenced capture. - __ lw(a0, register_location(start_reg)); - __ lw(a1, register_location(start_reg + 1)); - __ Subu(a1, a1, a0); // Length to check. - - // At this point, the capture registers are either both set or both cleared. - // If the capture length is zero, then the capture is either empty or cleared. - // Fall through in both cases. - __ Branch(&fallthrough, le, a1, Operand(zero_reg)); - - if (read_backward) { - __ lw(t0, MemOperand(frame_pointer(), kStringStartMinusOne)); - __ Addu(t0, t0, a1); - BranchOrBacktrack(on_no_match, le, current_input_offset(), Operand(t0)); - } else { - __ Addu(t5, a1, current_input_offset()); - // Check that there are enough characters left in the input. 
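- // (Added for clarity: current_input_offset() is a negative byte offset
- // from the end of the string, so the capture fits exactly when
- // length + offset <= 0; e.g. offset -4 with a 4-byte capture gives
- // t5 == 0, which still passes the check below.)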
- BranchOrBacktrack(on_no_match, gt, t5, Operand(zero_reg)); - } - - // a0 - offset of start of capture. - // a1 - length of capture. - __ Addu(a0, a0, Operand(end_of_input_address())); - __ Addu(a2, end_of_input_address(), Operand(current_input_offset())); - if (read_backward) { - __ Subu(a2, a2, Operand(a1)); - } - __ Addu(a1, a0, Operand(a1)); - - // a0 - Address of start of capture. - // a1 - Address of end of capture. - // a2 - Address of current input position. - - - Label loop; - __ bind(&loop); - if (mode_ == LATIN1) { - __ lbu(a3, MemOperand(a0, 0)); - __ addiu(a0, a0, char_size()); - __ lbu(t0, MemOperand(a2, 0)); - __ addiu(a2, a2, char_size()); - } else { - DCHECK(mode_ == UC16); - __ lhu(a3, MemOperand(a0, 0)); - __ addiu(a0, a0, char_size()); - __ lhu(t0, MemOperand(a2, 0)); - __ addiu(a2, a2, char_size()); - } - BranchOrBacktrack(on_no_match, ne, a3, Operand(t0)); - __ Branch(&loop, lt, a0, Operand(a1)); - - // Move current character position to position after match. - __ Subu(current_input_offset(), a2, end_of_input_address()); - if (read_backward) { - __ lw(t0, register_location(start_reg)); // Index of start of capture. - __ lw(t5, register_location(start_reg + 1)); // Index of end of capture. - __ Addu(current_input_offset(), current_input_offset(), Operand(t0)); - __ Subu(current_input_offset(), current_input_offset(), Operand(t5)); - } - __ bind(&fallthrough); -} - - -void RegExpMacroAssemblerMIPS::CheckNotCharacter(uint32_t c, - Label* on_not_equal) { - BranchOrBacktrack(on_not_equal, ne, current_character(), Operand(c)); -} - - -void RegExpMacroAssemblerMIPS::CheckCharacterAfterAnd(uint32_t c, - uint32_t mask, - Label* on_equal) { - __ And(a0, current_character(), Operand(mask)); - Operand rhs = (c == 0) ? Operand(zero_reg) : Operand(c); - BranchOrBacktrack(on_equal, eq, a0, rhs); -} - - -void RegExpMacroAssemblerMIPS::CheckNotCharacterAfterAnd(uint32_t c, - uint32_t mask, - Label* on_not_equal) { - __ And(a0, current_character(), Operand(mask)); - Operand rhs = (c == 0) ? Operand(zero_reg) : Operand(c); - BranchOrBacktrack(on_not_equal, ne, a0, rhs); -} - -void RegExpMacroAssemblerMIPS::CheckNotCharacterAfterMinusAnd( - base::uc16 c, base::uc16 minus, base::uc16 mask, Label* on_not_equal) { - DCHECK_GT(String::kMaxUtf16CodeUnit, minus); - __ Subu(a0, current_character(), Operand(minus)); - __ And(a0, a0, Operand(mask)); - BranchOrBacktrack(on_not_equal, ne, a0, Operand(c)); -} - -void RegExpMacroAssemblerMIPS::CheckCharacterInRange(base::uc16 from, - base::uc16 to, - Label* on_in_range) { - __ Subu(a0, current_character(), Operand(from)); - // Unsigned lower-or-same condition. - BranchOrBacktrack(on_in_range, ls, a0, Operand(to - from)); -} - -void RegExpMacroAssemblerMIPS::CheckCharacterNotInRange( - base::uc16 from, base::uc16 to, Label* on_not_in_range) { - __ Subu(a0, current_character(), Operand(from)); - // Unsigned higher condition. - BranchOrBacktrack(on_not_in_range, hi, a0, Operand(to - from)); -} - -void RegExpMacroAssemblerMIPS::CallIsCharacterInRangeArray( - const ZoneList* ranges) { - static const int kNumArguments = 3; - __ PrepareCallCFunction(kNumArguments, a0); - - __ mov(a0, current_character()); - __ li(a1, Operand(GetOrAddRangeArray(ranges))); - __ li(a2, Operand(ExternalReference::isolate_address(isolate()))); - - { - // We have a frame (set up in GetCode), but the assembler doesn't know. 
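- // (Added for clarity: StackFrame::MANUAL only records the fact for the
- // assembler's bookkeeping; it does not emit any prologue code itself.)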
- FrameScope scope(masm_.get(), StackFrame::MANUAL); - __ CallCFunction(ExternalReference::re_is_character_in_range_array(), - kNumArguments); - } - - __ li(code_pointer(), Operand(masm_->CodeObject())); -} - -bool RegExpMacroAssemblerMIPS::CheckCharacterInRangeArray( - const ZoneList* ranges, Label* on_in_range) { - CallIsCharacterInRangeArray(ranges); - BranchOrBacktrack(on_in_range, ne, v0, Operand(zero_reg)); - return true; -} - -bool RegExpMacroAssemblerMIPS::CheckCharacterNotInRangeArray( - const ZoneList* ranges, Label* on_not_in_range) { - CallIsCharacterInRangeArray(ranges); - BranchOrBacktrack(on_not_in_range, eq, v0, Operand(zero_reg)); - return true; -} - -void RegExpMacroAssemblerMIPS::CheckBitInTable( - Handle table, - Label* on_bit_set) { - __ li(a0, Operand(table)); - if (mode_ != LATIN1 || kTableMask != String::kMaxOneByteCharCode) { - __ And(a1, current_character(), Operand(kTableSize - 1)); - __ Addu(a0, a0, a1); - } else { - __ Addu(a0, a0, current_character()); - } - - __ lbu(a0, FieldMemOperand(a0, ByteArray::kHeaderSize)); - BranchOrBacktrack(on_bit_set, ne, a0, Operand(zero_reg)); -} - -bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass( - StandardCharacterSet type, Label* on_no_match) { - // Range checks (c in min..max) are generally implemented by an unsigned - // (c - min) <= (max - min) check. - // TODO(jgruber): No custom implementation (yet): s(UC16), S(UC16). - switch (type) { - case StandardCharacterSet::kWhitespace: - // Match space-characters. - if (mode_ == LATIN1) { - // One byte space characters are '\t'..'\r', ' ' and \u00a0. - Label success; - __ Branch(&success, eq, current_character(), Operand(' ')); - // Check range 0x09..0x0D. - __ Subu(a0, current_character(), Operand('\t')); - __ Branch(&success, ls, a0, Operand('\r' - '\t')); - // \u00a0 (NBSP). - BranchOrBacktrack(on_no_match, ne, a0, Operand(0x00A0 - '\t')); - __ bind(&success); - return true; - } - return false; - case StandardCharacterSet::kNotWhitespace: - // The emitted code for generic character classes is good enough. - return false; - case StandardCharacterSet::kDigit: - // Match Latin1 digits ('0'..'9'). - __ Subu(a0, current_character(), Operand('0')); - BranchOrBacktrack(on_no_match, hi, a0, Operand('9' - '0')); - return true; - case StandardCharacterSet::kNotDigit: - // Match non Latin1-digits. - __ Subu(a0, current_character(), Operand('0')); - BranchOrBacktrack(on_no_match, ls, a0, Operand('9' - '0')); - return true; - case StandardCharacterSet::kNotLineTerminator: { - // Match non-newlines (not 0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029). - __ Xor(a0, current_character(), Operand(0x01)); - // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C. - __ Subu(a0, a0, Operand(0x0B)); - BranchOrBacktrack(on_no_match, ls, a0, Operand(0x0C - 0x0B)); - if (mode_ == UC16) { - // Compare original value to 0x2028 and 0x2029, using the already - // computed (current_char ^ 0x01 - 0x0B). I.e., check for - // 0x201D (0x2028 - 0x0B) or 0x201E. - __ Subu(a0, a0, Operand(0x2028 - 0x0B)); - BranchOrBacktrack(on_no_match, ls, a0, Operand(1)); - } - return true; - } - case StandardCharacterSet::kLineTerminator: { - // Match newlines (0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029). - __ Xor(a0, current_character(), Operand(0x01)); - // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C. 
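- // (Worked example, added for clarity: '\n' is 0x0A, and 0x0A ^ 0x01 ==
- // 0x0B, so after subtracting 0x0B the unsigned comparison against
- // 0x0C - 0x0B == 1 accepts exactly 0x0A and 0x0D.)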
- __ Subu(a0, a0, Operand(0x0B)); - if (mode_ == LATIN1) { - BranchOrBacktrack(on_no_match, hi, a0, Operand(0x0C - 0x0B)); - } else { - Label done; - BranchOrBacktrack(&done, ls, a0, Operand(0x0C - 0x0B)); - // Compare original value to 0x2028 and 0x2029, using the already - // computed (current_char ^ 0x01 - 0x0B). I.e., check for - // 0x201D (0x2028 - 0x0B) or 0x201E. - __ Subu(a0, a0, Operand(0x2028 - 0x0B)); - BranchOrBacktrack(on_no_match, hi, a0, Operand(1)); - __ bind(&done); - } - return true; - } - case StandardCharacterSet::kWord: { - if (mode_ != LATIN1) { - // Table is 256 entries, so all Latin1 characters can be tested. - BranchOrBacktrack(on_no_match, hi, current_character(), Operand('z')); - } - ExternalReference map = ExternalReference::re_word_character_map(); - __ li(a0, Operand(map)); - __ Addu(a0, a0, current_character()); - __ lbu(a0, MemOperand(a0, 0)); - BranchOrBacktrack(on_no_match, eq, a0, Operand(zero_reg)); - return true; - } - case StandardCharacterSet::kNotWord: { - Label done; - if (mode_ != LATIN1) { - // Table is 256 entries, so all Latin1 characters can be tested. - __ Branch(&done, hi, current_character(), Operand('z')); - } - ExternalReference map = ExternalReference::re_word_character_map(); - __ li(a0, Operand(map)); - __ Addu(a0, a0, current_character()); - __ lbu(a0, MemOperand(a0, 0)); - BranchOrBacktrack(on_no_match, ne, a0, Operand(zero_reg)); - if (mode_ != LATIN1) { - __ bind(&done); - } - return true; - } - case StandardCharacterSet::kEverything: - // Match any character. - return true; - } -} - -void RegExpMacroAssemblerMIPS::Fail() { - __ li(v0, Operand(FAILURE)); - __ jmp(&exit_label_); -} - -void RegExpMacroAssemblerMIPS::LoadRegExpStackPointerFromMemory(Register dst) { - ExternalReference ref = - ExternalReference::address_of_regexp_stack_stack_pointer(isolate()); - __ li(dst, Operand(ref)); - __ Lw(dst, MemOperand(dst)); -} - -void RegExpMacroAssemblerMIPS::StoreRegExpStackPointerToMemory( - Register src, Register scratch) { - ExternalReference ref = - ExternalReference::address_of_regexp_stack_stack_pointer(isolate()); - __ li(scratch, Operand(ref)); - __ Sw(src, MemOperand(scratch)); -} - -void RegExpMacroAssemblerMIPS::PushRegExpBasePointer(Register stack_pointer, - Register scratch) { - ExternalReference ref = - ExternalReference::address_of_regexp_stack_memory_top_address(isolate()); - __ li(scratch, Operand(ref)); - __ Lw(scratch, MemOperand(scratch)); - __ Subu(scratch, stack_pointer, scratch); - __ Sw(scratch, MemOperand(frame_pointer(), kRegExpStackBasePointer)); -} - -void RegExpMacroAssemblerMIPS::PopRegExpBasePointer(Register stack_pointer_out, - Register scratch) { - ExternalReference ref = - ExternalReference::address_of_regexp_stack_memory_top_address(isolate()); - __ Lw(stack_pointer_out, - MemOperand(frame_pointer(), kRegExpStackBasePointer)); - __ li(scratch, Operand(ref)); - __ Lw(scratch, MemOperand(scratch)); - __ Addu(stack_pointer_out, stack_pointer_out, scratch); - StoreRegExpStackPointerToMemory(stack_pointer_out, scratch); -} - -Handle RegExpMacroAssemblerMIPS::GetCode(Handle source) { - Label return_v0; - if (masm_->has_exception()) { - // If the code gets corrupted due to long regular expressions and lack of - // space on trampolines, an internal exception flag is set. If this case - // is detected, we will jump into exit sequence right away. - __ bind_to(&entry_label_, internal_failure_label_.pos()); - } else { - // Finalize code - write the entry point code now we know how many - // registers we need. 
- - // Entry code: - __ bind(&entry_label_); - - // Tell the system that we have a stack frame. Because the type is MANUAL, - // no is generated. - FrameScope scope(masm_.get(), StackFrame::MANUAL); - - // Actually emit code to start a new stack frame. - // Push arguments - // Save callee-save registers. - // Start new stack frame. - // Store link register in existing stack-cell. - // Order here should correspond to order of offset constants in header file. - RegList registers_to_retain = {s0, s1, s2, s3, s4, s5, s6, s7, fp}; - RegList argument_registers = {a0, a1, a2, a3}; - __ MultiPush(argument_registers | registers_to_retain | ra); - // Set frame pointer in space for it if this is not a direct call - // from generated code. - __ Addu(frame_pointer(), sp, Operand(4 * kPointerSize)); - - static_assert(kSuccessfulCaptures == kInputString - kSystemPointerSize); - __ mov(a0, zero_reg); - __ push(a0); // Make room for success counter and initialize it to 0. - static_assert(kStringStartMinusOne == - kSuccessfulCaptures - kSystemPointerSize); - __ push(a0); // Make room for "string start - 1" constant. - static_assert(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize); - __ push(a0); - static_assert(kRegExpStackBasePointer == - kBacktrackCount - kSystemPointerSize); - __ push(a0); // The regexp stack base ptr. - - // Initialize backtrack stack pointer. It must not be clobbered from here - // on. Note the backtrack_stackpointer is callee-saved. - static_assert(backtrack_stackpointer() == s7); - LoadRegExpStackPointerFromMemory(backtrack_stackpointer()); - - // Store the regexp base pointer - we'll later restore it / write it to - // memory when returning from this irregexp code object. - PushRegExpBasePointer(backtrack_stackpointer(), a1); - - { - // Check if we have space on the stack for registers. - Label stack_limit_hit, stack_ok; - - ExternalReference stack_limit = - ExternalReference::address_of_jslimit(masm_->isolate()); - __ li(a0, Operand(stack_limit)); - __ lw(a0, MemOperand(a0)); - __ Subu(a0, sp, a0); - // Handle it if the stack pointer is already below the stack limit. - __ Branch(&stack_limit_hit, le, a0, Operand(zero_reg)); - // Check if there is room for the variable number of registers above - // the stack limit. - __ Branch(&stack_ok, hs, a0, Operand(num_registers_ * kPointerSize)); - // Exit with OutOfMemory exception. There is not enough space on the stack - // for our working registers. - __ li(v0, Operand(EXCEPTION)); - __ jmp(&return_v0); - - __ bind(&stack_limit_hit); - CallCheckStackGuardState(a0); - // If returned value is non-zero, we exit with the returned value as - // result. - __ Branch(&return_v0, ne, v0, Operand(zero_reg)); - - __ bind(&stack_ok); - } - - // Allocate space on stack for registers. - __ Subu(sp, sp, Operand(num_registers_ * kPointerSize)); - // Load string end. - __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd)); - // Load input start. - __ lw(a0, MemOperand(frame_pointer(), kInputStart)); - // Find negative length (offset of start relative to end). - __ Subu(current_input_offset(), a0, end_of_input_address()); - // Set a0 to address of char before start of the input string - // (effectively string position -1). - __ lw(a1, MemOperand(frame_pointer(), kStartIndex)); - __ Subu(a0, current_input_offset(), Operand(char_size())); - __ sll(t5, a1, (mode_ == UC16) ? 1 : 0); - __ Subu(a0, a0, t5); - // Store this value in a local variable, for use when clearing - // position registers. 
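- // (Added for clarity: a capture register cleared to this "start minus
- // one" offset makes a non-participating group come out as -1 in the
- // output array after the capture copy arithmetic below.)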
- __ sw(a0, MemOperand(frame_pointer(), kStringStartMinusOne)); - - // Initialize code pointer register - __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE); - - Label load_char_start_regexp; - { - Label start_regexp; - // Load newline if index is at start, previous character otherwise. - __ Branch(&load_char_start_regexp, ne, a1, Operand(zero_reg)); - __ li(current_character(), Operand('\n')); - __ jmp(&start_regexp); - - // Global regexp restarts matching here. - __ bind(&load_char_start_regexp); - // Load previous char as initial value of current character register. - LoadCurrentCharacterUnchecked(-1, 1); - __ bind(&start_regexp); - } - - // Initialize on-stack registers. - if (num_saved_registers_ > 0) { // Always is, if generated from a regexp. - // Fill saved registers with initial value = start offset - 1. - if (num_saved_registers_ > 8) { - // Address of register 0. - __ Addu(a1, frame_pointer(), Operand(kRegisterZero)); - __ li(a2, Operand(num_saved_registers_)); - Label init_loop; - __ bind(&init_loop); - __ sw(a0, MemOperand(a1)); - __ Addu(a1, a1, Operand(-kPointerSize)); - __ Subu(a2, a2, Operand(1)); - __ Branch(&init_loop, ne, a2, Operand(zero_reg)); - } else { - for (int i = 0; i < num_saved_registers_; i++) { - __ sw(a0, register_location(i)); - } - } - } - - __ jmp(&start_label_); - - - // Exit code: - if (success_label_.is_linked()) { - // Save captures when successful. - __ bind(&success_label_); - if (num_saved_registers_ > 0) { - // Copy captures to output. - __ lw(a1, MemOperand(frame_pointer(), kInputStart)); - __ lw(a0, MemOperand(frame_pointer(), kRegisterOutput)); - __ lw(a2, MemOperand(frame_pointer(), kStartIndex)); - __ Subu(a1, end_of_input_address(), a1); - // a1 is length of input in bytes. - if (mode_ == UC16) { - __ srl(a1, a1, 1); - } - // a1 is length of input in characters. - __ Addu(a1, a1, Operand(a2)); - // a1 is length of string in characters. - - DCHECK_EQ(0, num_saved_registers_ % 2); - // Always an even number of capture registers. This allows us to - // unroll the loop once to add an operation between a load of a register - // and the following use of that register. - for (int i = 0; i < num_saved_registers_; i += 2) { - __ lw(a2, register_location(i)); - __ lw(a3, register_location(i + 1)); - if (i == 0 && global_with_zero_length_check()) { - // Keep capture start in a4 for the zero-length check later. - __ mov(t7, a2); - } - if (mode_ == UC16) { - __ sra(a2, a2, 1); - __ Addu(a2, a2, a1); - __ sra(a3, a3, 1); - __ Addu(a3, a3, a1); - } else { - __ Addu(a2, a1, Operand(a2)); - __ Addu(a3, a1, Operand(a3)); - } - __ sw(a2, MemOperand(a0)); - __ Addu(a0, a0, kPointerSize); - __ sw(a3, MemOperand(a0)); - __ Addu(a0, a0, kPointerSize); - } - } - - if (global()) { - // Restart matching if the regular expression is flagged as global. - __ lw(a0, MemOperand(frame_pointer(), kSuccessfulCaptures)); - __ lw(a1, MemOperand(frame_pointer(), kNumOutputRegisters)); - __ lw(a2, MemOperand(frame_pointer(), kRegisterOutput)); - // Increment success counter. - __ Addu(a0, a0, 1); - __ sw(a0, MemOperand(frame_pointer(), kSuccessfulCaptures)); - // Capture results have been stored, so the number of remaining global - // output registers is reduced by the number of stored captures. - __ Subu(a1, a1, num_saved_registers_); - // Check whether we have enough room for another set of capture results. 
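- // (Example, added for clarity: with num_saved_registers_ == 4 and 6
- // output slots left, a1 becomes 2 and the branch below returns instead
- // of restarting the match.)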
- __ mov(v0, a0); - __ Branch(&return_v0, lt, a1, Operand(num_saved_registers_)); - - __ sw(a1, MemOperand(frame_pointer(), kNumOutputRegisters)); - // Advance the location for output. - __ Addu(a2, a2, num_saved_registers_ * kPointerSize); - __ sw(a2, MemOperand(frame_pointer(), kRegisterOutput)); - - // Prepare a0 to initialize registers with its value in the next run. - __ lw(a0, MemOperand(frame_pointer(), kStringStartMinusOne)); - - // Restore the original regexp stack pointer value (effectively, pop the - // stored base pointer). - PopRegExpBasePointer(backtrack_stackpointer(), a2); - - if (global_with_zero_length_check()) { - // Special case for zero-length matches. - // t7: capture start index - // Not a zero-length match, restart. - __ Branch( - &load_char_start_regexp, ne, current_input_offset(), Operand(t7)); - // Offset from the end is zero if we already reached the end. - __ Branch(&exit_label_, eq, current_input_offset(), - Operand(zero_reg)); - // Advance current position after a zero-length match. - Label advance; - __ bind(&advance); - __ Addu(current_input_offset(), current_input_offset(), - Operand((mode_ == UC16) ? 2 : 1)); - if (global_unicode()) CheckNotInSurrogatePair(0, &advance); - } - - __ Branch(&load_char_start_regexp); - } else { - __ li(v0, Operand(SUCCESS)); - } - } - // Exit and return v0. - __ bind(&exit_label_); - if (global()) { - __ lw(v0, MemOperand(frame_pointer(), kSuccessfulCaptures)); - } - - __ bind(&return_v0); - // Restore the original regexp stack pointer value (effectively, pop the - // stored base pointer). - PopRegExpBasePointer(backtrack_stackpointer(), a1); - - // Skip sp past regexp registers and local variables.. - __ mov(sp, frame_pointer()); - // Restore registers s0..s7 and return (restoring ra to pc). - __ MultiPop(registers_to_retain | ra); - __ Ret(); - - // Backtrack code (branch target for conditional backtracks). - if (backtrack_label_.is_linked()) { - __ bind(&backtrack_label_); - Backtrack(); - } - - Label exit_with_exception; - - // Preempt-code. - if (check_preempt_label_.is_linked()) { - SafeCallTarget(&check_preempt_label_); - StoreRegExpStackPointerToMemory(backtrack_stackpointer(), a0); - CallCheckStackGuardState(a0); - // If returning non-zero, we should end execution with the given - // result as return value. - __ Branch(&return_v0, ne, v0, Operand(zero_reg)); - - LoadRegExpStackPointerFromMemory(backtrack_stackpointer()); - - // String might have moved: Reload end of string from frame. - __ lw(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd)); - SafeReturn(); - } - - // Backtrack stack overflow code. - if (stack_overflow_label_.is_linked()) { - SafeCallTarget(&stack_overflow_label_); - StoreRegExpStackPointerToMemory(backtrack_stackpointer(), a0); - // Reached if the backtrack-stack limit has been hit. - - // Call GrowStack(isolate). - static constexpr int kNumArguments = 1; - __ PrepareCallCFunction(kNumArguments, a0); - __ li(a0, Operand(ExternalReference::isolate_address(masm_->isolate()))); - ExternalReference grow_stack = ExternalReference::re_grow_stack(); - __ CallCFunction(grow_stack, kNumArguments); - // If nullptr is returned, we have failed to grow the stack, and must exit - // with a stack-overflow exception. - __ Branch(&exit_with_exception, eq, v0, Operand(zero_reg)); - // Otherwise use return value as new stack pointer. - __ mov(backtrack_stackpointer(), v0); - SafeReturn(); - } - - if (exit_with_exception.is_linked()) { - // If any of the code above needed to exit with an exception. 
- __ bind(&exit_with_exception); - // Exit with Result EXCEPTION(-1) to signal thrown exception. - __ li(v0, Operand(EXCEPTION)); - __ jmp(&return_v0); - } - - if (fallback_label_.is_linked()) { - __ bind(&fallback_label_); - __ li(v0, Operand(FALLBACK_TO_EXPERIMENTAL)); - __ jmp(&return_v0); - } - } - - CodeDesc code_desc; - masm_->GetCode(isolate(), &code_desc); - Handle code = - Factory::CodeBuilder(isolate(), code_desc, CodeKind::REGEXP) - .set_self_reference(masm_->CodeObject()) - .Build(); - LOG(masm_->isolate(), - RegExpCodeCreateEvent(Handle::cast(code), source)); - return Handle::cast(code); -} - - -void RegExpMacroAssemblerMIPS::GoTo(Label* to) { - if (to == nullptr) { - Backtrack(); - return; - } - __ jmp(to); - return; -} - - -void RegExpMacroAssemblerMIPS::IfRegisterGE(int reg, - int comparand, - Label* if_ge) { - __ lw(a0, register_location(reg)); - BranchOrBacktrack(if_ge, ge, a0, Operand(comparand)); -} - - -void RegExpMacroAssemblerMIPS::IfRegisterLT(int reg, - int comparand, - Label* if_lt) { - __ lw(a0, register_location(reg)); - BranchOrBacktrack(if_lt, lt, a0, Operand(comparand)); -} - - -void RegExpMacroAssemblerMIPS::IfRegisterEqPos(int reg, - Label* if_eq) { - __ lw(a0, register_location(reg)); - BranchOrBacktrack(if_eq, eq, a0, Operand(current_input_offset())); -} - - -RegExpMacroAssembler::IrregexpImplementation - RegExpMacroAssemblerMIPS::Implementation() { - return kMIPSImplementation; -} - - -void RegExpMacroAssemblerMIPS::PopCurrentPosition() { - Pop(current_input_offset()); -} - - -void RegExpMacroAssemblerMIPS::PopRegister(int register_index) { - Pop(a0); - __ sw(a0, register_location(register_index)); -} - - -void RegExpMacroAssemblerMIPS::PushBacktrack(Label* label) { - if (label->is_bound()) { - int target = label->pos(); - __ li(a0, Operand(target + Code::kHeaderSize - kHeapObjectTag)); - } else { - Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_.get()); - Label after_constant; - __ Branch(&after_constant); - int offset = masm_->pc_offset(); - int cp_offset = offset + Code::kHeaderSize - kHeapObjectTag; - __ emit(0); - masm_->label_at_put(label, offset); - __ bind(&after_constant); - if (is_int16(cp_offset)) { - __ lw(a0, MemOperand(code_pointer(), cp_offset)); - } else { - __ Addu(a0, code_pointer(), cp_offset); - __ lw(a0, MemOperand(a0, 0)); - } - } - Push(a0); - CheckStackLimit(); -} - - -void RegExpMacroAssemblerMIPS::PushCurrentPosition() { - Push(current_input_offset()); -} - - -void RegExpMacroAssemblerMIPS::PushRegister(int register_index, - StackCheckFlag check_stack_limit) { - __ lw(a0, register_location(register_index)); - Push(a0); - if (check_stack_limit) CheckStackLimit(); -} - - -void RegExpMacroAssemblerMIPS::ReadCurrentPositionFromRegister(int reg) { - __ lw(current_input_offset(), register_location(reg)); -} - -void RegExpMacroAssemblerMIPS::WriteStackPointerToRegister(int reg) { - ExternalReference ref = - ExternalReference::address_of_regexp_stack_memory_top_address(isolate()); - __ li(a0, Operand(ref)); - __ Lw(a0, MemOperand(a0)); - __ Subu(a0, backtrack_stackpointer(), a0); - __ Sw(a0, register_location(reg)); -} - -void RegExpMacroAssemblerMIPS::ReadStackPointerFromRegister(int reg) { - ExternalReference ref = - ExternalReference::address_of_regexp_stack_memory_top_address(isolate()); - __ li(a0, Operand(ref)); - __ Lw(a0, MemOperand(a0)); - __ lw(backtrack_stackpointer(), register_location(reg)); - __ Addu(backtrack_stackpointer(), backtrack_stackpointer(), Operand(a0)); -} - - -void 
RegExpMacroAssemblerMIPS::SetCurrentPositionFromEnd(int by) { - Label after_position; - __ Branch(&after_position, - ge, - current_input_offset(), - Operand(-by * char_size())); - __ li(current_input_offset(), -by * char_size()); - // On RegExp code entry (where this operation is used), the character before - // the current position is expected to be already loaded. - // We have advanced the position, so it's safe to read backwards. - LoadCurrentCharacterUnchecked(-1, 1); - __ bind(&after_position); -} - - -void RegExpMacroAssemblerMIPS::SetRegister(int register_index, int to) { - DCHECK(register_index >= num_saved_registers_); // Reserved for positions! - __ li(a0, Operand(to)); - __ sw(a0, register_location(register_index)); -} - - -bool RegExpMacroAssemblerMIPS::Succeed() { - __ jmp(&success_label_); - return global(); -} - - -void RegExpMacroAssemblerMIPS::WriteCurrentPositionToRegister(int reg, - int cp_offset) { - if (cp_offset == 0) { - __ sw(current_input_offset(), register_location(reg)); - } else { - __ Addu(a0, current_input_offset(), Operand(cp_offset * char_size())); - __ sw(a0, register_location(reg)); - } -} - - -void RegExpMacroAssemblerMIPS::ClearRegisters(int reg_from, int reg_to) { - DCHECK(reg_from <= reg_to); - __ lw(a0, MemOperand(frame_pointer(), kStringStartMinusOne)); - for (int reg = reg_from; reg <= reg_to; reg++) { - __ sw(a0, register_location(reg)); - } -} - -bool RegExpMacroAssemblerMIPS::CanReadUnaligned() const { return false; } - -// Private methods: - -void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) { - DCHECK(!isolate()->IsGeneratingEmbeddedBuiltins()); - DCHECK(!masm_->options().isolate_independent_code); - - int stack_alignment = base::OS::ActivationFrameAlignment(); - - // Align the stack pointer and save the original sp value on the stack. - __ mov(scratch, sp); - __ Subu(sp, sp, Operand(kPointerSize)); - DCHECK(base::bits::IsPowerOfTwo(stack_alignment)); - __ And(sp, sp, Operand(-stack_alignment)); - __ sw(scratch, MemOperand(sp)); - - __ mov(a2, frame_pointer()); - // Code of self. - __ li(a1, Operand(masm_->CodeObject()), CONSTANT_SIZE); - - // We need to make room for the return address on the stack. - DCHECK(IsAligned(stack_alignment, kPointerSize)); - __ Subu(sp, sp, Operand(stack_alignment)); - - // The stack pointer now points to cell where the return address will be - // written. Arguments are in registers, meaning we treat the return address as - // argument 5. Since DirectCEntry will handle allocating space for the C - // argument slots, we don't need to care about that here. This is how the - // stack will look (sp meaning the value of sp at this moment): - // [sp + 3] - empty slot if needed for alignment. - // [sp + 2] - saved sp. - // [sp + 1] - second word reserved for return value. - // [sp + 0] - first word reserved for return value. - - // a0 will point to the return address, placed by DirectCEntry. - __ mov(a0, sp); - - ExternalReference stack_guard_check = - ExternalReference::re_check_stack_guard_state(); - __ li(t9, Operand(stack_guard_check)); - - EmbeddedData d = EmbeddedData::FromBlob(); - CHECK(Builtins::IsIsolateIndependent(Builtin::kDirectCEntry)); - Address entry = d.InstructionStartOfBuiltin(Builtin::kDirectCEntry); - __ li(kScratchReg, Operand(entry, RelocInfo::OFF_HEAP_TARGET)); - __ Call(kScratchReg); - - // DirectCEntry allocated space for the C argument slots so we have to - // drop them with the return address from the stack with loading saved sp. 
-  // At this point stack must look:
-  //  [sp + 7] - empty slot if needed for alignment.
-  //  [sp + 6] - saved sp.
-  //  [sp + 5] - second word reserved for return value.
-  //  [sp + 4] - first word reserved for return value.
-  //  [sp + 3] - C argument slot.
-  //  [sp + 2] - C argument slot.
-  //  [sp + 1] - C argument slot.
-  //  [sp + 0] - C argument slot.
-  __ lw(sp, MemOperand(sp, stack_alignment + kCArgsSlotsSize));
-
-  __ li(code_pointer(), Operand(masm_->CodeObject()));
-}
-
-
-// Helper function for reading a value out of a stack frame.
-template <typename T>
-static T& frame_entry(Address re_frame, int frame_offset) {
-  return reinterpret_cast<T&>(Memory<int32_t>(re_frame + frame_offset));
-}
-
-
-template <typename T>
-static T* frame_entry_address(Address re_frame, int frame_offset) {
-  return reinterpret_cast<T*>(re_frame + frame_offset);
-}
-
-int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
-                                                   Address raw_code,
-                                                   Address re_frame) {
-  Code re_code = Code::cast(Object(raw_code));
-  return NativeRegExpMacroAssembler::CheckStackGuardState(
-      frame_entry<Isolate*>(re_frame, kIsolate),
-      frame_entry<intptr_t>(re_frame, kStartIndex),
-      static_cast<RegExp::CallOrigin>(
-          frame_entry<intptr_t>(re_frame, kDirectCall)),
-      return_address, re_code,
-      frame_entry_address<Address>(re_frame, kInputString),
-      frame_entry_address<const byte*>(re_frame, kInputStart),
-      frame_entry_address<const byte*>(re_frame, kInputEnd));
-}
-
-
-MemOperand RegExpMacroAssemblerMIPS::register_location(int register_index) {
-  DCHECK(register_index < (1<<30));
-  if (num_registers_ <= register_index) {
-    num_registers_ = register_index + 1;
-  }
-  return MemOperand(frame_pointer(),
-                    kRegisterZero - register_index * kPointerSize);
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckPosition(int cp_offset,
-                                             Label* on_outside_input) {
-  if (cp_offset >= 0) {
-    BranchOrBacktrack(on_outside_input, ge, current_input_offset(),
-                      Operand(-cp_offset * char_size()));
-  } else {
-    __ lw(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
-    __ Addu(a0, current_input_offset(), Operand(cp_offset * char_size()));
-    BranchOrBacktrack(on_outside_input, le, a0, Operand(a1));
-  }
-}
-
-
-void RegExpMacroAssemblerMIPS::BranchOrBacktrack(Label* to,
-                                                 Condition condition,
-                                                 Register rs,
-                                                 const Operand& rt) {
-  if (condition == al) {  // Unconditional.
-    if (to == nullptr) {
-      Backtrack();
-      return;
-    }
-    __ jmp(to);
-    return;
-  }
-  if (to == nullptr) {
-    __ Branch(&backtrack_label_, condition, rs, rt);
-    return;
-  }
-  __ Branch(to, condition, rs, rt);
-}
-
-
-void RegExpMacroAssemblerMIPS::SafeCall(Label* to,
-                                        Condition cond,
-                                        Register rs,
-                                        const Operand& rt) {
-  __ BranchAndLink(to, cond, rs, rt);
-}
-
-
-void RegExpMacroAssemblerMIPS::SafeReturn() {
-  __ pop(ra);
-  __ Addu(t5, ra, Operand(masm_->CodeObject()));
-  __ Jump(t5);
-}
-
-
-void RegExpMacroAssemblerMIPS::SafeCallTarget(Label* name) {
-  __ bind(name);
-  __ Subu(ra, ra, Operand(masm_->CodeObject()));
-  __ push(ra);
-}
-
-
-void RegExpMacroAssemblerMIPS::Push(Register source) {
-  DCHECK(source != backtrack_stackpointer());
-  __ Addu(backtrack_stackpointer(),
-          backtrack_stackpointer(),
-          Operand(-kPointerSize));
-  __ sw(source, MemOperand(backtrack_stackpointer()));
-}
-
-
-void RegExpMacroAssemblerMIPS::Pop(Register target) {
-  DCHECK(target != backtrack_stackpointer());
-  __ lw(target, MemOperand(backtrack_stackpointer()));
-  __ Addu(backtrack_stackpointer(), backtrack_stackpointer(), kPointerSize);
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckPreemption() {
-  // Check for preemption.
-  ExternalReference stack_limit =
-      ExternalReference::address_of_jslimit(masm_->isolate());
-  __ li(a0, Operand(stack_limit));
-  __ lw(a0, MemOperand(a0));
-  SafeCall(&check_preempt_label_, ls, sp, Operand(a0));
-}
-
-
-void RegExpMacroAssemblerMIPS::CheckStackLimit() {
-  ExternalReference stack_limit =
-      ExternalReference::address_of_regexp_stack_limit_address(
-          masm_->isolate());
-
-  __ li(a0, Operand(stack_limit));
-  __ lw(a0, MemOperand(a0));
-  SafeCall(&stack_overflow_label_, ls, backtrack_stackpointer(), Operand(a0));
-}
-
-
-void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
-                                                             int characters) {
-  Register offset = current_input_offset();
-  if (cp_offset != 0) {
-    // t7 is not being used to store the capture start index at this point.
-    __ Addu(t7, current_input_offset(), Operand(cp_offset * char_size()));
-    offset = t7;
-  }
-  // We assume that we cannot do unaligned loads on MIPS, so this function
-  // must only be used to load a single character at a time.
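A C++ sketch of the single-character load generated below (an illustrative helper, not V8's API): Latin1 reads one zero-extended byte (lbu), UC16 one zero-extended 16-bit unit (lhu).

#include <cstdint>
#include <cstring>

// Returns the character at `at`, zero-extended to 32 bits.
inline uint32_t LoadOneChar(const uint8_t* at, bool latin1) {
  if (latin1) return *at;  // one byte, as lbu does
  uint16_t unit;
  std::memcpy(&unit, at, sizeof(unit));  // one halfword, as lhu does
  return unit;
}
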
-  DCHECK_EQ(1, characters);
-  __ Addu(t5, end_of_input_address(), Operand(offset));
-  if (mode_ == LATIN1) {
-    __ lbu(current_character(), MemOperand(t5, 0));
-  } else {
-    DCHECK_EQ(UC16, mode_);
-    __ lhu(current_character(), MemOperand(t5, 0));
-  }
-}
-
-
-#undef __
-
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/regexp/mips/regexp-macro-assembler-mips.h b/src/regexp/mips/regexp-macro-assembler-mips.h
deleted file mode 100644
index 79bbe4bc4d..0000000000
--- a/src/regexp/mips/regexp-macro-assembler-mips.h
+++ /dev/null
@@ -1,231 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_REGEXP_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
-#define V8_REGEXP_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
-
-#include "src/codegen/macro-assembler.h"
-#include "src/regexp/regexp-macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-class V8_EXPORT_PRIVATE RegExpMacroAssemblerMIPS
-    : public NativeRegExpMacroAssembler {
- public:
-  RegExpMacroAssemblerMIPS(Isolate* isolate, Zone* zone, Mode mode,
-                           int registers_to_save);
-  ~RegExpMacroAssemblerMIPS() override;
-  int stack_limit_slack() override;
-  void AdvanceCurrentPosition(int by) override;
-  void AdvanceRegister(int reg, int by) override;
-  void Backtrack() override;
-  void Bind(Label* label) override;
-  void CheckAtStart(int cp_offset, Label* on_at_start) override;
-  void CheckCharacter(uint32_t c, Label* on_equal) override;
-  void CheckCharacterAfterAnd(uint32_t c, uint32_t mask,
-                              Label* on_equal) override;
-  void CheckCharacterGT(base::uc16 limit, Label* on_greater) override;
-  void CheckCharacterLT(base::uc16 limit, Label* on_less) override;
-  // A "greedy loop" is a loop that is both greedy and with a simple
-  // body. It has a particularly simple implementation.
-  void CheckGreedyLoop(Label* on_tos_equals_current_position) override;
-  void CheckNotAtStart(int cp_offset, Label* on_not_at_start) override;
-  void CheckNotBackReference(int start_reg, bool read_backward,
-                             Label* on_no_match) override;
-  void CheckNotBackReferenceIgnoreCase(int start_reg, bool read_backward,
-                                       bool unicode,
-                                       Label* on_no_match) override;
-  void CheckNotCharacter(uint32_t c, Label* on_not_equal) override;
-  void CheckNotCharacterAfterAnd(uint32_t c, uint32_t mask,
-                                 Label* on_not_equal) override;
-  void CheckNotCharacterAfterMinusAnd(base::uc16 c, base::uc16 minus,
-                                      base::uc16 mask,
-                                      Label* on_not_equal) override;
-  void CheckCharacterInRange(base::uc16 from, base::uc16 to,
-                             Label* on_in_range) override;
-  void CheckCharacterNotInRange(base::uc16 from, base::uc16 to,
-                                Label* on_not_in_range) override;
-  bool CheckCharacterInRangeArray(const ZoneList<CharacterRange>* ranges,
-                                  Label* on_in_range) override;
-  bool CheckCharacterNotInRangeArray(const ZoneList<CharacterRange>* ranges,
-                                     Label* on_not_in_range) override;
-  void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set) override;
-
-  // Checks whether the given offset from the current position is before
-  // the end of the string.
-  void CheckPosition(int cp_offset, Label* on_outside_input) override;
-  bool CheckSpecialCharacterClass(StandardCharacterSet type,
-                                  Label* on_no_match) override;
-  void Fail() override;
-  Handle<HeapObject> GetCode(Handle<String> source) override;
-  void GoTo(Label* label) override;
-  void IfRegisterGE(int reg, int comparand, Label* if_ge) override;
-  void IfRegisterLT(int reg, int comparand, Label* if_lt) override;
-  void IfRegisterEqPos(int reg, Label* if_eq) override;
-  IrregexpImplementation Implementation() override;
-  void LoadCurrentCharacterUnchecked(int cp_offset,
-                                     int character_count) override;
-  void PopCurrentPosition() override;
-  void PopRegister(int register_index) override;
-  void PushBacktrack(Label* label) override;
-  void PushCurrentPosition() override;
-  void PushRegister(int register_index,
-                    StackCheckFlag check_stack_limit) override;
-  void ReadCurrentPositionFromRegister(int reg) override;
-  void ReadStackPointerFromRegister(int reg) override;
-  void SetCurrentPositionFromEnd(int by) override;
-  void SetRegister(int register_index, int to) override;
-  bool Succeed() override;
-  void WriteCurrentPositionToRegister(int reg, int cp_offset) override;
-  void ClearRegisters(int reg_from, int reg_to) override;
-  void WriteStackPointerToRegister(int reg) override;
-  bool CanReadUnaligned() const override;
-
-  // Called from RegExp if the stack-guard is triggered.
-  // If the code object is relocated, the return address is fixed before
-  // returning.
-  // {raw_code} is an Address because this is called via ExternalReference.
-  static int CheckStackGuardState(Address* return_address, Address raw_code,
-                                  Address re_frame);
-
- private:
-  // Offsets from frame_pointer() of function parameters and stored registers.
-  static const int kFramePointer = 0;
-
-  // Above the frame pointer - Stored registers and stack passed parameters.
-  static const int kStoredRegisters = kFramePointer;
-  // Return address (stored from link register, read into pc on return).
-  static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
-  // Stack frame header.
-  static const int kStackFrameHeader = kReturnAddress;
-  // Stack parameters placed by caller.
-  static const int kRegisterOutput = kStackFrameHeader + 20;
-  static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
-  static const int kDirectCall = kNumOutputRegisters + kPointerSize;
-  static const int kIsolate = kDirectCall + kPointerSize;
-
-  // Below the frame pointer.
-  // Register parameters stored by setup code.
-  static const int kInputEnd = kFramePointer - kPointerSize;
-  static const int kInputStart = kInputEnd - kPointerSize;
-  static const int kStartIndex = kInputStart - kPointerSize;
-  static const int kInputString = kStartIndex - kPointerSize;
-  // When adding local variables remember to push space for them in
-  // the frame in GetCode.
-  static const int kSuccessfulCaptures = kInputString - kPointerSize;
-  static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
-  static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
-  // Stores the initial value of the regexp stack pointer in a
-  // position-independent representation (in case the regexp stack grows and
-  // thus moves).
-  static const int kRegExpStackBasePointer =
-      kBacktrackCount - kSystemPointerSize;
-
-  // First register address. Following registers are below it on the stack.
-  static const int kRegisterZero = kRegExpStackBasePointer - kSystemPointerSize;
-
-  // Initial size of code buffer.
-  static const int kRegExpCodeSize = 1024;
-
-  void PushCallerSavedRegisters();
-  void PopCallerSavedRegisters();
-
-  // Check whether preemption has been requested.
-  void CheckPreemption();
-
-  // Check whether we are exceeding the stack limit on the backtrack stack.
-  void CheckStackLimit();
-
-  // Generate a call to CheckStackGuardState.
-  void CallCheckStackGuardState(Register scratch);
-  void CallIsCharacterInRangeArray(const ZoneList<CharacterRange>* ranges);
-
-  // The ebp-relative location of a regexp register.
-  MemOperand register_location(int register_index);
-
-  // Register holding the current input position as negative offset from
-  // the end of the string.
-  static constexpr Register current_input_offset() { return s2; }
-
-  // The register containing the current character after LoadCurrentCharacter.
-  static constexpr Register current_character() { return s5; }
-
-  // Register holding address of the end of the input string.
-  static constexpr Register end_of_input_address() { return s6; }
-
-  // Register holding the frame address. Local variables, parameters and
-  // regexp registers are addressed relative to this.
-  static constexpr Register frame_pointer() { return fp; }
-
-  // The register containing the backtrack stack top. Provides a meaningful
-  // name to the register.
-  static constexpr Register backtrack_stackpointer() { return s7; }
-
-  // Register holding pointer to the current code object.
-  static constexpr Register code_pointer() { return s1; }
-
-  // Byte size of chars in the string to match (decided by the Mode argument).
-  inline int char_size() const { return static_cast<int>(mode_); }
-
-  // Equivalent to a conditional branch to the label, unless the label
-  // is nullptr, in which case it is a conditional Backtrack.
-  void BranchOrBacktrack(Label* to,
-                         Condition condition,
-                         Register rs,
-                         const Operand& rt);
-
-  // Call and return internally in the generated code in a way that
-  // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
-  inline void SafeCall(Label* to,
-                       Condition cond,
-                       Register rs,
-                       const Operand& rt);
-  inline void SafeReturn();
-  inline void SafeCallTarget(Label* name);
-
-  // Pushes the value of a register on the backtrack stack. Decrements the
-  // stack pointer by a word size and stores the register's value there.
-  inline void Push(Register source);
-
-  // Pops a value from the backtrack stack. Reads the word at the stack pointer
-  // and increments it by a word size.
-  inline void Pop(Register target);
-
-  void LoadRegExpStackPointerFromMemory(Register dst);
-  void StoreRegExpStackPointerToMemory(Register src, Register scratch);
-  void PushRegExpBasePointer(Register stack_pointer, Register scratch);
-  void PopRegExpBasePointer(Register stack_pointer_out, Register scratch);
-
-  Isolate* isolate() const { return masm_->isolate(); }
-
-  const std::unique_ptr<MacroAssembler> masm_;
-  const NoRootArrayScope no_root_array_scope_;
-
-  // Which mode to generate code for (Latin1 or UC16).
-  const Mode mode_;
-
-  // One greater than maximal register index actually used.
-  int num_registers_;
-
-  // Number of registers to output at the end (the saved registers
-  // are always 0..num_saved_registers_-1).
-  const int num_saved_registers_;
-
-  // Labels used internally.
- Label entry_label_; - Label start_label_; - Label success_label_; - Label backtrack_label_; - Label exit_label_; - Label check_preempt_label_; - Label stack_overflow_label_; - Label internal_failure_label_; - Label fallback_label_; -}; - -} // namespace internal -} // namespace v8 - -#endif // V8_REGEXP_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_ diff --git a/src/regexp/regexp-macro-assembler-arch.h b/src/regexp/regexp-macro-assembler-arch.h index 101f2412c3..7e816ee85b 100644 --- a/src/regexp/regexp-macro-assembler-arch.h +++ b/src/regexp/regexp-macro-assembler-arch.h @@ -17,8 +17,6 @@ #include "src/regexp/arm/regexp-macro-assembler-arm.h" #elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 #include "src/regexp/ppc/regexp-macro-assembler-ppc.h" -#elif V8_TARGET_ARCH_MIPS -#include "src/regexp/mips/regexp-macro-assembler-mips.h" #elif V8_TARGET_ARCH_MIPS64 #include "src/regexp/mips64/regexp-macro-assembler-mips64.h" #elif V8_TARGET_ARCH_LOONG64 diff --git a/src/regexp/regexp.cc b/src/regexp/regexp.cc index c0e439beae..71301ab965 100644 --- a/src/regexp/regexp.cc +++ b/src/regexp/regexp.cc @@ -940,9 +940,6 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data, #elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 macro_assembler.reset(new RegExpMacroAssemblerPPC(isolate, zone, mode, output_register_count)); -#elif V8_TARGET_ARCH_MIPS - macro_assembler.reset(new RegExpMacroAssemblerMIPS(isolate, zone, mode, - output_register_count)); #elif V8_TARGET_ARCH_MIPS64 macro_assembler.reset(new RegExpMacroAssemblerMIPS(isolate, zone, mode, output_register_count)); diff --git a/src/runtime/runtime-atomics.cc b/src/runtime/runtime-atomics.cc index fa6565bfd5..d2b371efb3 100644 --- a/src/runtime/runtime-atomics.cc +++ b/src/runtime/runtime-atomics.cc @@ -20,9 +20,8 @@ namespace v8 { namespace internal { // Other platforms have CSA support, see builtins-sharedarraybuffer-gen.h. -#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \ - V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X || \ - V8_TARGET_ARCH_LOONG64 +#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_PPC || \ + V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_LOONG64 namespace { @@ -611,7 +610,7 @@ RUNTIME_FUNCTION(Runtime_AtomicsOr) { UNREACHABLE(); } RUNTIME_FUNCTION(Runtime_AtomicsXor) { UNREACHABLE(); } -#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 +#endif // V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 // || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X // || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_LOONG64 || // V8_TARGET_ARCH_RISCV32 diff --git a/src/snapshot/deserializer.h b/src/snapshot/deserializer.h index 4c55efc979..130125a231 100644 --- a/src/snapshot/deserializer.h +++ b/src/snapshot/deserializer.h @@ -29,10 +29,10 @@ class Object; // Used for platforms with embedded constant pools to trigger deserialization // of objects found in code. 
-#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \ - defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_S390) || \ - defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_RISCV32) || \ - defined(V8_TARGET_ARCH_RISCV64) || V8_EMBEDDED_CONSTANT_POOL +#if defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_PPC) || \ + defined(V8_TARGET_ARCH_S390) || defined(V8_TARGET_ARCH_PPC64) || \ + defined(V8_TARGET_ARCH_RISCV32) || defined(V8_TARGET_ARCH_RISCV64) || \ + V8_EMBEDDED_CONSTANT_POOL #define V8_CODE_EMBEDS_OBJECT_POINTER 1 #else #define V8_CODE_EMBEDS_OBJECT_POINTER 0 diff --git a/src/snapshot/embedded/embedded-data.cc b/src/snapshot/embedded/embedded-data.cc index 40ff81a06b..118eb8b581 100644 --- a/src/snapshot/embedded/embedded-data.cc +++ b/src/snapshot/embedded/embedded-data.cc @@ -223,11 +223,10 @@ void FinalizeEmbeddedCodeTargets(Isolate* isolate, EmbeddedData* blob) { RelocIterator on_heap_it(code, kRelocMask); RelocIterator off_heap_it(blob, code, kRelocMask); -#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \ - defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) || \ - defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_S390) || \ - defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_LOONG64) || \ - defined(V8_TARGET_ARCH_RISCV32) +#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \ + defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_IA32) || \ + defined(V8_TARGET_ARCH_S390) || defined(V8_TARGET_ARCH_RISCV64) || \ + defined(V8_TARGET_ARCH_LOONG64) || defined(V8_TARGET_ARCH_RISCV32) // On these platforms we emit relative builtin-to-builtin // jumps for isolate independent builtins in the snapshot. This fixes up the // relative jumps to the right offsets in the snapshot. diff --git a/src/snapshot/embedded/platform-embedded-file-writer-generic.cc b/src/snapshot/embedded/platform-embedded-file-writer-generic.cc index 2ee2b8bee0..ad4c088124 100644 --- a/src/snapshot/embedded/platform-embedded-file-writer-generic.cc +++ b/src/snapshot/embedded/platform-embedded-file-writer-generic.cc @@ -163,8 +163,7 @@ int PlatformEmbeddedFileWriterGeneric::IndentedDataDirective( DataDirective PlatformEmbeddedFileWriterGeneric::ByteChunkDataDirective() const { -#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \ - defined(V8_TARGET_ARCH_LOONG64) +#if defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_LOONG64) // MIPS and LOONG64 uses a fixed 4 byte instruction set, using .long // to prevent any unnecessary padding. return kLong; diff --git a/src/utils/memcopy.h b/src/utils/memcopy.h index bc14556558..3227c5dfb0 100644 --- a/src/utils/memcopy.h +++ b/src/utils/memcopy.h @@ -20,7 +20,7 @@ namespace internal { using Address = uintptr_t; // ---------------------------------------------------------------------------- -// Generated memcpy/memmove for ia32, arm, and mips. +// Generated memcpy/memmove for ia32 and arm. void init_memcopy_functions(); @@ -59,24 +59,6 @@ V8_EXPORT_PRIVATE V8_INLINE void MemMove(void* dest, const void* src, // For values < 12, the assembler function is slower than the inlined C code. 
 const int kMinComplexConvertMemCopy = 12;
-#elif defined(V8_HOST_ARCH_MIPS)
-using MemCopyUint8Function = void (*)(uint8_t* dest, const uint8_t* src,
-                                      size_t size);
-V8_EXPORT_PRIVATE extern MemCopyUint8Function memcopy_uint8_function;
-V8_INLINE void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src,
-                                   size_t chars) {
-  memcpy(dest, src, chars);
-}
-// For values < 16, the assembler function is slower than the inlined C code.
-const size_t kMinComplexMemCopy = 16;
-V8_INLINE void MemCopy(void* dest, const void* src, size_t size) {
-  (*memcopy_uint8_function)(reinterpret_cast<uint8_t*>(dest),
-                            reinterpret_cast<const uint8_t*>(src), size);
-}
-V8_EXPORT_PRIVATE V8_INLINE void MemMove(void* dest, const void* src,
-                                         size_t size) {
-  memmove(dest, src, size);
-}
 #else
 // Copy memory area to disjoint memory area.
 inline void MemCopy(void* dest, const void* src, size_t size) {
diff --git a/src/wasm/baseline/liftoff-assembler.h b/src/wasm/baseline/liftoff-assembler.h
index 562cadd612..91e957c31a 100644
--- a/src/wasm/baseline/liftoff-assembler.h
+++ b/src/wasm/baseline/liftoff-assembler.h
@@ -1869,8 +1869,6 @@ bool CheckCompatibleStackSlotTypes(ValueKind a, ValueKind b);
 #include "src/wasm/baseline/arm/liftoff-assembler-arm.h"
 #elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
 #include "src/wasm/baseline/ppc/liftoff-assembler-ppc.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "src/wasm/baseline/mips/liftoff-assembler-mips.h"
 #elif V8_TARGET_ARCH_MIPS64
 #include "src/wasm/baseline/mips64/liftoff-assembler-mips64.h"
 #elif V8_TARGET_ARCH_LOONG64
diff --git a/src/wasm/baseline/liftoff-compiler.cc b/src/wasm/baseline/liftoff-compiler.cc
index 1652f1678a..f3748f7dd6 100644
--- a/src/wasm/baseline/liftoff-compiler.cc
+++ b/src/wasm/baseline/liftoff-compiler.cc
@@ -320,8 +320,8 @@ void CheckBailoutAllowed(LiftoffBailoutReason reason, const char* detail,
 }
 
 // Some externally maintained architectures don't fully implement Liftoff yet.
-#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_S390X || \
-    V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_LOONG64
+#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_PPC || \
+    V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_LOONG64
   return;
 #endif
 
diff --git a/src/wasm/baseline/mips/liftoff-assembler-mips.h b/src/wasm/baseline/mips/liftoff-assembler-mips.h
deleted file mode 100644
index 8bad92d522..0000000000
--- a/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ /dev/null
@@ -1,3237 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
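The block removed from memcopy.h above shows V8's function-pointer dispatch for MemCopy on MIPS; a self-contained sketch of that pattern (default wiring here is an assumption, the names mirror the deleted code):

#include <cstddef>
#include <cstdint>
#include <cstring>

// MemCopy forwards through a pointer that starts as a plain memcpy wrapper;
// initialization code may later swap in an optimized assembler routine.
using MemCopyUint8Function = void (*)(uint8_t* dest, const uint8_t* src,
                                      size_t size);

inline void MemCopyUint8Wrapper(uint8_t* dest, const uint8_t* src,
                                size_t size) {
  std::memcpy(dest, src, size);
}

inline MemCopyUint8Function memcopy_uint8_function = &MemCopyUint8Wrapper;

inline void MemCopy(void* dest, const void* src, size_t size) {
  (*memcopy_uint8_function)(static_cast<uint8_t*>(dest),
                            static_cast<const uint8_t*>(src), size);
}
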
- -#ifndef V8_WASM_BASELINE_MIPS_LIFTOFF_ASSEMBLER_MIPS_H_ -#define V8_WASM_BASELINE_MIPS_LIFTOFF_ASSEMBLER_MIPS_H_ - -#include "src/heap/memory-chunk.h" -#include "src/wasm/baseline/liftoff-assembler.h" -#include "src/wasm/wasm-objects.h" - -namespace v8 { -namespace internal { -namespace wasm { - -namespace liftoff { - -inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) { - switch (liftoff_cond) { - case kEqual: - return eq; - case kUnequal: - return ne; - case kSignedLessThan: - return lt; - case kSignedLessEqual: - return le; - case kSignedGreaterThan: - return gt; - case kSignedGreaterEqual: - return ge; - case kUnsignedLessThan: - return ult; - case kUnsignedLessEqual: - return ule; - case kUnsignedGreaterThan: - return ugt; - case kUnsignedGreaterEqual: - return uge; - } -} - -// half -// slot Frame -// -----+--------------------+--------------------------- -// n+3 | parameter n | -// ... | ... | -// 4 | parameter 1 | or parameter 2 -// 3 | parameter 0 | or parameter 1 -// 2 | (result address) | or parameter 0 -// -----+--------------------+--------------------------- -// 1 | return addr (ra) | -// 0 | previous frame (fp)| -// -----+--------------------+ <-- frame ptr (fp) -// -1 | StackFrame::WASM | -// -2 | instance | -// -3 | feedback vector | -// -4 | tiering budget | -// -----+--------------------+--------------------------- -// -5 | slot 0 (high) | ^ -// -6 | slot 0 (low) | | -// -7 | slot 1 (high) | Frame slots -// -8 | slot 1 (low) | | -// | | v -// -----+--------------------+ <-- stack ptr (sp) -// -#if defined(V8_TARGET_BIG_ENDIAN) -constexpr int32_t kLowWordOffset = 4; -constexpr int32_t kHighWordOffset = 0; -#else -constexpr int32_t kLowWordOffset = 0; -constexpr int32_t kHighWordOffset = 4; -#endif - -constexpr int kInstanceOffset = 2 * kSystemPointerSize; -constexpr int kFeedbackVectorOffset = 3 * kSystemPointerSize; - -inline MemOperand GetStackSlot(int offset) { return MemOperand(fp, -offset); } - -inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) { - int32_t half_offset = - half == kLowWord ? 0 : LiftoffAssembler::kStackSlotSize / 2; - return MemOperand(offset > 0 ? 
fp : sp, -offset + half_offset); -} - -inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); } - -inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, Register base, - int32_t offset, ValueKind kind) { - MemOperand src(base, offset); - switch (kind) { - case kI32: - case kRef: - case kRefNull: - case kRtt: - assm->lw(dst.gp(), src); - break; - case kI64: - assm->lw(dst.low_gp(), - MemOperand(base, offset + liftoff::kLowWordOffset)); - assm->lw(dst.high_gp(), - MemOperand(base, offset + liftoff::kHighWordOffset)); - break; - case kF32: - assm->lwc1(dst.fp(), src); - break; - case kF64: - assm->Ldc1(dst.fp(), src); - break; - default: - UNREACHABLE(); - } -} - -inline void Store(LiftoffAssembler* assm, Register base, int32_t offset, - LiftoffRegister src, ValueKind kind) { - MemOperand dst(base, offset); - switch (kind) { - case kI32: - case kRefNull: - case kRef: - case kRtt: - assm->Usw(src.gp(), dst); - break; - case kI64: - assm->Usw(src.low_gp(), - MemOperand(base, offset + liftoff::kLowWordOffset)); - assm->Usw(src.high_gp(), - MemOperand(base, offset + liftoff::kHighWordOffset)); - break; - case kF32: - assm->Uswc1(src.fp(), dst, t8); - break; - case kF64: - assm->Usdc1(src.fp(), dst, t8); - break; - default: - UNREACHABLE(); - } -} - -inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueKind kind) { - switch (kind) { - case kI32: - case kRefNull: - case kRef: - case kRtt: - assm->push(reg.gp()); - break; - case kI64: - assm->Push(reg.high_gp(), reg.low_gp()); - break; - case kF32: - assm->addiu(sp, sp, -sizeof(float)); - assm->swc1(reg.fp(), MemOperand(sp, 0)); - break; - case kF64: - assm->addiu(sp, sp, -sizeof(double)); - assm->Sdc1(reg.fp(), MemOperand(sp, 0)); - break; - default: - UNREACHABLE(); - } -} - -inline Register EnsureNoAlias(Assembler* assm, Register reg, - LiftoffRegister must_not_alias, - UseScratchRegisterScope* temps) { - if (reg != must_not_alias.low_gp() && reg != must_not_alias.high_gp()) - return reg; - Register tmp = temps->Acquire(); - DCHECK_NE(must_not_alias.low_gp(), tmp); - DCHECK_NE(must_not_alias.high_gp(), tmp); - assm->movz(tmp, reg, zero_reg); - return tmp; -} - -#if defined(V8_TARGET_BIG_ENDIAN) -inline void ChangeEndiannessLoad(LiftoffAssembler* assm, LiftoffRegister dst, - LoadType type, LiftoffRegList pinned) { - bool is_float = false; - LiftoffRegister tmp = dst; - switch (type.value()) { - case LoadType::kI64Load8U: - case LoadType::kI64Load8S: - case LoadType::kI32Load8U: - case LoadType::kI32Load8S: - // No need to change endianness for byte size. 
- return; - case LoadType::kF32Load: - is_float = true; - tmp = assm->GetUnusedRegister(kGpReg, pinned); - assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, dst); - V8_FALLTHROUGH; - case LoadType::kI32Load: - assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4); - break; - case LoadType::kI32Load16S: - assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2); - break; - case LoadType::kI32Load16U: - assm->TurboAssembler::ByteSwapUnsigned(tmp.gp(), tmp.gp(), 2); - break; - case LoadType::kF64Load: - is_float = true; - tmp = assm->GetUnusedRegister(kGpRegPair, pinned); - assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, dst); - V8_FALLTHROUGH; - case LoadType::kI64Load: - assm->TurboAssembler::Move(kScratchReg, tmp.low_gp()); - assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4); - assm->TurboAssembler::ByteSwapSigned(tmp.high_gp(), kScratchReg, 4); - break; - case LoadType::kI64Load16U: - assm->TurboAssembler::ByteSwapUnsigned(tmp.low_gp(), tmp.low_gp(), 2); - assm->TurboAssembler::Move(tmp.high_gp(), zero_reg); - break; - case LoadType::kI64Load16S: - assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.low_gp(), 2); - assm->sra(tmp.high_gp(), tmp.low_gp(), 31); - break; - case LoadType::kI64Load32U: - assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.low_gp(), 4); - assm->TurboAssembler::Move(tmp.high_gp(), zero_reg); - break; - case LoadType::kI64Load32S: - assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.low_gp(), 4); - assm->sra(tmp.high_gp(), tmp.low_gp(), 31); - break; - default: - UNREACHABLE(); - } - - if (is_float) { - switch (type.value()) { - case LoadType::kF32Load: - assm->emit_type_conversion(kExprF32ReinterpretI32, dst, tmp); - break; - case LoadType::kF64Load: - assm->emit_type_conversion(kExprF64ReinterpretI64, dst, tmp); - break; - default: - UNREACHABLE(); - } - } -} - -inline void ChangeEndiannessStore(LiftoffAssembler* assm, LiftoffRegister src, - StoreType type, LiftoffRegList pinned) { - bool is_float = false; - LiftoffRegister tmp = src; - switch (type.value()) { - case StoreType::kI64Store8: - case StoreType::kI32Store8: - // No need to change endianness for byte size. 
- return; - case StoreType::kF32Store: - is_float = true; - tmp = assm->GetUnusedRegister(kGpReg, pinned); - assm->emit_type_conversion(kExprI32ReinterpretF32, tmp, src); - V8_FALLTHROUGH; - case StoreType::kI32Store: - assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 4); - break; - case StoreType::kI32Store16: - assm->TurboAssembler::ByteSwapSigned(tmp.gp(), tmp.gp(), 2); - break; - case StoreType::kF64Store: - is_float = true; - tmp = assm->GetUnusedRegister(kGpRegPair, pinned); - assm->emit_type_conversion(kExprI64ReinterpretF64, tmp, src); - V8_FALLTHROUGH; - case StoreType::kI64Store: - assm->TurboAssembler::Move(kScratchReg, tmp.low_gp()); - assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.high_gp(), 4); - assm->TurboAssembler::ByteSwapSigned(tmp.high_gp(), kScratchReg, 4); - break; - case StoreType::kI64Store32: - assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.low_gp(), 4); - break; - case StoreType::kI64Store16: - assm->TurboAssembler::ByteSwapSigned(tmp.low_gp(), tmp.low_gp(), 2); - break; - default: - UNREACHABLE(); - } - - if (is_float) { - switch (type.value()) { - case StoreType::kF32Store: - assm->emit_type_conversion(kExprF32ReinterpretI32, src, tmp); - break; - case StoreType::kF64Store: - assm->emit_type_conversion(kExprF64ReinterpretI64, src, tmp); - break; - default: - UNREACHABLE(); - } - } -} -#endif // V8_TARGET_BIG_ENDIAN - -} // namespace liftoff - -int LiftoffAssembler::PrepareStackFrame() { - int offset = pc_offset(); - // When the frame size is bigger than 4KB, we need seven instructions for - // stack checking, so we reserve space for this case. - addiu(sp, sp, 0); - nop(); - nop(); - nop(); - nop(); - nop(); - nop(); - return offset; -} - -void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params, - int stack_param_delta) { - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - - // Push the return address and frame pointer to complete the stack frame. - Lw(scratch, MemOperand(fp, 4)); - Push(scratch); - Lw(scratch, MemOperand(fp, 0)); - Push(scratch); - - // Shift the whole frame upwards. - int slot_count = num_callee_stack_params + 2; - for (int i = slot_count - 1; i >= 0; --i) { - Lw(scratch, MemOperand(sp, i * 4)); - Sw(scratch, MemOperand(fp, (i - stack_param_delta) * 4)); - } - - // Set the new stack and frame pointer. - addiu(sp, fp, -stack_param_delta * 4); - Pop(ra, fp); -} - -void LiftoffAssembler::AlignFrameSize() {} - -void LiftoffAssembler::PatchPrepareStackFrame( - int offset, SafepointTableBuilder* safepoint_table_builder) { - // The frame_size includes the frame marker and the instance slot. Both are - // pushed as part of frame construction, so we don't need to allocate memory - // for them anymore. - int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize; - - // We can't run out of space, just pass anything big enough to not cause the - // assembler to try to grow the buffer. - constexpr int kAvailableSpace = 256; - TurboAssembler patching_assembler( - nullptr, AssemblerOptions{}, CodeObjectRequired::kNo, - ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace)); - - if (V8_LIKELY(frame_size < 4 * KB)) { - // This is the standard case for small frames: just subtract from SP and be - // done with it. 
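A sketch of the reserve-then-patch scheme used by PrepareStackFrame and PatchPrepareStackFrame (hypothetical buffer type, illustrative only): emit placeholder instructions first, remember their offset, and overwrite them once the final frame size is known.

#include <cstdint>
#include <vector>

struct CodeBuffer {
  std::vector<uint32_t> insts;
  // Emit n placeholder instructions (nops) and return their offset.
  int Reserve(int n) {
    int offset = static_cast<int>(insts.size());
    insts.insert(insts.end(), n, 0u);
    return offset;
  }
  // Later, overwrite a reserved slot with the real instruction.
  void PatchAt(int offset, uint32_t inst) { insts[offset] = inst; }
};
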
- patching_assembler.Addu(sp, sp, Operand(-frame_size)); - return; - } - - // The frame size is bigger than 4KB, so we might overflow the available stack - // space if we first allocate the frame and then do the stack check (we will - // need some remaining stack space for throwing the exception). That's why we - // check the available stack space before we allocate the frame. To do this we - // replace the {__ Addu(sp, sp, -framesize)} with a jump to OOL code that does - // this "extended stack check". - // - // The OOL code can simply be generated here with the normal assembler, - // because all other code generation, including OOL code, has already finished - // when {PatchPrepareStackFrame} is called. The function prologue then jumps - // to the current {pc_offset()} to execute the OOL code for allocating the - // large frame. - // Emit the unconditional branch in the function prologue (from {offset} to - // {pc_offset()}). - - int imm32 = pc_offset() - offset - 3 * kInstrSize; - patching_assembler.BranchLong(imm32); - - // If the frame is bigger than the stack, we throw the stack overflow - // exception unconditionally. Thereby we can avoid the integer overflow - // check in the condition code. - RecordComment("OOL: stack check for large frame"); - Label continuation; - if (frame_size < v8_flags.stack_size * 1024) { - Register stack_limit = kScratchReg; - Lw(stack_limit, - FieldMemOperand(kWasmInstanceRegister, - WasmInstanceObject::kRealStackLimitAddressOffset)); - Lw(stack_limit, MemOperand(stack_limit)); - Addu(stack_limit, stack_limit, Operand(frame_size)); - Branch(&continuation, uge, sp, Operand(stack_limit)); - } - - Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL); - // The call will not return; just define an empty safepoint. - safepoint_table_builder->DefineSafepoint(this); - if (v8_flags.debug_code) stop(); - - bind(&continuation); - - // Now allocate the stack space. Note that this might do more than just - // decrementing the SP; - Addu(sp, sp, Operand(-frame_size)); - - // Jump back to the start of the function, from {pc_offset()} to - // right after the reserved space for the {__ Addu(sp, sp, -framesize)} (which - // is a jump now). 
-  int func_start_offset = offset + 7 * kInstrSize;
-  imm32 = func_start_offset - pc_offset() - 3 * kInstrSize;
-  BranchLong(imm32);
-}
-
-void LiftoffAssembler::FinishCode() {}
-
-void LiftoffAssembler::AbortCompilation() {}
-
-// static
-constexpr int LiftoffAssembler::StaticStackFrameSize() {
-  return liftoff::kFeedbackVectorOffset;
-}
-
-int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
-  switch (kind) {
-    case kS128:
-      return value_kind_size(kind);
-    default:
-      return kStackSlotSize;
-  }
-}
-
-bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
-  return kind == kS128 || is_reference(kind);
-}
-
-void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
-                                    RelocInfo::Mode rmode) {
-  switch (value.type().kind()) {
-    case kI32:
-      TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
-      break;
-    case kI64: {
-      DCHECK(RelocInfo::IsNoInfo(rmode));
-      int32_t low_word = value.to_i64();
-      int32_t high_word = value.to_i64() >> 32;
-      TurboAssembler::li(reg.low_gp(), Operand(low_word));
-      TurboAssembler::li(reg.high_gp(), Operand(high_word));
-      break;
-    }
-    case kF32:
-      TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
-      break;
-    case kF64:
-      TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
-
-void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
-  lw(dst, liftoff::GetInstanceOperand());
-}
-
-void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
-                                        int32_t offset, int size) {
-  DCHECK_LE(0, offset);
-  switch (size) {
-    case 1:
-      lb(dst, MemOperand(instance, offset));
-      break;
-    case 4:
-      lw(dst, MemOperand(instance, offset));
-      break;
-    default:
-      UNIMPLEMENTED();
-  }
-}
-
-void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
-                                                     Register instance,
-                                                     int32_t offset) {
-  static_assert(kTaggedSize == kSystemPointerSize);
-  lw(dst, MemOperand(instance, offset));
-}
-
-void LiftoffAssembler::SpillInstance(Register instance) {
-  sw(instance, liftoff::GetInstanceOperand());
-}
-
-void LiftoffAssembler::ResetOSRTarget() {}
-
-void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
-                                         Register offset_reg,
-                                         int32_t offset_imm,
-                                         LiftoffRegList pinned) {
-  static_assert(kTaggedSize == kInt32Size);
-  Load(LiftoffRegister(dst), src_addr, offset_reg,
-       static_cast<uint32_t>(offset_imm), LoadType::kI32Load, pinned);
-}
-
-void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
-                                       int32_t offset_imm) {
-  MemOperand src_op = MemOperand(src_addr, offset_imm);
-  lw(dst, src_op);
-}
-
-void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
-                                          Register offset_reg,
-                                          int32_t offset_imm,
-                                          LiftoffRegister src,
-                                          LiftoffRegList pinned,
-                                          SkipWriteBarrier skip_write_barrier) {
-  static_assert(kTaggedSize == kInt32Size);
-  Register dst = no_reg;
-  if (offset_reg != no_reg) {
-    dst = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
-    emit_ptrsize_add(dst, dst_addr, offset_reg);
-  }
-  MemOperand dst_op = (offset_reg != no_reg) ? MemOperand(dst, offset_imm)
-                                             : MemOperand(dst_addr, offset_imm);
-  Sw(src.gp(), dst_op);
-
-  if (skip_write_barrier || v8_flags.disable_write_barriers) return;
-
-  // The write barrier.
- Label write_barrier; - Label exit; - Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); - CheckPageFlag(dst_addr, scratch, - MemoryChunk::kPointersFromHereAreInterestingMask, ne, - &write_barrier); - Branch(&exit); - bind(&write_barrier); - JumpIfSmi(src.gp(), &exit); - CheckPageFlag(src.gp(), scratch, - MemoryChunk::kPointersToHereAreInterestingMask, eq, &exit); - Addu(scratch, dst_op.rm(), dst_op.offset()); - CallRecordWriteStubSaveRegisters( - dst_addr, scratch, RememberedSetAction::kEmit, SaveFPRegsMode::kSave, - StubCallMode::kCallWasmRuntimeStub); - bind(&exit); -} - -void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr, - Register offset_reg, uint32_t offset_imm, - LoadType type, LiftoffRegList pinned, - uint32_t* protected_load_pc, bool is_load_mem, - bool i64_offset) { - Register src = no_reg; - if (offset_reg != no_reg) { - src = GetUnusedRegister(kGpReg, pinned).gp(); - emit_ptrsize_add(src, src_addr, offset_reg); - } - MemOperand src_op = (offset_reg != no_reg) ? MemOperand(src, offset_imm) - : MemOperand(src_addr, offset_imm); - - if (protected_load_pc) *protected_load_pc = pc_offset(); - switch (type.value()) { - case LoadType::kI32Load8U: - lbu(dst.gp(), src_op); - break; - case LoadType::kI64Load8U: - lbu(dst.low_gp(), src_op); - xor_(dst.high_gp(), dst.high_gp(), dst.high_gp()); - break; - case LoadType::kI32Load8S: - lb(dst.gp(), src_op); - break; - case LoadType::kI64Load8S: - lb(dst.low_gp(), src_op); - TurboAssembler::Move(dst.high_gp(), dst.low_gp()); - sra(dst.high_gp(), dst.high_gp(), 31); - break; - case LoadType::kI32Load16U: - TurboAssembler::Ulhu(dst.gp(), src_op); - break; - case LoadType::kI64Load16U: - TurboAssembler::Ulhu(dst.low_gp(), src_op); - xor_(dst.high_gp(), dst.high_gp(), dst.high_gp()); - break; - case LoadType::kI32Load16S: - TurboAssembler::Ulh(dst.gp(), src_op); - break; - case LoadType::kI64Load16S: - TurboAssembler::Ulh(dst.low_gp(), src_op); - TurboAssembler::Move(dst.high_gp(), dst.low_gp()); - sra(dst.high_gp(), dst.high_gp(), 31); - break; - case LoadType::kI32Load: - TurboAssembler::Ulw(dst.gp(), src_op); - break; - case LoadType::kI64Load32U: - TurboAssembler::Ulw(dst.low_gp(), src_op); - xor_(dst.high_gp(), dst.high_gp(), dst.high_gp()); - break; - case LoadType::kI64Load32S: - TurboAssembler::Ulw(dst.low_gp(), src_op); - TurboAssembler::Move(dst.high_gp(), dst.low_gp()); - sra(dst.high_gp(), dst.high_gp(), 31); - break; - case LoadType::kI64Load: { - MemOperand src_op = - (offset_reg != no_reg) - ? MemOperand(src, offset_imm + liftoff::kLowWordOffset) - : MemOperand(src_addr, offset_imm + liftoff::kLowWordOffset); - MemOperand src_op_upper = - (offset_reg != no_reg) - ? 
MemOperand(src, offset_imm + liftoff::kHighWordOffset) - : MemOperand(src_addr, offset_imm + liftoff::kHighWordOffset); - { - UseScratchRegisterScope temps(this); - Register temp = dst.low_gp(); - if (dst.low_gp() == src_op_upper.rm()) temp = temps.Acquire(); - TurboAssembler::Ulw(temp, src_op); - TurboAssembler::Ulw(dst.high_gp(), src_op_upper); - if (dst.low_gp() == src_op_upper.rm()) mov(dst.low_gp(), temp); - } - break; - } - case LoadType::kF32Load: - TurboAssembler::Ulwc1(dst.fp(), src_op, t8); - break; - case LoadType::kF64Load: - TurboAssembler::Uldc1(dst.fp(), src_op, t8); - break; - default: - UNREACHABLE(); - } - -#if defined(V8_TARGET_BIG_ENDIAN) - if (is_load_mem) { - pinned.set(src_op.rm()); - liftoff::ChangeEndiannessLoad(this, dst, type, pinned); - } -#endif -} - -void LiftoffAssembler::Store(Register dst_addr, Register offset_reg, - uint32_t offset_imm, LiftoffRegister src, - StoreType type, LiftoffRegList pinned, - uint32_t* protected_store_pc, bool is_store_mem) { - Register dst = no_reg; - MemOperand dst_op = MemOperand(dst_addr, offset_imm); - if (offset_reg != no_reg) { - if (is_store_mem) { - pinned.set(src); - } - dst = GetUnusedRegister(kGpReg, pinned).gp(); - emit_ptrsize_add(dst, dst_addr, offset_reg); - dst_op = MemOperand(dst, offset_imm); - } - -#if defined(V8_TARGET_BIG_ENDIAN) - if (is_store_mem) { - pinned = pinned | LiftoffRegList{dst_op.rm(), src}; - LiftoffRegister tmp = GetUnusedRegister(src.reg_class(), pinned); - // Save original value. - Move(tmp, src, type.value_type()); - - src = tmp; - pinned.set(tmp); - liftoff::ChangeEndiannessStore(this, src, type, pinned); - } -#endif - - if (protected_store_pc) *protected_store_pc = pc_offset(); - switch (type.value()) { - case StoreType::kI64Store8: - src = src.low(); - V8_FALLTHROUGH; - case StoreType::kI32Store8: - sb(src.gp(), dst_op); - break; - case StoreType::kI64Store16: - src = src.low(); - V8_FALLTHROUGH; - case StoreType::kI32Store16: - TurboAssembler::Ush(src.gp(), dst_op, t8); - break; - case StoreType::kI64Store32: - src = src.low(); - V8_FALLTHROUGH; - case StoreType::kI32Store: - TurboAssembler::Usw(src.gp(), dst_op); - break; - case StoreType::kI64Store: { - MemOperand dst_op_lower(dst_op.rm(), - offset_imm + liftoff::kLowWordOffset); - MemOperand dst_op_upper(dst_op.rm(), - offset_imm + liftoff::kHighWordOffset); - TurboAssembler::Usw(src.low_gp(), dst_op_lower); - TurboAssembler::Usw(src.high_gp(), dst_op_upper); - break; - } - case StoreType::kF32Store: - TurboAssembler::Uswc1(src.fp(), dst_op, t8); - break; - case StoreType::kF64Store: - TurboAssembler::Usdc1(src.fp(), dst_op, t8); - break; - default: - UNREACHABLE(); - } -} - -void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr, - Register offset_reg, uint32_t offset_imm, - LoadType type, LiftoffRegList pinned) { - bailout(kAtomics, "AtomicLoad"); -} - -void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg, - uint32_t offset_imm, LiftoffRegister src, - StoreType type, LiftoffRegList pinned) { - bailout(kAtomics, "AtomicStore"); -} - -void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg, - uint32_t offset_imm, LiftoffRegister value, - LiftoffRegister result, StoreType type) { - bailout(kAtomics, "AtomicAdd"); -} - -void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg, - uint32_t offset_imm, LiftoffRegister value, - LiftoffRegister result, StoreType type) { - bailout(kAtomics, "AtomicSub"); -} - -void LiftoffAssembler::AtomicAnd(Register dst_addr, Register 
offset_reg, - uint32_t offset_imm, LiftoffRegister value, - LiftoffRegister result, StoreType type) { - bailout(kAtomics, "AtomicAnd"); -} - -void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg, - uint32_t offset_imm, LiftoffRegister value, - LiftoffRegister result, StoreType type) { - bailout(kAtomics, "AtomicOr"); -} - -void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg, - uint32_t offset_imm, LiftoffRegister value, - LiftoffRegister result, StoreType type) { - bailout(kAtomics, "AtomicXor"); -} - -void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg, - uint32_t offset_imm, - LiftoffRegister value, - LiftoffRegister result, StoreType type) { - bailout(kAtomics, "AtomicExchange"); -} - -void LiftoffAssembler::AtomicCompareExchange( - Register dst_addr, Register offset_reg, uint32_t offset_imm, - LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result, - StoreType type) { - bailout(kAtomics, "AtomicCompareExchange"); -} - -void LiftoffAssembler::AtomicFence() { sync(); } - -void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst, - uint32_t caller_slot_idx, - ValueKind kind) { - int32_t offset = kSystemPointerSize * (caller_slot_idx + 1); - liftoff::Load(this, dst, fp, offset, kind); -} - -void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src, - uint32_t caller_slot_idx, - ValueKind kind) { - int32_t offset = kSystemPointerSize * (caller_slot_idx + 1); - liftoff::Store(this, fp, offset, src, kind); -} - -void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset, - ValueKind kind) { - liftoff::Load(this, dst, sp, offset, kind); -} - -void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset, - ValueKind kind) { - DCHECK_NE(dst_offset, src_offset); - LiftoffRegister reg = GetUnusedRegister(reg_class_for(kind), {}); - Fill(reg, src_offset, kind); - Spill(dst_offset, reg, kind); -} - -void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) { - DCHECK_NE(dst, src); - TurboAssembler::mov(dst, src); -} - -void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, - ValueKind kind) { - DCHECK_NE(dst, src); - TurboAssembler::Move(dst, src); -} - -void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) { - RecordUsedSpillOffset(offset); - MemOperand dst = liftoff::GetStackSlot(offset); - switch (kind) { - case kI32: - case kRef: - case kRefNull: - case kRtt: - sw(reg.gp(), dst); - break; - case kI64: - sw(reg.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord)); - sw(reg.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord)); - break; - case kF32: - swc1(reg.fp(), dst); - break; - case kF64: - TurboAssembler::Sdc1(reg.fp(), dst); - break; - default: - UNREACHABLE(); - } -} - -void LiftoffAssembler::Spill(int offset, WasmValue value) { - RecordUsedSpillOffset(offset); - MemOperand dst = liftoff::GetStackSlot(offset); - switch (value.type().kind()) { - case kI32: - case kRef: - case kRefNull: { - LiftoffRegister tmp = GetUnusedRegister(kGpReg, {}); - TurboAssembler::li(tmp.gp(), Operand(value.to_i32())); - sw(tmp.gp(), dst); - break; - } - case kI64: { - LiftoffRegister tmp = GetUnusedRegister(kGpRegPair, {}); - - int32_t low_word = value.to_i64(); - int32_t high_word = value.to_i64() >> 32; - TurboAssembler::li(tmp.low_gp(), Operand(low_word)); - TurboAssembler::li(tmp.high_gp(), Operand(high_word)); - - sw(tmp.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord)); - sw(tmp.high_gp(), 
liftoff::GetHalfStackSlot(offset, kHighWord)); - break; - } - default: - // kWasmF32 and kWasmF64 are unreachable, since those - // constants are not tracked. - UNREACHABLE(); - } -} - -void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) { - MemOperand src = liftoff::GetStackSlot(offset); - switch (kind) { - case kI32: - case kRef: - case kRefNull: - lw(reg.gp(), src); - break; - case kI64: - lw(reg.low_gp(), liftoff::GetHalfStackSlot(offset, kLowWord)); - lw(reg.high_gp(), liftoff::GetHalfStackSlot(offset, kHighWord)); - break; - case kF32: - lwc1(reg.fp(), src); - break; - case kF64: - TurboAssembler::Ldc1(reg.fp(), src); - break; - default: - UNREACHABLE(); - } -} - -void LiftoffAssembler::FillI64Half(Register reg, int offset, RegPairHalf half) { - lw(reg, liftoff::GetHalfStackSlot(offset, half)); -} - -void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) { - DCHECK_LT(0, size); - DCHECK_EQ(0, size % 4); - RecordUsedSpillOffset(start + size); - - if (size <= 48) { - // Special straight-line code for up to 12 words. Generates one - // instruction per word (<=12 instructions total). - for (int offset = 4; offset <= size; offset += 4) { - Sw(zero_reg, liftoff::GetStackSlot(start + offset)); - } - } else { - // General case for bigger counts (12 instructions). - // Use a0 for start address (inclusive), a1 for end address (exclusive). - Push(a1, a0); - Addu(a0, fp, Operand(-start - size)); - Addu(a1, fp, Operand(-start)); - - Label loop; - bind(&loop); - Sw(zero_reg, MemOperand(a0)); - addiu(a0, a0, kSystemPointerSize); - BranchShort(&loop, ne, a0, Operand(a1)); - - Pop(a1, a0); - } -} - -void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) { - TurboAssembler::Mul(dst, lhs, rhs); -} - -void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs, - Label* trap_div_by_zero, - Label* trap_div_unrepresentable) { - TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg)); - - // Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable. 
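The branchless register sequence below computes the predicate this plain C++ sketch spells out: INT32_MIN / -1 is the single overflowing case of signed 32-bit division.

#include <cstdint>
#include <limits>

inline bool DivIsUnrepresentable(int32_t lhs, int32_t rhs) {
  return lhs == std::numeric_limits<int32_t>::min() && rhs == -1;
}
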
-  TurboAssembler::li(kScratchReg, 1);
-  TurboAssembler::li(kScratchReg2, 1);
-  TurboAssembler::LoadZeroOnCondition(kScratchReg, lhs, Operand(kMinInt), eq);
-  TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs, Operand(-1), eq);
-  addu(kScratchReg, kScratchReg, kScratchReg2);
-  TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
-                         Operand(zero_reg));
-
-  TurboAssembler::Div(dst, lhs, rhs);
-}
-
-void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
-                                     Label* trap_div_by_zero) {
-  TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
-  TurboAssembler::Divu(dst, lhs, rhs);
-}
-
-void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
-                                     Label* trap_div_by_zero) {
-  TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
-  TurboAssembler::Mod(dst, lhs, rhs);
-}
-
-void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
-                                     Label* trap_div_by_zero) {
-  TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
-  TurboAssembler::Modu(dst, lhs, rhs);
-}
-
-#define I32_BINOP(name, instruction)                                 \
-  void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
-                                         Register rhs) {             \
-    instruction(dst, lhs, rhs);                                      \
-  }
-
-// clang-format off
-I32_BINOP(add, addu)
-I32_BINOP(sub, subu)
-I32_BINOP(and, and_)
-I32_BINOP(or, or_)
-I32_BINOP(xor, xor_)
-// clang-format on
-
-#undef I32_BINOP
-
-#define I32_BINOP_I(name, instruction)                                  \
-  void LiftoffAssembler::emit_i32_##name##i(Register dst, Register lhs, \
-                                            int32_t imm) {              \
-    instruction(dst, lhs, Operand(imm));                                \
-  }
-
-// clang-format off
-I32_BINOP_I(add, Addu)
-I32_BINOP_I(sub, Subu)
-I32_BINOP_I(and, And)
-I32_BINOP_I(or, Or)
-I32_BINOP_I(xor, Xor)
-// clang-format on
-
-#undef I32_BINOP_I
-
-void LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
-  TurboAssembler::Clz(dst, src);
-}
-
-void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
-  TurboAssembler::Ctz(dst, src);
-}
-
-bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
-  TurboAssembler::Popcnt(dst, src);
-  return true;
-}
-
-#define I32_SHIFTOP(name, instruction)                               \
-  void LiftoffAssembler::emit_i32_##name(Register dst, Register src, \
-                                         Register amount) {          \
-    instruction(dst, src, amount);                                   \
-  }
-#define I32_SHIFTOP_I(name, instruction)                                \
-  I32_SHIFTOP(name, instruction##v)                                     \
-  void LiftoffAssembler::emit_i32_##name##i(Register dst, Register src, \
-                                            int amount) {               \
-    DCHECK(is_uint5(amount));                                           \
-    instruction(dst, src, amount);                                      \
-  }
-
-I32_SHIFTOP_I(shl, sll)
-I32_SHIFTOP_I(sar, sra)
-I32_SHIFTOP_I(shr, srl)
-
-#undef I32_SHIFTOP
-#undef I32_SHIFTOP_I
-
-void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
-                                     int64_t imm) {
-  LiftoffRegister imm_reg =
-      GetUnusedRegister(kGpRegPair, LiftoffRegList{dst, lhs});
-  int32_t imm_low_word = static_cast<int32_t>(imm);
-  int32_t imm_high_word = static_cast<int32_t>(imm >> 32);
-  TurboAssembler::li(imm_reg.low_gp(), imm_low_word);
-  TurboAssembler::li(imm_reg.high_gp(), imm_high_word);
-  TurboAssembler::AddPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
-                          lhs.high_gp(), imm_reg.low_gp(), imm_reg.high_gp(),
-                          kScratchReg, kScratchReg2);
-}
-
-void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
-                                    LiftoffRegister rhs) {
-  TurboAssembler::MulPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(),
-                          lhs.high_gp(), rhs.low_gp(), rhs.high_gp(),
-                          kScratchReg, kScratchReg2);
-}
-
-bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs, - Label* trap_div_by_zero, - Label* trap_div_unrepresentable) { - return false; -} - -bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs, - Label* trap_div_by_zero) { - return false; -} - -bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs, - Label* trap_div_by_zero) { - return false; -} - -bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs, - Label* trap_div_by_zero) { - return false; -} - -void LiftoffAssembler::emit_i64_add(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - TurboAssembler::AddPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(), - lhs.high_gp(), rhs.low_gp(), rhs.high_gp(), - kScratchReg, kScratchReg2); -} - -void LiftoffAssembler::emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - TurboAssembler::SubPair(dst.low_gp(), dst.high_gp(), lhs.low_gp(), - lhs.high_gp(), rhs.low_gp(), rhs.high_gp(), - kScratchReg, kScratchReg2); -} - -namespace liftoff { - -inline bool IsRegInRegPair(LiftoffRegister pair, Register reg) { - DCHECK(pair.is_gp_pair()); - return pair.low_gp() == reg || pair.high_gp() == reg; -} - -inline void Emit64BitShiftOperation( - LiftoffAssembler* assm, LiftoffRegister dst, LiftoffRegister src, - Register amount, - void (TurboAssembler::*emit_shift)(Register, Register, Register, Register, - Register, Register, Register)) { - Label move, done; - LiftoffRegList pinned = {dst, src, amount}; - - // If some of destination registers are in use, get another, unused pair. - // That way we prevent overwriting some input registers while shifting. - // Do this before any branch so that the cache state will be correct for - // all conditions. - LiftoffRegister tmp = assm->GetUnusedRegister(kGpRegPair, pinned); - - // If shift amount is 0, don't do the shifting. - assm->TurboAssembler::Branch(&move, eq, amount, Operand(zero_reg)); - - if (liftoff::IsRegInRegPair(dst, amount) || dst.overlaps(src)) { - // Do the actual shift. - (assm->*emit_shift)(tmp.low_gp(), tmp.high_gp(), src.low_gp(), - src.high_gp(), amount, kScratchReg, kScratchReg2); - - // Place result in destination register. - assm->TurboAssembler::Move(dst.high_gp(), tmp.high_gp()); - assm->TurboAssembler::Move(dst.low_gp(), tmp.low_gp()); - } else { - (assm->*emit_shift)(dst.low_gp(), dst.high_gp(), src.low_gp(), - src.high_gp(), amount, kScratchReg, kScratchReg2); - } - assm->TurboAssembler::Branch(&done); - - // If shift amount is 0, move src to dst. - assm->bind(&move); - assm->TurboAssembler::Move(dst.high_gp(), src.high_gp()); - assm->TurboAssembler::Move(dst.low_gp(), src.low_gp()); - - assm->bind(&done); -} -} // namespace liftoff - -void LiftoffAssembler::emit_i64_shl(LiftoffRegister dst, LiftoffRegister src, - Register amount) { - liftoff::Emit64BitShiftOperation(this, dst, src, amount, - &TurboAssembler::ShlPair); -} - -void LiftoffAssembler::emit_i64_shli(LiftoffRegister dst, LiftoffRegister src, - int32_t amount) { - UseScratchRegisterScope temps(this); - // {src.low_gp()} will still be needed after writing {dst.high_gp()} and - // {dst.low_gp()}. - Register src_low = liftoff::EnsureNoAlias(this, src.low_gp(), dst, &temps); - Register src_high = src.high_gp(); - // {src.high_gp()} will still be needed after writing {dst.high_gp()}. 
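A plain C++ model of the pair shift (a sketch for amounts 1..31 only, to sidestep shift-by-32 UB) makes the aliasing hazard concrete: the source's words are still read while the destination's high word is written, so overlapping registers need the scratch copy handled below.

#include <cstdint>

inline void ShlPairModel(uint32_t src_low, uint32_t src_high, int amount,
                         uint32_t* dst_low, uint32_t* dst_high) {
  *dst_high = (src_high << amount) | (src_low >> (32 - amount));
  *dst_low = src_low << amount;
}
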
- if (src_high == dst.high_gp()) { - mov(kScratchReg, src_high); - src_high = kScratchReg; - } - DCHECK_NE(dst.low_gp(), kScratchReg); - DCHECK_NE(dst.high_gp(), kScratchReg); - - ShlPair(dst.low_gp(), dst.high_gp(), src_low, src_high, amount, kScratchReg); -} - -void LiftoffAssembler::emit_i64_sar(LiftoffRegister dst, LiftoffRegister src, - Register amount) { - liftoff::Emit64BitShiftOperation(this, dst, src, amount, - &TurboAssembler::SarPair); -} - -void LiftoffAssembler::emit_i64_sari(LiftoffRegister dst, LiftoffRegister src, - int32_t amount) { - UseScratchRegisterScope temps(this); - // {src.high_gp()} will still be needed after writing {dst.high_gp()} and - // {dst.low_gp()}. - Register src_high = liftoff::EnsureNoAlias(this, src.high_gp(), dst, &temps); - DCHECK_NE(dst.low_gp(), kScratchReg); - DCHECK_NE(dst.high_gp(), kScratchReg); - - SarPair(dst.low_gp(), dst.high_gp(), src.low_gp(), src_high, amount, - kScratchReg); -} - -void LiftoffAssembler::emit_i64_shr(LiftoffRegister dst, LiftoffRegister src, - Register amount) { - liftoff::Emit64BitShiftOperation(this, dst, src, amount, - &TurboAssembler::ShrPair); -} - -void LiftoffAssembler::emit_i64_shri(LiftoffRegister dst, LiftoffRegister src, - int32_t amount) { - UseScratchRegisterScope temps(this); - // {src.high_gp()} will still be needed after writing {dst.high_gp()} and - // {dst.low_gp()}. - Register src_high = liftoff::EnsureNoAlias(this, src.high_gp(), dst, &temps); - DCHECK_NE(dst.low_gp(), kScratchReg); - DCHECK_NE(dst.high_gp(), kScratchReg); - - ShrPair(dst.low_gp(), dst.high_gp(), src.low_gp(), src_high, amount, - kScratchReg); -} - -void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) { - // return high == 0 ? 32 + CLZ32(low) : CLZ32(high); - Label done; - Label high_is_zero; - Branch(&high_is_zero, eq, src.high_gp(), Operand(zero_reg)); - - clz(dst.low_gp(), src.high_gp()); - jmp(&done); - - bind(&high_is_zero); - clz(dst.low_gp(), src.low_gp()); - Addu(dst.low_gp(), dst.low_gp(), Operand(32)); - - bind(&done); - mov(dst.high_gp(), zero_reg); // High word of result is always 0. -} - -void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) { - // return low == 0 ? 32 + CTZ32(high) : CTZ32(low); - Label done; - Label low_is_zero; - Branch(&low_is_zero, eq, src.low_gp(), Operand(zero_reg)); - - Ctz(dst.low_gp(), src.low_gp()); - jmp(&done); - - bind(&low_is_zero); - Ctz(dst.low_gp(), src.high_gp()); - Addu(dst.low_gp(), dst.low_gp(), Operand(32)); - - bind(&done); - mov(dst.high_gp(), zero_reg); // High word of result is always 0. -} - -bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst, - LiftoffRegister src) { - // Produce partial popcnts in the two dst registers. - Register src1 = src.high_gp() == dst.low_gp() ? src.high_gp() : src.low_gp(); - Register src2 = src.high_gp() == dst.low_gp() ? src.low_gp() : src.high_gp(); - TurboAssembler::Popcnt(dst.low_gp(), src1); - TurboAssembler::Popcnt(dst.high_gp(), src2); - // Now add the two into the lower dst reg and clear the higher dst reg. 
- addu(dst.low_gp(), dst.low_gp(), dst.high_gp()); - mov(dst.high_gp(), zero_reg); - return true; -} - -void LiftoffAssembler::IncrementSmi(LiftoffRegister dst, int offset) { - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - lw(scratch, MemOperand(dst.gp(), offset)); - Addu(scratch, scratch, Operand(Smi::FromInt(1))); - sw(scratch, MemOperand(dst.gp(), offset)); -} - -void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) { - TurboAssembler::Neg_s(dst, src); -} - -void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) { - TurboAssembler::Neg_d(dst, src); -} - -void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs, - DoubleRegister rhs) { - Label ool, done; - TurboAssembler::Float32Min(dst, lhs, rhs, &ool); - Branch(&done); - - bind(&ool); - TurboAssembler::Float32MinOutOfLine(dst, lhs, rhs); - bind(&done); -} - -void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs, - DoubleRegister rhs) { - Label ool, done; - TurboAssembler::Float32Max(dst, lhs, rhs, &ool); - Branch(&done); - - bind(&ool); - TurboAssembler::Float32MaxOutOfLine(dst, lhs, rhs); - bind(&done); -} - -void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs, - DoubleRegister rhs) { - bailout(kComplexOperation, "f32_copysign"); -} - -void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs, - DoubleRegister rhs) { - Label ool, done; - TurboAssembler::Float64Min(dst, lhs, rhs, &ool); - Branch(&done); - - bind(&ool); - TurboAssembler::Float64MinOutOfLine(dst, lhs, rhs); - bind(&done); -} - -void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs, - DoubleRegister rhs) { - Label ool, done; - TurboAssembler::Float64Max(dst, lhs, rhs, &ool); - Branch(&done); - - bind(&ool); - TurboAssembler::Float64MaxOutOfLine(dst, lhs, rhs); - bind(&done); -} - -void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs, - DoubleRegister rhs) { - bailout(kComplexOperation, "f64_copysign"); -} - -#define FP_BINOP(name, instruction) \ - void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \ - DoubleRegister rhs) { \ - instruction(dst, lhs, rhs); \ - } -#define FP_UNOP(name, instruction) \ - void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \ - instruction(dst, src); \ - } -#define FP_UNOP_RETURN_TRUE(name, instruction) \ - bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \ - instruction(dst, src); \ - return true; \ - } - -FP_BINOP(f32_add, add_s) -FP_BINOP(f32_sub, sub_s) -FP_BINOP(f32_mul, mul_s) -FP_BINOP(f32_div, div_s) -FP_UNOP(f32_abs, abs_s) -FP_UNOP_RETURN_TRUE(f32_ceil, Ceil_s_s) -FP_UNOP_RETURN_TRUE(f32_floor, Floor_s_s) -FP_UNOP_RETURN_TRUE(f32_trunc, Trunc_s_s) -FP_UNOP_RETURN_TRUE(f32_nearest_int, Round_s_s) -FP_UNOP(f32_sqrt, sqrt_s) -FP_BINOP(f64_add, add_d) -FP_BINOP(f64_sub, sub_d) -FP_BINOP(f64_mul, mul_d) -FP_BINOP(f64_div, div_d) -FP_UNOP(f64_abs, abs_d) -FP_UNOP(f64_sqrt, sqrt_d) - -#undef FP_BINOP -#undef FP_UNOP - -bool LiftoffAssembler::emit_f64_ceil(DoubleRegister dst, DoubleRegister src) { - if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && - IsFp64Mode()) { - Ceil_d_d(dst, src); - return true; - } - return false; -} - -bool LiftoffAssembler::emit_f64_floor(DoubleRegister dst, DoubleRegister src) { - if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && - IsFp64Mode()) { - Floor_d_d(dst, src); - return true; - } - return 
false; -} - -bool LiftoffAssembler::emit_f64_trunc(DoubleRegister dst, DoubleRegister src) { - if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && - IsFp64Mode()) { - Trunc_d_d(dst, src); - return true; - } - return false; -} - -bool LiftoffAssembler::emit_f64_nearest_int(DoubleRegister dst, - DoubleRegister src) { - if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && - IsFp64Mode()) { - Round_d_d(dst, src); - return true; - } - return false; -} - -bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode, - LiftoffRegister dst, - LiftoffRegister src, Label* trap) { - switch (opcode) { - case kExprI32ConvertI64: - TurboAssembler::Move(dst.gp(), src.low_gp()); - return true; - case kExprI32SConvertF32: { - LiftoffRegister rounded = GetUnusedRegister(kFpReg, LiftoffRegList{src}); - LiftoffRegister converted_back = - GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded}); - - // Real conversion. - TurboAssembler::Trunc_s_s(rounded.fp(), src.fp()); - trunc_w_s(kScratchDoubleReg, rounded.fp()); - mfc1(dst.gp(), kScratchDoubleReg); - // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead, - // because INT32_MIN allows easier out-of-bounds detection. - TurboAssembler::Addu(kScratchReg, dst.gp(), 1); - TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp()); - TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2); - - // Checking if trap. - mtc1(dst.gp(), kScratchDoubleReg); - cvt_s_w(converted_back.fp(), kScratchDoubleReg); - TurboAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp()); - TurboAssembler::BranchFalseF(trap); - return true; - } - case kExprI32UConvertF32: { - LiftoffRegister rounded = GetUnusedRegister(kFpReg, LiftoffRegList{src}); - LiftoffRegister converted_back = - GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded}); - - // Real conversion. - TurboAssembler::Trunc_s_s(rounded.fp(), src.fp()); - TurboAssembler::Trunc_uw_s(dst.gp(), rounded.fp(), kScratchDoubleReg); - // Avoid UINT32_MAX as an overflow indicator and use 0 instead, - // because 0 allows easier out-of-bounds detection. - TurboAssembler::Addu(kScratchReg, dst.gp(), 1); - TurboAssembler::Movz(dst.gp(), zero_reg, kScratchReg); - - // Checking if trap. - TurboAssembler::Cvt_d_uw(converted_back.fp(), dst.gp(), - kScratchDoubleReg); - cvt_s_d(converted_back.fp(), converted_back.fp()); - TurboAssembler::CompareF32(EQ, rounded.fp(), converted_back.fp()); - TurboAssembler::BranchFalseF(trap); - return true; - } - case kExprI32SConvertF64: { - LiftoffRegister scratch = GetUnusedRegister(kGpReg, LiftoffRegList{dst}); - - // Try a conversion to a signed integer. - trunc_w_d(kScratchDoubleReg, src.fp()); - mfc1(dst.gp(), kScratchDoubleReg); - // Retrieve the FCSR. - cfc1(scratch.gp(), FCSR); - // Check for overflow and NaNs. - And(scratch.gp(), scratch.gp(), - kFCSROverflowCauseMask | kFCSRUnderflowCauseMask | - kFCSRInvalidOpCauseMask); - // If we had exceptions we are trap. - Branch(trap, ne, scratch.gp(), Operand(zero_reg)); - return true; - } - case kExprI32UConvertF64: { - if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && - IsFp64Mode()) { - LiftoffRegister rounded = - GetUnusedRegister(kFpReg, LiftoffRegList{src}); - LiftoffRegister converted_back = - GetUnusedRegister(kFpReg, LiftoffRegList{src, rounded}); - - // Real conversion. - TurboAssembler::Trunc_d_d(rounded.fp(), src.fp()); - TurboAssembler::Trunc_uw_d(dst.gp(), rounded.fp(), kScratchDoubleReg); - - // Checking if trap. 
- TurboAssembler::Cvt_d_uw(converted_back.fp(), dst.gp(), - kScratchDoubleReg); - TurboAssembler::CompareF64(EQ, rounded.fp(), converted_back.fp()); - TurboAssembler::BranchFalseF(trap); - return true; - } - bailout(kUnsupportedArchitecture, "kExprI32UConvertF64"); - return true; - } - case kExprI32SConvertSatF32: - bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF32"); - return true; - case kExprI32UConvertSatF32: - bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF32"); - return true; - case kExprI32SConvertSatF64: - bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF64"); - return true; - case kExprI32UConvertSatF64: - bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF64"); - return true; - case kExprI64SConvertSatF32: - bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF32"); - return true; - case kExprI64UConvertSatF32: - bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF32"); - return true; - case kExprI64SConvertSatF64: - bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF64"); - return true; - case kExprI64UConvertSatF64: - bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF64"); - return true; - case kExprI32ReinterpretF32: - mfc1(dst.gp(), src.fp()); - return true; - case kExprI64SConvertI32: - TurboAssembler::Move(dst.low_gp(), src.gp()); - TurboAssembler::Move(dst.high_gp(), src.gp()); - sra(dst.high_gp(), dst.high_gp(), 31); - return true; - case kExprI64UConvertI32: - TurboAssembler::Move(dst.low_gp(), src.gp()); - TurboAssembler::Move(dst.high_gp(), zero_reg); - return true; - case kExprI64ReinterpretF64: - mfc1(dst.low_gp(), src.fp()); - TurboAssembler::Mfhc1(dst.high_gp(), src.fp()); - return true; - case kExprF32SConvertI32: { - LiftoffRegister scratch = GetUnusedRegister(kFpReg, LiftoffRegList{dst}); - mtc1(src.gp(), scratch.fp()); - cvt_s_w(dst.fp(), scratch.fp()); - return true; - } - case kExprF32UConvertI32: { - LiftoffRegister scratch = GetUnusedRegister(kFpReg, LiftoffRegList{dst}); - TurboAssembler::Cvt_d_uw(dst.fp(), src.gp(), scratch.fp()); - cvt_s_d(dst.fp(), dst.fp()); - return true; - } - case kExprF32ConvertF64: - cvt_s_d(dst.fp(), src.fp()); - return true; - case kExprF32ReinterpretI32: - TurboAssembler::FmoveLow(dst.fp(), src.gp()); - return true; - case kExprF64SConvertI32: { - LiftoffRegister scratch = GetUnusedRegister(kFpReg, LiftoffRegList{dst}); - mtc1(src.gp(), scratch.fp()); - cvt_d_w(dst.fp(), scratch.fp()); - return true; - } - case kExprF64UConvertI32: { - LiftoffRegister scratch = GetUnusedRegister(kFpReg, LiftoffRegList{dst}); - TurboAssembler::Cvt_d_uw(dst.fp(), src.gp(), scratch.fp()); - return true; - } - case kExprF64ConvertF32: - cvt_d_s(dst.fp(), src.fp()); - return true; - case kExprF64ReinterpretI64: - mtc1(src.low_gp(), dst.fp()); - TurboAssembler::Mthc1(src.high_gp(), dst.fp()); - return true; - default: - return false; - } -} - -void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) { - bailout(kComplexOperation, "i32_signextend_i8"); -} - -void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) { - bailout(kComplexOperation, "i32_signextend_i16"); -} - -void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kComplexOperation, "i64_signextend_i8"); -} - -void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kComplexOperation, "i64_signextend_i16"); -} - -void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kComplexOperation, 
"i64_signextend_i32"); -} - -void LiftoffAssembler::emit_jump(Label* label) { - TurboAssembler::Branch(label); -} - -void LiftoffAssembler::emit_jump(Register target) { - TurboAssembler::Jump(target); -} - -void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond, - Label* label, ValueKind kind, - Register lhs, Register rhs) { - Condition cond = liftoff::ToCondition(liftoff_cond); - if (rhs == no_reg) { - DCHECK_EQ(kind, kI32); - TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg)); - } else { - DCHECK(kind == kI32 || (is_reference(kind) && (liftoff_cond == kEqual || - liftoff_cond == kUnequal))); - TurboAssembler::Branch(label, cond, lhs, Operand(rhs)); - } -} - -void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond, - Label* label, Register lhs, - int32_t imm) { - Condition cond = liftoff::ToCondition(liftoff_cond); - TurboAssembler::Branch(label, cond, lhs, Operand(imm)); -} - -void LiftoffAssembler::emit_i32_subi_jump_negative( - Register value, int subtrahend, Label* result_negative, - const FreezeCacheState& frozen) { - TurboAssembler::Subu(value, value, Operand(subtrahend)); - TurboAssembler::Branch(result_negative, less, value, Operand(zero_reg)); -} - -void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) { - sltiu(dst, src, 1); -} - -void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond, - Register dst, Register lhs, - Register rhs) { - Condition cond = liftoff::ToCondition(liftoff_cond); - Register tmp = dst; - if (dst == lhs || dst == rhs) { - tmp = GetUnusedRegister(kGpReg, LiftoffRegList{lhs, rhs}).gp(); - } - // Write 1 as result. - TurboAssembler::li(tmp, 1); - - // If negative condition is true, write 0 as result. - Condition neg_cond = NegateCondition(cond); - TurboAssembler::LoadZeroOnCondition(tmp, lhs, Operand(rhs), neg_cond); - - // If tmp != dst, result will be moved. - TurboAssembler::Move(dst, tmp); -} - -void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) { - Register tmp = GetUnusedRegister(kGpReg, LiftoffRegList{src, dst}).gp(); - sltiu(tmp, src.low_gp(), 1); - sltiu(dst, src.high_gp(), 1); - and_(dst, dst, tmp); -} - -namespace liftoff { -inline LiftoffCondition cond_make_unsigned(LiftoffCondition cond) { - switch (cond) { - case kSignedLessThan: - return kUnsignedLessThan; - case kSignedLessEqual: - return kUnsignedLessEqual; - case kSignedGreaterThan: - return kUnsignedGreaterThan; - case kSignedGreaterEqual: - return kUnsignedGreaterEqual; - default: - return cond; - } -} -} // namespace liftoff - -void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond, - Register dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - Condition cond = liftoff::ToCondition(liftoff_cond); - Label low, cont; - - // For signed i64 comparisons, we still need to use unsigned comparison for - // the low word (the only bit carrying signedness information is the MSB in - // the high word). - Condition unsigned_cond = - liftoff::ToCondition(liftoff::cond_make_unsigned(liftoff_cond)); - - Register tmp = dst; - if (liftoff::IsRegInRegPair(lhs, dst) || liftoff::IsRegInRegPair(rhs, dst)) { - tmp = GetUnusedRegister(kGpReg, LiftoffRegList{dst, lhs, rhs}).gp(); - } - - // Write 1 initially in tmp register. - TurboAssembler::li(tmp, 1); - - // If high words are equal, then compare low words, else compare high. 
- Branch(&low, eq, lhs.high_gp(), Operand(rhs.high_gp())); - - TurboAssembler::LoadZeroOnCondition( - tmp, lhs.high_gp(), Operand(rhs.high_gp()), NegateCondition(cond)); - Branch(&cont); - - bind(&low); - TurboAssembler::LoadZeroOnCondition(tmp, lhs.low_gp(), Operand(rhs.low_gp()), - NegateCondition(unsigned_cond)); - - bind(&cont); - // Move result to dst register if needed. - TurboAssembler::Move(dst, tmp); -} - -namespace liftoff { - -inline FPUCondition ConditionToConditionCmpFPU(LiftoffCondition condition, - bool* predicate) { - switch (condition) { - case kEqual: - *predicate = true; - return EQ; - case kUnequal: - *predicate = false; - return EQ; - case kUnsignedLessThan: - *predicate = true; - return OLT; - case kUnsignedGreaterEqual: - *predicate = false; - return OLT; - case kUnsignedLessEqual: - *predicate = true; - return OLE; - case kUnsignedGreaterThan: - *predicate = false; - return OLE; - default: - *predicate = true; - break; - } - UNREACHABLE(); -} - -} // namespace liftoff - -void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond, - Register dst, DoubleRegister lhs, - DoubleRegister rhs) { - Condition cond = liftoff::ToCondition(liftoff_cond); - Label not_nan, cont; - TurboAssembler::CompareIsNanF32(lhs, rhs); - TurboAssembler::BranchFalseF(&not_nan); - // If one of the operands is NaN, return 1 for f32.ne, else 0. - if (cond == ne) { - TurboAssembler::li(dst, 1); - } else { - TurboAssembler::Move(dst, zero_reg); - } - TurboAssembler::Branch(&cont); - - bind(&not_nan); - - TurboAssembler::li(dst, 1); - bool predicate; - FPUCondition fcond = - liftoff::ConditionToConditionCmpFPU(liftoff_cond, &predicate); - TurboAssembler::CompareF32(fcond, lhs, rhs); - if (predicate) { - TurboAssembler::LoadZeroIfNotFPUCondition(dst); - } else { - TurboAssembler::LoadZeroIfFPUCondition(dst); - } - - bind(&cont); -} - -void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond, - Register dst, DoubleRegister lhs, - DoubleRegister rhs) { - Condition cond = liftoff::ToCondition(liftoff_cond); - Label not_nan, cont; - TurboAssembler::CompareIsNanF64(lhs, rhs); - TurboAssembler::BranchFalseF(&not_nan); - // If one of the operands is NaN, return 1 for f64.ne, else 0. - if (cond == ne) { - TurboAssembler::li(dst, 1); - } else { - TurboAssembler::Move(dst, zero_reg); - } - TurboAssembler::Branch(&cont); - - bind(&not_nan); - - TurboAssembler::li(dst, 1); - bool predicate; - FPUCondition fcond = - liftoff::ConditionToConditionCmpFPU(liftoff_cond, &predicate); - TurboAssembler::CompareF64(fcond, lhs, rhs); - if (predicate) { - TurboAssembler::LoadZeroIfNotFPUCondition(dst); - } else { - TurboAssembler::LoadZeroIfFPUCondition(dst); - } - - bind(&cont); -} - -bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition, - LiftoffRegister true_value, - LiftoffRegister false_value, - ValueKind kind) { - return false; -} - -void LiftoffAssembler::emit_smi_check(Register obj, Label* target, - SmiCheckMode mode) { - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - And(scratch, obj, Operand(kSmiTagMask)); - Condition condition = mode == kJumpOnSmi ?
eq : ne; - Branch(target, condition, scratch, Operand(zero_reg)); -} - -void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr, - Register offset_reg, uintptr_t offset_imm, - LoadType type, - LoadTransformationKind transform, - uint32_t* protected_load_pc) { - bailout(kSimd, "load extend and load splat unimplemented"); -} - -void LiftoffAssembler::StoreLane(Register dst, Register offset, - uintptr_t offset_imm, LiftoffRegister src, - StoreType type, uint8_t lane, - uint32_t* protected_store_pc) { - bailout(kSimd, "storelane"); -} - -void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src, - Register addr, Register offset_reg, - uintptr_t offset_imm, LoadType type, - uint8_t laneidx, uint32_t* protected_load_pc) { - bailout(kSimd, "loadlane"); -} - -void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs, - const uint8_t shuffle[16], - bool is_swizzle) { - bailout(kSimd, "emit_i8x16_shuffle"); -} - -void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i8x16_swizzle"); -} - -void LiftoffAssembler::emit_i8x16_relaxed_swizzle(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kRelaxedSimd, "emit_i8x16_relaxed_swizzle"); -} - -void LiftoffAssembler::emit_i32x4_relaxed_trunc_f32x4_s(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kRelaxedSimd, "emit_i32x4_relaxed_trunc_f32x4_s"); -} - -void LiftoffAssembler::emit_i32x4_relaxed_trunc_f32x4_u(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kRelaxedSimd, "emit_i32x4_relaxed_trunc_f32x4_u"); -} - -void LiftoffAssembler::emit_i32x4_relaxed_trunc_f64x2_s_zero( - LiftoffRegister dst, LiftoffRegister src) { - bailout(kRelaxedSimd, "emit_i32x4_relaxed_trunc_f64x2_s_zero"); -} - -void LiftoffAssembler::emit_i32x4_relaxed_trunc_f64x2_u_zero( - LiftoffRegister dst, LiftoffRegister src) { - bailout(kRelaxedSimd, "emit_i32x4_relaxed_trunc_f64x2_u_zero"); -} - -void LiftoffAssembler::emit_s128_relaxed_laneselect(LiftoffRegister dst, - LiftoffRegister src1, - LiftoffRegister src2, - LiftoffRegister mask) { - bailout(kRelaxedSimd, "emit_s128_relaxed_laneselect"); -} - -void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i8x16_splat"); -} - -void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i16x8_splat"); -} - -void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i32x4_splat"); -} - -void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i64x2_splat"); -} - -void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_f32x4_splat"); -} - -void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_f64x2_splat"); -} - -#define SIMD_BINOP(name, ilv_instr, dotp_instr) \ - void LiftoffAssembler::emit_##name( \ - LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \ - MSARegister dst_msa = MSARegister::from_code(dst.liftoff_code()); \ - MSARegister src1_msa = MSARegister::from_code(src1.liftoff_code()); \ - MSARegister src2_msa = MSARegister::from_code(src2.liftoff_code()); \ - xor_v(kSimd128RegZero, kSimd128RegZero, kSimd128RegZero); \ - ilv_instr(kSimd128ScratchReg, kSimd128RegZero, src1_msa); \ - ilv_instr(kSimd128RegZero, 
kSimd128RegZero, src2_msa); \ - dotp_instr(dst_msa, kSimd128ScratchReg, kSimd128RegZero); \ - } - -SIMD_BINOP(i16x8_extmul_low_i8x16_s, ilvr_b, dotp_s_h) -SIMD_BINOP(i16x8_extmul_high_i8x16_s, ilvl_b, dotp_s_h) -SIMD_BINOP(i16x8_extmul_low_i8x16_u, ilvr_b, dotp_u_h) -SIMD_BINOP(i16x8_extmul_high_i8x16_u, ilvl_b, dotp_u_h) - -SIMD_BINOP(i32x4_extmul_low_i16x8_s, ilvr_h, dotp_s_w) -SIMD_BINOP(i32x4_extmul_high_i16x8_s, ilvl_h, dotp_s_w) -SIMD_BINOP(i32x4_extmul_low_i16x8_u, ilvr_h, dotp_u_w) -SIMD_BINOP(i32x4_extmul_high_i16x8_u, ilvl_h, dotp_u_w) - -SIMD_BINOP(i64x2_extmul_low_i32x4_s, ilvr_w, dotp_s_d) -SIMD_BINOP(i64x2_extmul_high_i32x4_s, ilvl_w, dotp_s_d) -SIMD_BINOP(i64x2_extmul_low_i32x4_u, ilvr_w, dotp_u_d) -SIMD_BINOP(i64x2_extmul_high_i32x4_u, ilvl_w, dotp_u_d) - -#undef SIMD_BINOP - -void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst, - LiftoffRegister src1, - LiftoffRegister src2) { - bailout(kSimd, "i16x8_q15mulr_sat_s"); -} - -void LiftoffAssembler::emit_i16x8_relaxed_q15mulr_s(LiftoffRegister dst, - LiftoffRegister src1, - LiftoffRegister src2) { - bailout(kRelaxedSimd, "emit_i16x8_relaxed_q15mulr_s"); -} - -void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i8x16_eq"); -} - -void LiftoffAssembler::emit_i8x16_ne(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i8x16_ne"); -} - -void LiftoffAssembler::emit_i8x16_gt_s(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i8x16_gt_s"); -} - -void LiftoffAssembler::emit_i8x16_gt_u(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i8x16_gt_u"); -} - -void LiftoffAssembler::emit_i8x16_ge_s(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i8x16_ge_s"); -} - -void LiftoffAssembler::emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i8x16_ge_u"); -} - -void LiftoffAssembler::emit_i16x8_eq(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i16x8_eq"); -} - -void LiftoffAssembler::emit_i16x8_ne(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i16x8_ne"); -} - -void LiftoffAssembler::emit_i16x8_gt_s(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i16x8_gt_s"); -} - -void LiftoffAssembler::emit_i16x8_gt_u(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i16x8_gt_u"); -} - -void LiftoffAssembler::emit_i16x8_ge_s(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i16x8_ge_s"); -} - -void LiftoffAssembler::emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i16x8_ge_u"); -} - -void LiftoffAssembler::emit_i32x4_eq(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i32x4_eq"); -} - -void LiftoffAssembler::emit_i32x4_ne(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i32x4_ne"); -} - -void LiftoffAssembler::emit_i32x4_gt_s(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i32x4_gt_s"); -} - -void LiftoffAssembler::emit_i32x4_gt_u(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i32x4_gt_u"); -} - -void 
LiftoffAssembler::emit_i32x4_ge_s(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i32x4_ge_s"); -} - -void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i32x4_ge_u"); -} - -void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_f32x4_eq"); -} - -void LiftoffAssembler::emit_f32x4_ne(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_f32x4_ne"); -} - -void LiftoffAssembler::emit_f32x4_lt(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_f32x4_lt"); -} - -void LiftoffAssembler::emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_f32x4_le"); -} - -void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i64x2_eq"); -} - -void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i64x2_ne"); -} - -void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i64x2_abs"); -} - -void LiftoffAssembler::emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_f64x2_eq"); -} - -void LiftoffAssembler::emit_f64x2_ne(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_f64x2_ne"); -} - -void LiftoffAssembler::emit_f64x2_lt(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_f64x2_lt"); -} - -void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_f64x2_le"); -} - -void LiftoffAssembler::emit_s128_const(LiftoffRegister dst, - const uint8_t imms[16]) { - bailout(kSimd, "emit_s128_const"); -} - -void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) { - bailout(kSimd, "emit_s128_not"); -} - -void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_s128_and"); -} - -void LiftoffAssembler::emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_s128_or"); -} - -void LiftoffAssembler::emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_s128_xor"); -} - -void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_s128_and_not"); -} - -void LiftoffAssembler::emit_s128_select(LiftoffRegister dst, - LiftoffRegister src1, - LiftoffRegister src2, - LiftoffRegister mask) { - bailout(kSimd, "emit_s128_select"); -} - -void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i8x16_neg"); -} - -void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_v128_anytrue"); -} - -void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i8x16_alltrue"); -} - -void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i8x16_bitmask"); -} - -void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, 
"emit_i8x16_shl"); -} - -void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs, - int32_t rhs) { - bailout(kSimd, "emit_i8x16_shli"); -} - -void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i8x16_shr_s"); -} - -void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst, - LiftoffRegister lhs, int32_t rhs) { - bailout(kSimd, "emit_i8x16_shri_s"); -} - -void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i8x16_shr_u"); -} - -void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst, - LiftoffRegister lhs, int32_t rhs) { - bailout(kSimd, "emit_i8x16_shri_u"); -} - -void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i8x16_add"); -} - -void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i8x16_add_sat_s"); -} - -void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i8x16_add_sat_u"); -} - -void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i8x16_sub"); -} - -void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i8x16_sub_sat_s"); -} - -void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i8x16_sub_sat_u"); -} - -void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i8x16_min_s"); -} - -void LiftoffAssembler::emit_i8x16_min_u(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i8x16_min_u"); -} - -void LiftoffAssembler::emit_i8x16_max_s(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i8x16_max_s"); -} - -void LiftoffAssembler::emit_i8x16_max_u(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i8x16_max_u"); -} - -void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i8x16_popcnt"); -} - -void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i16x8_neg"); -} - -void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i16x8_alltrue"); -} - -void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i16x8_bitmask"); -} - -void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i16x8_shl"); -} - -void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs, - int32_t rhs) { - bailout(kSimd, "emit_i16x8_shli"); -} - -void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i16x8_shr_s"); -} - -void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst, - LiftoffRegister lhs, int32_t rhs) { - bailout(kSimd, "emit_i16x8_shri_s"); -} - -void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, 
"emit_i16x8_shr_u"); -} - -void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst, - LiftoffRegister lhs, int32_t rhs) { - bailout(kSimd, "emit_i16x8_shri_u"); -} - -void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i16x8_add"); -} - -void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i16x8_add_sat_s"); -} - -void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i16x8_add_sat_u"); -} - -void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i16x8_sub"); -} - -void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i16x8_sub_sat_s"); -} - -void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i16x8_sub_sat_u"); -} - -void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i16x8_mul"); -} - -void LiftoffAssembler::emit_i16x8_min_s(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i16x8_min_s"); -} - -void LiftoffAssembler::emit_i16x8_min_u(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i16x8_min_u"); -} - -void LiftoffAssembler::emit_i16x8_max_s(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i16x8_max_s"); -} - -void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i16x8_max_u"); -} - -void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i16x8_extadd_pairwise_i8x16_s"); -} - -void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i16x8_extadd_pairwise_i8x16_u"); -} - -void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i32x4_neg"); -} - -void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i32x4_alltrue"); -} - -void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i32x4_bitmask"); -} - -void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i32x4_shl"); -} - -void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs, - int32_t rhs) { - bailout(kSimd, "emit_i32x4_shli"); -} - -void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i32x4_shr_s"); -} - -void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst, - LiftoffRegister lhs, int32_t rhs) { - bailout(kSimd, "emit_i32x4_shri_s"); -} - -void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i32x4_shr_u"); -} - -void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst, - LiftoffRegister lhs, int32_t rhs) { - bailout(kSimd, "emit_i32x4_shri_u"); -} - -void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister 
lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i32x4_add"); -} - -void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i32x4_sub"); -} - -void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i32x4_mul"); -} - -void LiftoffAssembler::emit_i32x4_min_s(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i32x4_min_s"); -} - -void LiftoffAssembler::emit_i32x4_min_u(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i32x4_min_u"); -} - -void LiftoffAssembler::emit_i32x4_max_s(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i32x4_max_s"); -} - -void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i32x4_max_u"); -} - -void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i32x4_dot_i16x8_s"); -} - -void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i32x4_extadd_pairwise_i16x8_s"); -} - -void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i32x4_extadd_pairwise_i16x8_u"); -} - -void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i64x2_neg"); -} - -void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i64x2_alltrue"); -} - -void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i64x2_bitmask"); -} - -void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i64x2_shl"); -} - -void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs, - int32_t rhs) { - bailout(kSimd, "emit_i64x2_shli"); -} - -void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i64x2_shr_s"); -} - -void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst, - LiftoffRegister lhs, int32_t rhs) { - bailout(kSimd, "emit_i64x2_shri_s"); -} - -void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i64x2_shr_u"); -} - -void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst, - LiftoffRegister lhs, int32_t rhs) { - bailout(kSimd, "emit_i64x2_shri_u"); -} - -void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i64x2_add"); -} - -void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i64x2_sub"); -} - -void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i64x2_mul"); -} - -void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i64x2_gt_s"); -} - -void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i64x2_ge_s"); -} - -void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst, - 
LiftoffRegister src) { - bailout(kSimd, "emit_f32x4_abs"); -} - -void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_f32x4_neg"); -} - -void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_f32x4_sqrt"); -} - -bool LiftoffAssembler::emit_f32x4_ceil(LiftoffRegister dst, - LiftoffRegister src) { - return false; -} - -bool LiftoffAssembler::emit_f32x4_floor(LiftoffRegister dst, - LiftoffRegister src) { - return false; -} - -bool LiftoffAssembler::emit_f32x4_trunc(LiftoffRegister dst, - LiftoffRegister src) { - return false; -} - -bool LiftoffAssembler::emit_f32x4_nearest_int(LiftoffRegister dst, - LiftoffRegister src) { - return false; -} - -void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_f32x4_add"); -} - -void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_f32x4_sub"); -} - -void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_f32x4_mul"); -} - -void LiftoffAssembler::emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_f32x4_div"); -} - -void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_f32x4_min"); -} - -void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_f32x4_max"); -} - -void LiftoffAssembler::emit_f32x4_relaxed_min(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kRelaxedSimd, "emit_f32x4_relaxed_min"); -} - -void LiftoffAssembler::emit_f32x4_relaxed_max(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kRelaxedSimd, "emit_f32x4_relaxed_max"); -} - -void LiftoffAssembler::emit_f32x4_pmin(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_f32x4_pmin"); -} - -void LiftoffAssembler::emit_f32x4_pmax(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_f32x4_pmax"); -} - -void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_f64x2_abs"); -} - -void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_f64x2_neg"); -} - -void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_f64x2_sqrt"); -} - -bool LiftoffAssembler::emit_f64x2_ceil(LiftoffRegister dst, - LiftoffRegister src) { - return false; -} - -bool LiftoffAssembler::emit_f64x2_floor(LiftoffRegister dst, - LiftoffRegister src) { - return false; -} - -bool LiftoffAssembler::emit_f64x2_trunc(LiftoffRegister dst, - LiftoffRegister src) { - return false; -} - -bool LiftoffAssembler::emit_f64x2_nearest_int(LiftoffRegister dst, - LiftoffRegister src) { - return false; -} - -void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_f64x2_add"); -} - -void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_f64x2_sub"); -} - -void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_f64x2_mul"); -} - -void 
LiftoffAssembler::emit_f64x2_div(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_f64x2_div"); -} - -void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_f64x2_min"); -} - -void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_f64x2_max"); -} - -void LiftoffAssembler::emit_f64x2_pmin(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_f64x2_pmin"); -} - -void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_f64x2_pmax"); -} - -void LiftoffAssembler::emit_f64x2_relaxed_min(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kRelaxedSimd, "emit_f64x2_relaxed_min"); -} - -void LiftoffAssembler::emit_f64x2_relaxed_max(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kRelaxedSimd, "emit_f64x2_relaxed_max"); -} - -void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_f64x2_convert_low_i32x4_s"); -} - -void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_f64x2_convert_low_i32x4_u"); -} - -void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_f64x2_promote_low_f32x4"); -} - -void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i32x4_sconvert_f32x4"); -} - -void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i32x4_uconvert_f32x4"); -} - -void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i32x4_trunc_sat_f64x2_s_zero"); -} - -void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i32x4_trunc_sat_f64x2_u_zero"); -} - -void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_f32x4_sconvert_i32x4"); -} - -void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_f32x4_uconvert_i32x4"); -} - -void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_f32x4_demote_f64x2_zero"); -} - -void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i8x16_sconvert_i16x8"); -} - -void LiftoffAssembler::emit_i8x16_uconvert_i16x8(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i8x16_uconvert_i16x8"); -} - -void LiftoffAssembler::emit_i16x8_sconvert_i32x4(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i16x8_sconvert_i32x4"); -} - -void LiftoffAssembler::emit_i16x8_uconvert_i32x4(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i16x8_uconvert_i32x4"); -} - -void LiftoffAssembler::emit_i16x8_sconvert_i8x16_low(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i16x8_sconvert_i8x16_low"); -} - -void LiftoffAssembler::emit_i16x8_sconvert_i8x16_high(LiftoffRegister dst, - LiftoffRegister 
src) { - bailout(kSimd, "emit_i16x8_sconvert_i8x16_high"); -} - -void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i16x8_uconvert_i8x16_low"); -} - -void LiftoffAssembler::emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i16x8_uconvert_i8x16_high"); -} - -void LiftoffAssembler::emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i32x4_sconvert_i16x8_low"); -} - -void LiftoffAssembler::emit_i32x4_sconvert_i16x8_high(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i32x4_sconvert_i16x8_high"); -} - -void LiftoffAssembler::emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i32x4_uconvert_i16x8_low"); -} - -void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i32x4_uconvert_i16x8_high"); -} - -void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i64x2_sconvert_i32x4_low"); -} - -void LiftoffAssembler::emit_i64x2_sconvert_i32x4_high(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i64x2_sconvert_i32x4_high"); -} - -void LiftoffAssembler::emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i64x2_uconvert_i32x4_low"); -} - -void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i64x2_uconvert_i32x4_high"); -} - -void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i8x16_rounding_average_u"); -} - -void LiftoffAssembler::emit_i16x8_rounding_average_u(LiftoffRegister dst, - LiftoffRegister lhs, - LiftoffRegister rhs) { - bailout(kSimd, "emit_i16x8_rounding_average_u"); -} - -void LiftoffAssembler::emit_i8x16_abs(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i8x16_abs"); -} - -void LiftoffAssembler::emit_i16x8_abs(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i16x8_abs"); -} - -void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst, - LiftoffRegister src) { - bailout(kSimd, "emit_i32x4_abs"); -} - -void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst, - LiftoffRegister lhs, - uint8_t imm_lane_idx) { - bailout(kSimd, "emit_i8x16_extract_lane_s"); -} - -void LiftoffAssembler::emit_i8x16_extract_lane_u(LiftoffRegister dst, - LiftoffRegister lhs, - uint8_t imm_lane_idx) { - bailout(kSimd, "emit_i8x16_extract_lane_u"); -} - -void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst, - LiftoffRegister lhs, - uint8_t imm_lane_idx) { - bailout(kSimd, "emit_i16x8_extract_lane_s"); -} - -void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst, - LiftoffRegister lhs, - uint8_t imm_lane_idx) { - bailout(kSimd, "emit_i16x8_extract_lane_u"); -} - -void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst, - LiftoffRegister lhs, - uint8_t imm_lane_idx) { - bailout(kSimd, "emit_i32x4_extract_lane"); -} - -void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst, - LiftoffRegister lhs, - uint8_t imm_lane_idx) { - bailout(kSimd, "emit_i64x2_extract_lane"); -} - -void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst, - LiftoffRegister lhs, - uint8_t imm_lane_idx) { - bailout(kSimd, 
"emit_f32x4_extract_lane"); -} - -void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst, - LiftoffRegister lhs, - uint8_t imm_lane_idx) { - bailout(kSimd, "emit_f64x2_extract_lane"); -} - -void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst, - LiftoffRegister src1, - LiftoffRegister src2, - uint8_t imm_lane_idx) { - bailout(kSimd, "emit_i8x16_replace_lane"); -} - -void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst, - LiftoffRegister src1, - LiftoffRegister src2, - uint8_t imm_lane_idx) { - bailout(kSimd, "emit_i16x8_replace_lane"); -} - -void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst, - LiftoffRegister src1, - LiftoffRegister src2, - uint8_t imm_lane_idx) { - bailout(kSimd, "emit_i32x4_replace_lane"); -} - -void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst, - LiftoffRegister src1, - LiftoffRegister src2, - uint8_t imm_lane_idx) { - bailout(kSimd, "emit_i64x2_replace_lane"); -} - -void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst, - LiftoffRegister src1, - LiftoffRegister src2, - uint8_t imm_lane_idx) { - bailout(kSimd, "emit_f32x4_replace_lane"); -} - -void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst, - LiftoffRegister src1, - LiftoffRegister src2, - uint8_t imm_lane_idx) { - bailout(kSimd, "emit_f64x2_replace_lane"); -} - -void LiftoffAssembler::emit_f32x4_qfma(LiftoffRegister dst, - LiftoffRegister src1, - LiftoffRegister src2, - LiftoffRegister src3) { - bailout(kRelaxedSimd, "emit_f32x4_qfma"); -} - -void LiftoffAssembler::emit_f32x4_qfms(LiftoffRegister dst, - LiftoffRegister src1, - LiftoffRegister src2, - LiftoffRegister src3) { - bailout(kRelaxedSimd, "emit_f32x4_qfms"); -} - -void LiftoffAssembler::emit_f64x2_qfma(LiftoffRegister dst, - LiftoffRegister src1, - LiftoffRegister src2, - LiftoffRegister src3) { - bailout(kRelaxedSimd, "emit_f64x2_qfma"); -} - -void LiftoffAssembler::emit_f64x2_qfms(LiftoffRegister dst, - LiftoffRegister src1, - LiftoffRegister src2, - LiftoffRegister src3) { - bailout(kRelaxedSimd, "emit_f64x2_qfms"); -} - -void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) { - TurboAssembler::Ulw(limit_address, MemOperand(limit_address)); - TurboAssembler::Branch(ool_code, ule, sp, Operand(limit_address)); -} - -void LiftoffAssembler::CallTrapCallbackForTesting() { - PrepareCallCFunction(0, GetUnusedRegister(kGpReg, {}).gp()); - CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0); -} - -void LiftoffAssembler::AssertUnreachable(AbortReason reason) { - if (v8_flags.debug_code) Abort(reason); -} - -void LiftoffAssembler::PushRegisters(LiftoffRegList regs) { - LiftoffRegList gp_regs = regs & kGpCacheRegList; - unsigned num_gp_regs = gp_regs.GetNumRegsSet(); - if (num_gp_regs) { - unsigned offset = num_gp_regs * kSystemPointerSize; - addiu(sp, sp, -offset); - while (!gp_regs.is_empty()) { - LiftoffRegister reg = gp_regs.GetFirstRegSet(); - offset -= kSystemPointerSize; - sw(reg.gp(), MemOperand(sp, offset)); - gp_regs.clear(reg); - } - DCHECK_EQ(offset, 0); - } - LiftoffRegList fp_regs = regs & kFpCacheRegList; - unsigned num_fp_regs = fp_regs.GetNumRegsSet(); - if (num_fp_regs) { - addiu(sp, sp, -(num_fp_regs * kStackSlotSize)); - unsigned offset = 0; - while (!fp_regs.is_empty()) { - LiftoffRegister reg = fp_regs.GetFirstRegSet(); - TurboAssembler::Sdc1(reg.fp(), MemOperand(sp, offset)); - fp_regs.clear(reg); - offset += sizeof(double); - } - DCHECK_EQ(offset, num_fp_regs * sizeof(double)); - } -} - -void 
LiftoffAssembler::PopRegisters(LiftoffRegList regs) { - LiftoffRegList fp_regs = regs & kFpCacheRegList; - unsigned fp_offset = 0; - while (!fp_regs.is_empty()) { - LiftoffRegister reg = fp_regs.GetFirstRegSet(); - TurboAssembler::Ldc1(reg.fp(), MemOperand(sp, fp_offset)); - fp_regs.clear(reg); - fp_offset += sizeof(double); - } - if (fp_offset) addiu(sp, sp, fp_offset); - LiftoffRegList gp_regs = regs & kGpCacheRegList; - unsigned gp_offset = 0; - while (!gp_regs.is_empty()) { - LiftoffRegister reg = gp_regs.GetLastRegSet(); - lw(reg.gp(), MemOperand(sp, gp_offset)); - gp_regs.clear(reg); - gp_offset += kSystemPointerSize; - } - addiu(sp, sp, gp_offset); -} - -void LiftoffAssembler::RecordSpillsInSafepoint( - SafepointTableBuilder::Safepoint& safepoint, LiftoffRegList all_spills, - LiftoffRegList ref_spills, int spill_offset) { - int spill_space_size = 0; - while (!all_spills.is_empty()) { - LiftoffRegister reg = all_spills.GetFirstRegSet(); - if (ref_spills.has(reg)) { - safepoint.DefineTaggedStackSlot(spill_offset); - } - all_spills.clear(reg); - ++spill_offset; - spill_space_size += kSystemPointerSize; - } - // Record the number of additional spill slots. - RecordOolSpillSpaceSize(spill_space_size); -} - -void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) { - DCHECK_LT(num_stack_slots, - (1 << 16) / kSystemPointerSize); // 16 bit immediate - TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots)); -} - -void LiftoffAssembler::CallC(const ValueKindSig* sig, - const LiftoffRegister* args, - const LiftoffRegister* rets, - ValueKind out_argument_kind, int stack_bytes, - ExternalReference ext_ref) { - addiu(sp, sp, -stack_bytes); - - int arg_bytes = 0; - for (ValueKind param_kind : sig->parameters()) { - liftoff::Store(this, sp, arg_bytes, *args++, param_kind); - arg_bytes += value_kind_size(param_kind); - } - DCHECK_LE(arg_bytes, stack_bytes); - - // Pass a pointer to the buffer with the arguments to the C function. - // On mips, the first argument is passed in {a0}. - constexpr Register kFirstArgReg = a0; - mov(kFirstArgReg, sp); - - // Now call the C function. - constexpr int kNumCCallArgs = 1; - PrepareCallCFunction(kNumCCallArgs, kScratchReg); - CallCFunction(ext_ref, kNumCCallArgs); - - // Move return value to the right register. - const LiftoffRegister* next_result_reg = rets; - if (sig->return_count() > 0) { - DCHECK_EQ(1, sig->return_count()); - constexpr Register kReturnReg = v0; - if (kReturnReg != next_result_reg->gp()) { - Move(*next_result_reg, LiftoffRegister(kReturnReg), sig->GetReturn(0)); - } - ++next_result_reg; - } - - // Load potential output value from the buffer on the stack. - if (out_argument_kind != kVoid) { - liftoff::Load(this, *next_result_reg, sp, 0, out_argument_kind); - } - - addiu(sp, sp, stack_bytes); -} - -void LiftoffAssembler::CallNativeWasmCode(Address addr) { - Call(addr, RelocInfo::WASM_CALL); -} - -void LiftoffAssembler::TailCallNativeWasmCode(Address addr) { - Jump(addr, RelocInfo::WASM_CALL); -} - -void LiftoffAssembler::CallIndirect(const ValueKindSig* sig, - compiler::CallDescriptor* call_descriptor, - Register target) { - if (target == no_reg) { - pop(kScratchReg); - Call(kScratchReg); - } else { - Call(target); - } -} - -void LiftoffAssembler::TailCallIndirect(Register target) { - if (target == no_reg) { - Pop(kScratchReg); - Jump(kScratchReg); - } else { - Jump(target); - } -} - -void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) { - // A direct call to a wasm runtime stub defined in this module.
-  // Just encode the stub index. This will be patched at relocation.
-  Call(static_cast<Address>
(sid), RelocInfo::WASM_STUB_CALL); -} - -void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) { - addiu(sp, sp, -size); - TurboAssembler::Move(addr, sp); -} - -void LiftoffAssembler::DeallocateStackSlot(uint32_t size) { - addiu(sp, sp, size); -} - -void LiftoffAssembler::MaybeOSR() {} - -void LiftoffAssembler::emit_set_if_nan(Register dst, FPURegister src, - ValueKind kind) { - UseScratchRegisterScope temps(this); - Register scratch = temps.Acquire(); - Label not_nan; - if (kind == kF32) { - CompareIsNanF32(src, src); - } else { - DCHECK_EQ(kind, kF64); - CompareIsNanF64(src, src); - } - BranchFalseShortF(¬_nan, USE_DELAY_SLOT); - li(scratch, 1); - sw(scratch, MemOperand(dst)); - bind(¬_nan); -} - -void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src, - Register tmp_gp, - LiftoffRegister tmp_s128, - ValueKind lane_kind) { - UNIMPLEMENTED(); -} - -void LiftoffStackSlots::Construct(int param_slots) { - DCHECK_LT(0, slots_.size()); - SortInPushOrder(); - int last_stack_slot = param_slots; - for (auto& slot : slots_) { - const int stack_slot = slot.dst_slot_; - int stack_decrement = (last_stack_slot - stack_slot) * kSystemPointerSize; - DCHECK_LT(0, stack_decrement); - last_stack_slot = stack_slot; - const LiftoffAssembler::VarState& src = slot.src_; - switch (src.loc()) { - case LiftoffAssembler::VarState::kStack: { - if (src.kind() == kF64) { - asm_->AllocateStackSpace(stack_decrement - kDoubleSize); - DCHECK_EQ(kLowWord, slot.half_); - asm_->lw(kScratchReg, - liftoff::GetHalfStackSlot(slot.src_offset_, kHighWord)); - asm_->push(kScratchReg); - } else { - asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize); - } - asm_->lw(kScratchReg, - liftoff::GetHalfStackSlot(slot.src_offset_, slot.half_)); - asm_->push(kScratchReg); - break; - } - case LiftoffAssembler::VarState::kRegister: { - int pushed_bytes = SlotSizeInBytes(slot); - asm_->AllocateStackSpace(stack_decrement - pushed_bytes); - if (src.kind() == kI64) { - liftoff::push( - asm_, slot.half_ == kLowWord ? src.reg().low() : src.reg().high(), - kI32); - } else { - liftoff::push(asm_, src.reg(), src.kind()); - } - break; - } - case LiftoffAssembler::VarState::kIntConst: { - // The high word is the sign extension of the low word. - asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize); - asm_->li(kScratchReg, - Operand(slot.half_ == kLowWord ? 
src.i32_const() - : src.i32_const() >> 31)); - asm_->push(kScratchReg); - break; - } - } - } -} - -} // namespace wasm -} // namespace internal -} // namespace v8 - -#endif // V8_WASM_BASELINE_MIPS_LIFTOFF_ASSEMBLER_MIPS_H_ diff --git a/src/wasm/jump-table-assembler.cc b/src/wasm/jump-table-assembler.cc index f82f47d4ea..e039eeed9b 100644 --- a/src/wasm/jump-table-assembler.cc +++ b/src/wasm/jump-table-assembler.cc @@ -246,7 +246,7 @@ void JumpTableAssembler::NopBytes(int bytes) { } } -#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 +#elif V8_TARGET_ARCH_MIPS64 void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index, Address lazy_compile_target) { int start = pc_offset(); diff --git a/src/wasm/wasm-linkage.h b/src/wasm/wasm-linkage.h index 521056847f..b8fbc3f055 100644 --- a/src/wasm/wasm-linkage.h +++ b/src/wasm/wasm-linkage.h @@ -61,15 +61,6 @@ constexpr Register kGpReturnRegisters[] = {x0, x1}; constexpr DoubleRegister kFpParamRegisters[] = {d0, d1, d2, d3, d4, d5, d6, d7}; constexpr DoubleRegister kFpReturnRegisters[] = {d0, d1}; -#elif V8_TARGET_ARCH_MIPS -// =========================================================================== -// == mips =================================================================== -// =========================================================================== -constexpr Register kGpParamRegisters[] = {a0, a2, a3}; -constexpr Register kGpReturnRegisters[] = {v0, v1}; -constexpr DoubleRegister kFpParamRegisters[] = {f2, f4, f6, f8, f10, f12, f14}; -constexpr DoubleRegister kFpReturnRegisters[] = {f2, f4}; - #elif V8_TARGET_ARCH_MIPS64 // =========================================================================== // == mips64 ================================================================= diff --git a/src/wasm/wasm-serialization.cc b/src/wasm/wasm-serialization.cc index c30e70f90e..1b8064da1f 100644 --- a/src/wasm/wasm-serialization.cc +++ b/src/wasm/wasm-serialization.cc @@ -380,9 +380,9 @@ void NativeModuleSerializer::WriteCode(const WasmCode* code, Writer* writer) { writer->WriteVector(code->reloc_info()); writer->WriteVector(code->source_positions()); writer->WriteVector(code->protected_instructions_data()); -#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM || \ - V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390X || \ - V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64 +#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_PPC || \ + V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_RISCV32 || \ + V8_TARGET_ARCH_RISCV64 // On platforms that don't support misaligned word stores, copy to an aligned // buffer if necessary so we can relocate the serialized code. 
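  // (Relocation rewrites the copied code with word-sized writes, which the
  //  architectures listed above require to be naturally aligned; the
  //  aligned_buffer below is the fallback copy for a misaligned source.)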
   std::unique_ptr<byte[]> aligned_buffer;
diff --git a/test/cctest/BUILD.gn b/test/cctest/BUILD.gn
index 11b1a68067..0ab437883c 100644
--- a/test/cctest/BUILD.gn
+++ b/test/cctest/BUILD.gn
@@ -232,18 +232,6 @@ v8_source_set("cctest_sources") {
       "test-assembler-ia32.cc",
       "test-log-stack-tracer.cc",
     ]
-  } else if (v8_current_cpu == "mips") {
-    sources += [  ### gcmole(arch:mips) ###
-      "test-assembler-mips.cc",
-      "test-disasm-mips.cc",
-      "test-macro-assembler-mips.cc",
-    ]
-  } else if (v8_current_cpu == "mipsel") {
-    sources += [  ### gcmole(arch:mipsel) ###
-      "test-assembler-mips.cc",
-      "test-disasm-mips.cc",
-      "test-macro-assembler-mips.cc",
-    ]
   } else if (v8_current_cpu == "mips64") {
     sources += [  ### gcmole(arch:mips64) ###
       "test-assembler-mips64.cc",
@@ -375,8 +363,7 @@ v8_source_set("cctest_sources") {
   if (v8_current_cpu == "ppc" || v8_current_cpu == "ppc64" ||
       v8_current_cpu == "arm" || v8_current_cpu == "arm64" ||
       v8_current_cpu == "s390" || v8_current_cpu == "s390x" ||
-      v8_current_cpu == "mips" || v8_current_cpu == "mips64" ||
-      v8_current_cpu == "mipsel" || v8_current_cpu == "mipsel64" ||
+      v8_current_cpu == "mips64" || v8_current_cpu == "mips64el" ||
       v8_current_cpu == "riscv64" || v8_current_cpu == "loong64" ||
       v8_current_cpu == "riscv32") {
     # Disable fmadd/fmsub so that expected results match generated code in
diff --git a/test/cctest/cctest.h b/test/cctest/cctest.h
index 60701d559e..dedfc479f3 100644
--- a/test/cctest/cctest.h
+++ b/test/cctest/cctest.h
@@ -773,7 +773,7 @@ class SimulatorHelper {
     state->sp = reinterpret_cast<void*>(simulator_->sp());
     state->fp = reinterpret_cast<void*>(simulator_->fp());
     state->lr = reinterpret_cast<void*>(simulator_->lr());
-#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+#elif V8_TARGET_ARCH_MIPS64
     state->pc = reinterpret_cast<void*>(simulator_->get_pc());
     state->sp = reinterpret_cast<void*>(
         simulator_->get_register(v8::internal::Simulator::sp));
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index 4eaa44db02..7bfa3867d6 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -318,28 +318,6 @@
   'test-serialize/StartupSerializerTwiceRunScript': [SKIP],
 }],  # 'arch == arm'
 
-##############################################################################
-['arch == mipsel or arch == mips', {
-  # TODO(mips-team): Improve code-size on large RegExp's.
-  'test-heap/TestSizeOfRegExpCode': [SKIP],
-
-  # BUG(1075): Unresolved crashes on MIPS also.
-  'test-serialize/StartupSerializerOnce': [SKIP],
-  'test-serialize/StartupSerializerTwice': [SKIP],
-  'test-serialize/StartupSerializerOnceRunScript': [SKIP],
-  'test-serialize/StartupSerializerTwiceRunScript': [SKIP],
-}],  # 'arch == mipsel or arch == mips'
-
-##############################################################################
-['arch == mips', {
-  # Too slow with TF.
-  'test-api/ExternalArrays': [PASS, NO_VARIANTS],
-
-  # TODO(mips-team): Currently fails on mips board.
-  'test-api/Threading5': [SKIP],
-  'test-api/Threading6': [SKIP],
-}],  # 'arch == mips'
-
 ##############################################################################
 ['arch == mips64', {
   # TODO(mips-team): Currently fails on mips64 board.
@@ -365,21 +343,21 @@
 }],  # 'arch == mips64el or arch == mips64'
 
 ##############################################################################
-['(arch == mipsel or arch == mips64el or arch == mips or arch == mips64) and not simd_mips', {
+['(arch == mips64el or arch == mips64) and not simd_mips', {
   # Skip tests that fail on MIPS architectures which don't support SIMD,
   # because lowering mechanism doesn't work properly
   'test-run-wasm-simd/RunWasm_ReductionTest4_compiled': [SKIP],
   'test-run-wasm-simd/RunWasm_ReductionTest8_compiled': [SKIP],
   'test-run-wasm-simd/RunWasm_ReductionTest16_compiled': [SKIP],
   'test-run-wasm-simd-liftoff/*': [SKIP],
-}],  # '(arch == mipsel or arch == mips64el or arch == mips or arch == mips64) and not simd_mips'
+}],  # '(arch == mips64el or arch == mips64) and not simd_mips'
 
-['(arch == mipsel or arch == mips64el or arch == mips or arch == mips64) and not simd_mips', {
+['(arch == mips64el or arch == mips64) and not simd_mips', {
   'test-gc/RunWasmLiftoff_RefTrivialCasts': [SKIP],
   'test-gc/RunWasmTurbofan_RefTrivialCasts': [SKIP],
   'test-run-wasm/RunWasmLiftoff_Select_s128_parameters': [SKIP],
   'test-run-wasm/RunWasmTurbofan_Select_s128_parameters': [SKIP],
-}],  # '(arch == mipsel or arch == mips64el or arch == mips or arch == mips64) and not simd_mips'
+}],  # '(arch == mips64el or arch == mips64) and not simd_mips'
 
 ##############################################################################
 ['mips_arch_variant == r6', {
diff --git a/test/cctest/compiler/test-run-machops.cc b/test/cctest/compiler/test-run-machops.cc
index 7a565fcbb2..f445eda5b9 100644
--- a/test/cctest/compiler/test-run-machops.cc
+++ b/test/cctest/compiler/test-run-machops.cc
@@ -4428,8 +4428,7 @@ TEST(RunTruncateFloat32ToInt32) {
     if (i < upper_bound && i >= lower_bound) {
       CHECK_EQ(static_cast<int32_t>(i), m.Call(i));
     } else if (i < lower_bound) {
-#if (V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64) && !_MIPS_ARCH_MIPS32R6 && \
-    !_MIPS_ARCH_MIPS64R6
+#if V8_TARGET_ARCH_MIPS64 && !_MIPS_ARCH_MIPS64R6
       CHECK_EQ(std::numeric_limits<int32_t>::max(), m.Call(i));
 #else
       CHECK_EQ(std::numeric_limits<int32_t>::min(), m.Call(i));
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index 7c1337490a..5c3a041d75 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -17038,8 +17038,7 @@ THREADED_TEST(QuietSignalingNaNs) {
       } else {
         uint64_t stored_bits = DoubleToBits(stored_number);
         // Check if quiet nan (bits 51..62 all set).
-#if (defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)) && \
-    !defined(_MIPS_ARCH_MIPS64R6) && !defined(_MIPS_ARCH_MIPS32R6) && \
+#if (defined(V8_TARGET_ARCH_MIPS64)) && !defined(_MIPS_ARCH_MIPS64R6) && \
     !defined(USE_SIMULATOR)
         // Most significant fraction bit for quiet nan is set to 0
        // on MIPS architecture. Allowed by IEEE-754.
@@ -17060,8 +17059,7 @@ THREADED_TEST(QuietSignalingNaNs) {
       } else {
         uint64_t stored_bits = DoubleToBits(stored_date);
         // Check if quiet nan (bits 51..62 all set).
-#if (defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)) && \
-    !defined(_MIPS_ARCH_MIPS64R6) && !defined(_MIPS_ARCH_MIPS32R6) && \
+#if (defined(V8_TARGET_ARCH_MIPS64)) && !defined(_MIPS_ARCH_MIPS64R6) && \
     !defined(USE_SIMULATOR)
         // Most significant fraction bit for quiet nan is set to 0
         // on MIPS architecture. Allowed by IEEE-754.
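        // (Legacy MIPS hardware encodes a *quiet* NaN with the top fraction
        //  bit clear, the opposite of the x86-style convention checked
        //  elsewhere; IEEE-754 permits either. A minimal sketch of the
        //  legacy-MIPS predicate on the raw bits, names illustrative:
        //    bool IsLegacyMipsQuietNaN(uint64_t bits) {
        //      return ((bits >> 52) & 0x7FF) == 0x7FF &&  // NaN/Inf exponent
        //             ((bits >> 51) & 1) == 0 &&          // quiet bit clear
        //             (bits & 0x0007FFFFFFFFFFFF) != 0;   // nonzero payload
        //    }
        //  MIPS r6 adopts the 2008 quiet-bit rules, hence the
        //  _MIPS_ARCH_MIPS64R6 exclusion above.)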
diff --git a/test/cctest/test-assembler-mips.cc b/test/cctest/test-assembler-mips.cc deleted file mode 100644 index 61136236e1..0000000000 --- a/test/cctest/test-assembler-mips.cc +++ /dev/null @@ -1,10454 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include - -#include "src/base/utils/random-number-generator.h" -#include "src/codegen/assembler-inl.h" -#include "src/codegen/macro-assembler.h" -#include "src/diagnostics/disassembler.h" -#include "src/execution/simulator.h" -#include "src/heap/factory.h" -#include "src/init/v8.h" -#include "test/cctest/cctest.h" - -namespace v8 { -namespace internal { - -// Define these function prototypes to match JSEntryFunction in execution.cc. -// TODO(mips): Refine these signatures per test case. -using F1 = void*(int x, int p1, int p2, int p3, int p4); -using F2 = void*(int x, int y, int p2, int p3, int p4); -using F3 = void*(void* p, int p1, int p2, int p3, int p4); -using F4 = void*(void* p0, void* p1, int p2, int p3, int p4); - -#define __ assm. - -TEST(MIPS0) { - CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - - // Addition. 
- __ addu(v0, a0, a1); - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - auto f = GeneratedCode::FromCode(*code); - int res = reinterpret_cast(f.Call(0xAB0, 0xC, 0, 0, 0)); - CHECK_EQ(static_cast(0xABC), res); -} - - -TEST(MIPS1) { - CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - Label L, C; - - __ mov(a1, a0); - __ li(v0, 0); - __ b(&C); - __ nop(); - - __ bind(&L); - __ addu(v0, v0, a1); - __ addiu(a1, a1, -1); - - __ bind(&C); - __ xori(v1, a1, 0); - __ Branch(&L, ne, v1, Operand(0)); - __ nop(); - - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - auto f = GeneratedCode::FromCode(*code); - int res = reinterpret_cast(f.Call(50, 0, 0, 0, 0)); - CHECK_EQ(1275, res); -} - - -TEST(MIPS2) { - CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - - Label exit, error; - - // ----- Test all instructions. - - // Test lui, ori, and addiu, used in the li pseudo-instruction. - // This way we can then safely load registers with chosen values. - - __ ori(t0, zero_reg, 0); - __ lui(t0, 0x1234); - __ ori(t0, t0, 0); - __ ori(t0, t0, 0x0F0F); - __ ori(t0, t0, 0xF0F0); - __ addiu(t1, t0, 1); - __ addiu(t2, t1, -0x10); - - // Load values in temporary registers. - __ li(t0, 0x00000004); - __ li(t1, 0x00001234); - __ li(t2, 0x12345678); - __ li(t3, 0x7FFFFFFF); - __ li(t4, 0xFFFFFFFC); - __ li(t5, 0xFFFFEDCC); - __ li(t6, 0xEDCBA988); - __ li(t7, 0x80000000); - - // SPECIAL class. - __ srl(v0, t2, 8); // 0x00123456 - __ sll(v0, v0, 11); // 0x91A2B000 - __ sra(v0, v0, 3); // 0xF2345600 - __ srav(v0, v0, t0); // 0xFF234560 - __ sllv(v0, v0, t0); // 0xF2345600 - __ srlv(v0, v0, t0); // 0x0F234560 - __ Branch(&error, ne, v0, Operand(0x0F234560)); - __ nop(); - - __ addu(v0, t0, t1); // 0x00001238 - __ subu(v0, v0, t0); // 0x00001234 - __ Branch(&error, ne, v0, Operand(0x00001234)); - __ nop(); - __ addu(v1, t3, t0); - __ Branch(&error, ne, v1, Operand(0x80000003)); - __ nop(); - __ subu(v1, t7, t0); // 0x7FFFFFFC - __ Branch(&error, ne, v1, Operand(0x7FFFFFFC)); - __ nop(); - - __ and_(v0, t1, t2); // 0x00001230 - __ or_(v0, v0, t1); // 0x00001234 - __ xor_(v0, v0, t2); // 0x1234444C - __ nor(v0, v0, t2); // 0xEDCBA987 - __ Branch(&error, ne, v0, Operand(0xEDCBA983)); - __ nop(); - - __ slt(v0, t7, t3); - __ Branch(&error, ne, v0, Operand(0x1)); - __ nop(); - __ sltu(v0, t7, t3); - __ Branch(&error, ne, v0, Operand(zero_reg)); - __ nop(); - // End of SPECIAL class. 
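  // Worked check of the nor sequence above: v0 was 0x1234444C and t2 is
  // 0x12345678, so nor yields ~(0x1234444C | 0x12345678) = ~0x1234567C,
  // i.e. the 0xEDCBA983 that the Branch compares against.
  static_assert(~(0x1234444Cu | 0x12345678u) == 0xEDCBA983u,
                "expected nor result for the Branch check above");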
- - __ addiu(v0, zero_reg, 0x7421); // 0x00007421 - __ addiu(v0, v0, -0x1); // 0x00007420 - __ addiu(v0, v0, -0x20); // 0x00007400 - __ Branch(&error, ne, v0, Operand(0x00007400)); - __ nop(); - __ addiu(v1, t3, 0x1); // 0x80000000 - __ Branch(&error, ne, v1, Operand(0x80000000)); - __ nop(); - - __ slti(v0, t1, 0x00002000); // 0x1 - __ slti(v0, v0, 0xFFFF8000); // 0x0 - __ Branch(&error, ne, v0, Operand(zero_reg)); - __ nop(); - __ sltiu(v0, t1, 0x00002000); // 0x1 - __ sltiu(v0, v0, 0x00008000); // 0x1 - __ Branch(&error, ne, v0, Operand(0x1)); - __ nop(); - - __ andi(v0, t1, 0xF0F0); // 0x00001030 - __ ori(v0, v0, 0x8A00); // 0x00009A30 - __ xori(v0, v0, 0x83CC); // 0x000019FC - __ Branch(&error, ne, v0, Operand(0x000019FC)); - __ nop(); - __ lui(v1, 0x8123); // 0x81230000 - __ Branch(&error, ne, v1, Operand(0x81230000)); - __ nop(); - - // Bit twiddling instructions & conditional moves. - // Uses t0-t7 as set above. - __ Clz(v0, t0); // 29 - __ Clz(v1, t1); // 19 - __ addu(v0, v0, v1); // 48 - __ Clz(v1, t2); // 3 - __ addu(v0, v0, v1); // 51 - __ Clz(v1, t7); // 0 - __ addu(v0, v0, v1); // 51 - __ Branch(&error, ne, v0, Operand(51)); - __ Movn(a0, t3, t0); // Move a0<-t3 (t0 is NOT 0). - __ Ins(a0, t1, 12, 8); // 0x7FF34FFF - __ Branch(&error, ne, a0, Operand(0x7FF34FFF)); - __ Movz(a0, t6, t7); // a0 not updated (t7 is NOT 0). - __ Ext(a1, a0, 8, 12); // 0x34F - __ Branch(&error, ne, a1, Operand(0x34F)); - __ Movz(a0, t6, v1); // a0<-t6, v0 is 0, from 8 instr back. - __ Branch(&error, ne, a0, Operand(t6)); - - // Everything was correctly executed. Load the expected result. - __ li(v0, 0x31415926); - __ b(&exit); - __ nop(); - - __ bind(&error); - // Got an error. Return a wrong result. - __ li(v0, 666); - - __ bind(&exit); - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - auto f = GeneratedCode::FromCode(*code); - int res = reinterpret_cast(f.Call(0xAB0, 0xC, 0, 0, 0)); - CHECK_EQ(static_cast(0x31415926), res); -} - - -TEST(MIPS3) { - // Test floating point instructions. - CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - struct T { - double a; - double b; - double c; - double d; - double e; - double f; - double g; - double h; - double i; - float fa; - float fb; - float fc; - float fd; - float fe; - float ff; - float fg; - }; - T t; - - // Create a function that accepts &t, and loads, manipulates, and stores - // the doubles t.a ... t.f. - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - - // Double precision floating point instructions. - __ Ldc1(f4, MemOperand(a0, offsetof(T, a))); - __ Ldc1(f6, MemOperand(a0, offsetof(T, b))); - __ add_d(f8, f4, f6); - __ Sdc1(f8, MemOperand(a0, offsetof(T, c))); // c = a + b. - - __ mov_d(f10, f8); // c - __ neg_d(f12, f6); // -b - __ sub_d(f10, f10, f12); - __ Sdc1(f10, MemOperand(a0, offsetof(T, d))); // d = c - (-b). - - __ Sdc1(f4, MemOperand(a0, offsetof(T, b))); // b = a. - - __ li(t0, 120); - __ mtc1(t0, f14); - __ cvt_d_w(f14, f14); // f14 = 120.0. - __ mul_d(f10, f10, f14); - __ Sdc1(f10, MemOperand(a0, offsetof(T, e))); // e = d * 120 = 1.8066e16. - - __ div_d(f12, f10, f4); - __ Sdc1(f12, MemOperand(a0, offsetof(T, f))); // f = e / a = 120.44. 
- - __ sqrt_d(f14, f12); - __ Sdc1(f14, MemOperand(a0, offsetof(T, g))); - // g = sqrt(f) = 10.97451593465515908537 - - if (IsMipsArchVariant(kMips32r2)) { - __ Ldc1(f4, MemOperand(a0, offsetof(T, h))); - __ Ldc1(f6, MemOperand(a0, offsetof(T, i))); - __ madd_d(f14, f6, f4, f6); - __ Sdc1(f14, MemOperand(a0, offsetof(T, h))); - } - - // Single precision floating point instructions. - __ lwc1(f4, MemOperand(a0, offsetof(T, fa)) ); - __ lwc1(f6, MemOperand(a0, offsetof(T, fb)) ); - __ add_s(f8, f4, f6); - __ swc1(f8, MemOperand(a0, offsetof(T, fc)) ); // fc = fa + fb. - - __ neg_s(f10, f6); // -fb - __ sub_s(f10, f8, f10); - __ swc1(f10, MemOperand(a0, offsetof(T, fd)) ); // fd = fc - (-fb). - - __ swc1(f4, MemOperand(a0, offsetof(T, fb)) ); // fb = fa. - - __ li(t0, 120); - __ mtc1(t0, f14); - __ cvt_s_w(f14, f14); // f14 = 120.0. - __ mul_s(f10, f10, f14); - __ swc1(f10, MemOperand(a0, offsetof(T, fe)) ); // fe = fd * 120 - - __ div_s(f12, f10, f4); - __ swc1(f12, MemOperand(a0, offsetof(T, ff)) ); // ff = fe / fa - - __ sqrt_s(f14, f12); - __ swc1(f14, MemOperand(a0, offsetof(T, fg)) ); - - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - auto f = GeneratedCode::FromCode(*code); - // Double test values. - t.a = 1.5e14; - t.b = 2.75e11; - t.c = 0.0; - t.d = 0.0; - t.e = 0.0; - t.f = 0.0; - t.h = 1.5; - t.i = 2.75; - // Single test values. - t.fa = 1.5e6; - t.fb = 2.75e4; - t.fc = 0.0; - t.fd = 0.0; - t.fe = 0.0; - t.ff = 0.0; - f.Call(&t, 0, 0, 0, 0); - // Expected double results. - CHECK_EQ(1.5e14, t.a); - CHECK_EQ(1.5e14, t.b); - CHECK_EQ(1.50275e14, t.c); - CHECK_EQ(1.50550e14, t.d); - CHECK_EQ(1.8066e16, t.e); - CHECK_EQ(120.44, t.f); - CHECK_EQ(10.97451593465515908537, t.g); - if (IsMipsArchVariant(kMips32r2)) { - CHECK_EQ(6.875, t.h); - } - // Expected single results. - CHECK_EQ(1.5e6, t.fa); - CHECK_EQ(1.5e6, t.fb); - CHECK_EQ(1.5275e06, t.fc); - CHECK_EQ(1.5550e06, t.fd); - CHECK_EQ(1.866e08, t.fe); - CHECK_EQ(124.40000152587890625, t.ff); - CHECK_EQ(11.1534748077392578125, t.fg); -} - - -TEST(MIPS4) { - // Exchange between GP anf FP registers is done through memory - // on FPXX compiled binaries and architectures that do not support - // MTHC1 and MTFC1. If this is the case, skipping this test. - if (IsFpxxMode() && - (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson))) { - return; - } - - // Test moves between floating point and integer registers. - CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - struct T { - double a; - double b; - double c; - }; - T t; - - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - - __ Ldc1(f4, MemOperand(a0, offsetof(T, a))); - __ Ldc1(f6, MemOperand(a0, offsetof(T, b))); - - // Swap f4 and f6, by using four integer registers, t0-t3. - if (IsFp32Mode()) { - __ mfc1(t0, f4); - __ mfc1(t1, f5); - __ mfc1(t2, f6); - __ mfc1(t3, f7); - - __ mtc1(t0, f6); - __ mtc1(t1, f7); - __ mtc1(t2, f4); - __ mtc1(t3, f5); - } else { - CHECK(!IsMipsArchVariant(kMips32r1) && !IsMipsArchVariant(kLoongson)); - DCHECK(IsFp64Mode() || IsFpxxMode()); - __ mfc1(t0, f4); - __ mfhc1(t1, f4); - __ mfc1(t2, f6); - __ mfhc1(t3, f6); - - __ mtc1(t0, f6); - __ mthc1(t1, f6); - __ mtc1(t2, f4); - __ mthc1(t3, f4); - } - - // Store the swapped f4 and f5 back to memory. 
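  // (In FP32 mode a double lives in an even/odd FPR pair, so both words of
  //  f4/f5 and f6/f7 are shuffled through GPRs with mfc1/mtc1; in FP64 and
  //  FPXX modes each FPR is 64 bits wide and mfhc1/mthc1 move the high word
  //  directly, which is what the two branches above exercise.)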
- __ Sdc1(f4, MemOperand(a0, offsetof(T, a))); - __ Sdc1(f6, MemOperand(a0, offsetof(T, c))); - - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - auto f = GeneratedCode::FromCode(*code); - t.a = 1.5e22; - t.b = 2.75e11; - t.c = 17.17; - f.Call(&t, 0, 0, 0, 0); - - CHECK_EQ(2.75e11, t.a); - CHECK_EQ(2.75e11, t.b); - CHECK_EQ(1.5e22, t.c); -} - - -TEST(MIPS5) { - // Test conversions between doubles and integers. - CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - struct T { - double a; - double b; - int i; - int j; - }; - T t; - - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - - // Load all structure elements to registers. - __ Ldc1(f4, MemOperand(a0, offsetof(T, a))); - __ Ldc1(f6, MemOperand(a0, offsetof(T, b))); - __ lw(t0, MemOperand(a0, offsetof(T, i)) ); - __ lw(t1, MemOperand(a0, offsetof(T, j)) ); - - // Convert double in f4 to int in element i. - __ cvt_w_d(f8, f4); - __ mfc1(t2, f8); - __ sw(t2, MemOperand(a0, offsetof(T, i)) ); - - // Convert double in f6 to int in element j. - __ cvt_w_d(f10, f6); - __ mfc1(t3, f10); - __ sw(t3, MemOperand(a0, offsetof(T, j)) ); - - // Convert int in original i (t0) to double in a. - __ mtc1(t0, f12); - __ cvt_d_w(f0, f12); - __ Sdc1(f0, MemOperand(a0, offsetof(T, a))); - - // Convert int in original j (t1) to double in b. - __ mtc1(t1, f14); - __ cvt_d_w(f2, f14); - __ Sdc1(f2, MemOperand(a0, offsetof(T, b))); - - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - auto f = GeneratedCode::FromCode(*code); - t.a = 1.5e4; - t.b = 2.75e8; - t.i = 12345678; - t.j = -100000; - f.Call(&t, 0, 0, 0, 0); - - CHECK_EQ(12345678.0, t.a); - CHECK_EQ(-100000.0, t.b); - CHECK_EQ(15000, t.i); - CHECK_EQ(275000000, t.j); -} - - -TEST(MIPS6) { - // Test simple memory loads and stores. - CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - struct T { - uint32_t ui; - int32_t si; - int32_t r1; - int32_t r2; - int32_t r3; - int32_t r4; - int32_t r5; - int32_t r6; - }; - T t; - - Assembler assm(AssemblerOptions{}); - - // Basic word load/store. - __ lw(t0, MemOperand(a0, offsetof(T, ui)) ); - __ sw(t0, MemOperand(a0, offsetof(T, r1)) ); - - // lh with positive data. - __ lh(t1, MemOperand(a0, offsetof(T, ui)) ); - __ sw(t1, MemOperand(a0, offsetof(T, r2)) ); - - // lh with negative data. - __ lh(t2, MemOperand(a0, offsetof(T, si)) ); - __ sw(t2, MemOperand(a0, offsetof(T, r3)) ); - - // lhu with negative data. - __ lhu(t3, MemOperand(a0, offsetof(T, si)) ); - __ sw(t3, MemOperand(a0, offsetof(T, r4)) ); - - // lb with negative data. - __ lb(t4, MemOperand(a0, offsetof(T, si)) ); - __ sw(t4, MemOperand(a0, offsetof(T, r5)) ); - - // sh writes only 1/2 of word. 
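  // A minimal model of the store-halfword sequence below (little-endian,
  // halfword at offset 0): sh replaces only the low 16 bits of the
  // 0x33333333 marker word.
  auto sh_le = [](uint32_t word, uint16_t half) -> uint32_t {
    return (word & 0xFFFF0000u) | half;
  };
  USE(sh_le);  // sh_le(0x33333333, 0xBBCC) == 0x3333BBCC, the r6 value below.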
- __ lui(t5, 0x3333); - __ ori(t5, t5, 0x3333); - __ sw(t5, MemOperand(a0, offsetof(T, r6)) ); - __ lhu(t5, MemOperand(a0, offsetof(T, si)) ); - __ sh(t5, MemOperand(a0, offsetof(T, r6)) ); - - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - auto f = GeneratedCode::FromCode(*code); - t.ui = 0x11223344; - t.si = 0x99AABBCC; - f.Call(&t, 0, 0, 0, 0); - - CHECK_EQ(static_cast(0x11223344), t.r1); -#if __BYTE_ORDER == __LITTLE_ENDIAN - CHECK_EQ(static_cast(0x3344), t.r2); - CHECK_EQ(static_cast(0xFFFFBBCC), t.r3); - CHECK_EQ(static_cast(0x0000BBCC), t.r4); - CHECK_EQ(static_cast(0xFFFFFFCC), t.r5); - CHECK_EQ(static_cast(0x3333BBCC), t.r6); -#elif __BYTE_ORDER == __BIG_ENDIAN - CHECK_EQ(static_cast(0x1122), t.r2); - CHECK_EQ(static_cast(0xFFFF99AA), t.r3); - CHECK_EQ(static_cast(0x000099AA), t.r4); - CHECK_EQ(static_cast(0xFFFFFF99), t.r5); - CHECK_EQ(static_cast(0x99AA3333), t.r6); -#else -#error Unknown endianness -#endif -} - - -TEST(MIPS7) { - // Test floating point compare and branch instructions. - CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - struct T { - double a; - double b; - double c; - double d; - double e; - double f; - int32_t result; - }; - T t; - - // Create a function that accepts &t, and loads, manipulates, and stores - // the doubles t.a ... t.f. - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - Label neither_is_nan, less_than, outa_here; - - __ Ldc1(f4, MemOperand(a0, offsetof(T, a))); - __ Ldc1(f6, MemOperand(a0, offsetof(T, b))); - if (!IsMipsArchVariant(kMips32r6)) { - __ c(UN, D, f4, f6); - __ bc1f(&neither_is_nan); - } else { - __ cmp(UN, L, f2, f4, f6); - __ bc1eqz(&neither_is_nan, f2); - } - __ nop(); - __ sw(zero_reg, MemOperand(a0, offsetof(T, result)) ); - __ Branch(&outa_here); - - __ bind(&neither_is_nan); - - if (IsMipsArchVariant(kLoongson)) { - __ c(OLT, D, f6, f4); - __ bc1t(&less_than); - } else if (IsMipsArchVariant(kMips32r6)) { - __ cmp(OLT, L, f2, f6, f4); - __ bc1nez(&less_than, f2); - } else { - __ c(OLT, D, f6, f4, 2); - __ bc1t(&less_than, 2); - } - - __ nop(); - __ sw(zero_reg, MemOperand(a0, offsetof(T, result)) ); - __ Branch(&outa_here); - - __ bind(&less_than); - __ Addu(t0, zero_reg, Operand(1)); - __ sw(t0, MemOperand(a0, offsetof(T, result)) ); // Set true. - - - // This test-case should have additional tests. - - __ bind(&outa_here); - - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - auto f = GeneratedCode::FromCode(*code); - t.a = 1.5e14; - t.b = 2.75e11; - t.c = 2.0; - t.d = -4.0; - t.e = 0.0; - t.f = 0.0; - t.result = 0; - f.Call(&t, 0, 0, 0, 0); - CHECK_EQ(1.5e14, t.a); - CHECK_EQ(2.75e11, t.b); - CHECK_EQ(1, t.result); -} - - -TEST(MIPS8) { - // Test ROTR and ROTRV instructions. 
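  // Reference semantics for the rotations below, valid for 0 < n < 32:
  // rotr moves the low n bits of x to the top, e.g.
  // rotr(0x12345678, 4) == 0x81234567, the first expected value checked.
  auto rotr = [](uint32_t x, unsigned n) -> uint32_t {
    return (x >> n) | (x << (32 - n));
  };
  USE(rotr);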
- if (IsMipsArchVariant(kMips32r2)) { - CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - struct T { - int32_t input; - int32_t result_rotr_4; - int32_t result_rotr_8; - int32_t result_rotr_12; - int32_t result_rotr_16; - int32_t result_rotr_20; - int32_t result_rotr_24; - int32_t result_rotr_28; - int32_t result_rotrv_4; - int32_t result_rotrv_8; - int32_t result_rotrv_12; - int32_t result_rotrv_16; - int32_t result_rotrv_20; - int32_t result_rotrv_24; - int32_t result_rotrv_28; - }; - T t; - - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - - // Basic word load. - __ lw(t0, MemOperand(a0, offsetof(T, input)) ); - - // ROTR instruction (called through the Ror macro). - __ Ror(t1, t0, 0x0004); - __ Ror(t2, t0, 0x0008); - __ Ror(t3, t0, 0x000C); - __ Ror(t4, t0, 0x0010); - __ Ror(t5, t0, 0x0014); - __ Ror(t6, t0, 0x0018); - __ Ror(t7, t0, 0x001C); - - // Basic word store. - __ sw(t1, MemOperand(a0, offsetof(T, result_rotr_4)) ); - __ sw(t2, MemOperand(a0, offsetof(T, result_rotr_8)) ); - __ sw(t3, MemOperand(a0, offsetof(T, result_rotr_12)) ); - __ sw(t4, MemOperand(a0, offsetof(T, result_rotr_16)) ); - __ sw(t5, MemOperand(a0, offsetof(T, result_rotr_20)) ); - __ sw(t6, MemOperand(a0, offsetof(T, result_rotr_24)) ); - __ sw(t7, MemOperand(a0, offsetof(T, result_rotr_28)) ); - - // ROTRV instruction (called through the Ror macro). - __ li(t7, 0x0004); - __ Ror(t1, t0, t7); - __ li(t7, 0x0008); - __ Ror(t2, t0, t7); - __ li(t7, 0x000C); - __ Ror(t3, t0, t7); - __ li(t7, 0x0010); - __ Ror(t4, t0, t7); - __ li(t7, 0x0014); - __ Ror(t5, t0, t7); - __ li(t7, 0x0018); - __ Ror(t6, t0, t7); - __ li(t7, 0x001C); - __ Ror(t7, t0, t7); - - // Basic word store. - __ sw(t1, MemOperand(a0, offsetof(T, result_rotrv_4)) ); - __ sw(t2, MemOperand(a0, offsetof(T, result_rotrv_8)) ); - __ sw(t3, MemOperand(a0, offsetof(T, result_rotrv_12)) ); - __ sw(t4, MemOperand(a0, offsetof(T, result_rotrv_16)) ); - __ sw(t5, MemOperand(a0, offsetof(T, result_rotrv_20)) ); - __ sw(t6, MemOperand(a0, offsetof(T, result_rotrv_24)) ); - __ sw(t7, MemOperand(a0, offsetof(T, result_rotrv_28)) ); - - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - auto f = GeneratedCode::FromCode(*code); - t.input = 0x12345678; - f.Call(&t, 0x0, 0, 0, 0); - CHECK_EQ(static_cast(0x81234567), t.result_rotr_4); - CHECK_EQ(static_cast(0x78123456), t.result_rotr_8); - CHECK_EQ(static_cast(0x67812345), t.result_rotr_12); - CHECK_EQ(static_cast(0x56781234), t.result_rotr_16); - CHECK_EQ(static_cast(0x45678123), t.result_rotr_20); - CHECK_EQ(static_cast(0x34567812), t.result_rotr_24); - CHECK_EQ(static_cast(0x23456781), t.result_rotr_28); - - CHECK_EQ(static_cast(0x81234567), t.result_rotrv_4); - CHECK_EQ(static_cast(0x78123456), t.result_rotrv_8); - CHECK_EQ(static_cast(0x67812345), t.result_rotrv_12); - CHECK_EQ(static_cast(0x56781234), t.result_rotrv_16); - CHECK_EQ(static_cast(0x45678123), t.result_rotrv_20); - CHECK_EQ(static_cast(0x34567812), t.result_rotrv_24); - CHECK_EQ(static_cast(0x23456781), t.result_rotrv_28); - } -} - - -TEST(MIPS9) { - // Test BRANCH improvements. 
- CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - Label exit, exit2, exit3; - - __ Branch(&exit, ge, a0, Operand(zero_reg)); - __ Branch(&exit2, ge, a0, Operand(0x00001FFF)); - __ Branch(&exit3, ge, a0, Operand(0x0001FFFF)); - - __ bind(&exit); - __ bind(&exit2); - __ bind(&exit3); - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - USE(code); -} - - -TEST(MIPS10) { - // Test conversions between doubles and words. - CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - struct T { - double a; - double b; - int32_t dbl_mant; - int32_t dbl_exp; - int32_t word; - int32_t b_word; - }; - T t; - - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - - if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kLoongson)) return; - - // Load all structure elements to registers. - // (f0, f1) = a (fp32), f0 = a (fp64) - __ Ldc1(f0, MemOperand(a0, offsetof(T, a))); - - __ mfc1(t0, f0); // t0 = f0(31..0) - __ mfhc1(t1, f0); // t1 = sign_extend(f0(63..32)) - __ sw(t0, MemOperand(a0, offsetof(T, dbl_mant))); // dbl_mant = t0 - __ sw(t1, MemOperand(a0, offsetof(T, dbl_exp))); // dbl_exp = t1 - - // Convert double in f0 to word, save hi/lo parts. - __ cvt_w_d(f0, f0); // a_word = (word)a - __ mfc1(t0, f0); // f0 has a 32-bits word. t0 = a_word - __ sw(t0, MemOperand(a0, offsetof(T, word))); // word = a_word - - // Convert the b word to double b. - __ lw(t0, MemOperand(a0, offsetof(T, b_word))); - __ mtc1(t0, f8); // f8 has a 32-bits word. - __ cvt_d_w(f10, f8); - __ Sdc1(f10, MemOperand(a0, offsetof(T, b))); - - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - auto f = GeneratedCode::FromCode(*code); - t.a = 2.147483646e+09; // 0x7FFFFFFE -> 0xFF80000041DFFFFF as double. - t.b_word = 0x0FF00FF0; // 0x0FF00FF0 -> 0x as double. - f.Call(&t, 0, 0, 0, 0); - CHECK_EQ(static_cast(0x41DFFFFF), t.dbl_exp); - CHECK_EQ(static_cast(0xFF800000), t.dbl_mant); - CHECK_EQ(static_cast(0x7FFFFFFE), t.word); - // 0x0FF00FF0 -> 2.6739096+e08 - CHECK_EQ(2.6739096e08, t.b); -} - - -TEST(MIPS11) { - // Do not run test on MIPS32r6, as these instructions are removed. - if (IsMipsArchVariant(kMips32r6)) return; - // Test LWL, LWR, SWL and SWR instructions. - CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - struct T { - int32_t reg_init; - int32_t mem_init; - int32_t lwl_0; - int32_t lwl_1; - int32_t lwl_2; - int32_t lwl_3; - int32_t lwr_0; - int32_t lwr_1; - int32_t lwr_2; - int32_t lwr_3; - int32_t swl_0; - int32_t swl_1; - int32_t swl_2; - int32_t swl_3; - int32_t swr_0; - int32_t swr_1; - int32_t swr_2; - int32_t swr_3; - }; - T t; - - Assembler assm(AssemblerOptions{}); - - // Test all combinations of LWL and vAddr. 
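  // Little-endian model of LWL with byte offset b in 0..3: the b+1
  // lowest-addressed bytes of the memory word land in the high end of rt
  // and the remaining low bytes of rt survive, e.g.
  // lwl_le(0xAABBCCDD, 0x11223344, 1) == 0x3344CCDD (the lwl_1 case).
  auto lwl_le = [](uint32_t rt, uint32_t mem, int b) -> uint32_t {
    int keep = 8 * (3 - b);  // low register bits preserved
    uint32_t mask = (keep == 0) ? 0u : ((1u << keep) - 1u);
    return (mem << keep) | (rt & mask);
  };
  USE(lwl_le);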
- __ lw(t0, MemOperand(a0, offsetof(T, reg_init)) ); - __ lwl(t0, MemOperand(a0, offsetof(T, mem_init)) ); - __ sw(t0, MemOperand(a0, offsetof(T, lwl_0)) ); - - __ lw(t1, MemOperand(a0, offsetof(T, reg_init)) ); - __ lwl(t1, MemOperand(a0, offsetof(T, mem_init) + 1) ); - __ sw(t1, MemOperand(a0, offsetof(T, lwl_1)) ); - - __ lw(t2, MemOperand(a0, offsetof(T, reg_init)) ); - __ lwl(t2, MemOperand(a0, offsetof(T, mem_init) + 2) ); - __ sw(t2, MemOperand(a0, offsetof(T, lwl_2)) ); - - __ lw(t3, MemOperand(a0, offsetof(T, reg_init)) ); - __ lwl(t3, MemOperand(a0, offsetof(T, mem_init) + 3) ); - __ sw(t3, MemOperand(a0, offsetof(T, lwl_3)) ); - - // Test all combinations of LWR and vAddr. - __ lw(t0, MemOperand(a0, offsetof(T, reg_init)) ); - __ lwr(t0, MemOperand(a0, offsetof(T, mem_init)) ); - __ sw(t0, MemOperand(a0, offsetof(T, lwr_0)) ); - - __ lw(t1, MemOperand(a0, offsetof(T, reg_init)) ); - __ lwr(t1, MemOperand(a0, offsetof(T, mem_init) + 1) ); - __ sw(t1, MemOperand(a0, offsetof(T, lwr_1)) ); - - __ lw(t2, MemOperand(a0, offsetof(T, reg_init)) ); - __ lwr(t2, MemOperand(a0, offsetof(T, mem_init) + 2) ); - __ sw(t2, MemOperand(a0, offsetof(T, lwr_2)) ); - - __ lw(t3, MemOperand(a0, offsetof(T, reg_init)) ); - __ lwr(t3, MemOperand(a0, offsetof(T, mem_init) + 3) ); - __ sw(t3, MemOperand(a0, offsetof(T, lwr_3)) ); - - // Test all combinations of SWL and vAddr. - __ lw(t0, MemOperand(a0, offsetof(T, mem_init)) ); - __ sw(t0, MemOperand(a0, offsetof(T, swl_0)) ); - __ lw(t0, MemOperand(a0, offsetof(T, reg_init)) ); - __ swl(t0, MemOperand(a0, offsetof(T, swl_0)) ); - - __ lw(t1, MemOperand(a0, offsetof(T, mem_init)) ); - __ sw(t1, MemOperand(a0, offsetof(T, swl_1)) ); - __ lw(t1, MemOperand(a0, offsetof(T, reg_init)) ); - __ swl(t1, MemOperand(a0, offsetof(T, swl_1) + 1) ); - - __ lw(t2, MemOperand(a0, offsetof(T, mem_init)) ); - __ sw(t2, MemOperand(a0, offsetof(T, swl_2)) ); - __ lw(t2, MemOperand(a0, offsetof(T, reg_init)) ); - __ swl(t2, MemOperand(a0, offsetof(T, swl_2) + 2) ); - - __ lw(t3, MemOperand(a0, offsetof(T, mem_init)) ); - __ sw(t3, MemOperand(a0, offsetof(T, swl_3)) ); - __ lw(t3, MemOperand(a0, offsetof(T, reg_init)) ); - __ swl(t3, MemOperand(a0, offsetof(T, swl_3) + 3) ); - - // Test all combinations of SWR and vAddr. 
- __ lw(t0, MemOperand(a0, offsetof(T, mem_init)) ); - __ sw(t0, MemOperand(a0, offsetof(T, swr_0)) ); - __ lw(t0, MemOperand(a0, offsetof(T, reg_init)) ); - __ swr(t0, MemOperand(a0, offsetof(T, swr_0)) ); - - __ lw(t1, MemOperand(a0, offsetof(T, mem_init)) ); - __ sw(t1, MemOperand(a0, offsetof(T, swr_1)) ); - __ lw(t1, MemOperand(a0, offsetof(T, reg_init)) ); - __ swr(t1, MemOperand(a0, offsetof(T, swr_1) + 1) ); - - __ lw(t2, MemOperand(a0, offsetof(T, mem_init)) ); - __ sw(t2, MemOperand(a0, offsetof(T, swr_2)) ); - __ lw(t2, MemOperand(a0, offsetof(T, reg_init)) ); - __ swr(t2, MemOperand(a0, offsetof(T, swr_2) + 2) ); - - __ lw(t3, MemOperand(a0, offsetof(T, mem_init)) ); - __ sw(t3, MemOperand(a0, offsetof(T, swr_3)) ); - __ lw(t3, MemOperand(a0, offsetof(T, reg_init)) ); - __ swr(t3, MemOperand(a0, offsetof(T, swr_3) + 3) ); - - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - auto f = GeneratedCode::FromCode(*code); - t.reg_init = 0xAABBCCDD; - t.mem_init = 0x11223344; - - f.Call(&t, 0, 0, 0, 0); - -#if __BYTE_ORDER == __LITTLE_ENDIAN - CHECK_EQ(static_cast(0x44BBCCDD), t.lwl_0); - CHECK_EQ(static_cast(0x3344CCDD), t.lwl_1); - CHECK_EQ(static_cast(0x223344DD), t.lwl_2); - CHECK_EQ(static_cast(0x11223344), t.lwl_3); - - CHECK_EQ(static_cast(0x11223344), t.lwr_0); - CHECK_EQ(static_cast(0xAA112233), t.lwr_1); - CHECK_EQ(static_cast(0xAABB1122), t.lwr_2); - CHECK_EQ(static_cast(0xAABBCC11), t.lwr_3); - - CHECK_EQ(static_cast(0x112233AA), t.swl_0); - CHECK_EQ(static_cast(0x1122AABB), t.swl_1); - CHECK_EQ(static_cast(0x11AABBCC), t.swl_2); - CHECK_EQ(static_cast(0xAABBCCDD), t.swl_3); - - CHECK_EQ(static_cast(0xAABBCCDD), t.swr_0); - CHECK_EQ(static_cast(0xBBCCDD44), t.swr_1); - CHECK_EQ(static_cast(0xCCDD3344), t.swr_2); - CHECK_EQ(static_cast(0xDD223344), t.swr_3); -#elif __BYTE_ORDER == __BIG_ENDIAN - CHECK_EQ(static_cast(0x11223344), t.lwl_0); - CHECK_EQ(static_cast(0x223344DD), t.lwl_1); - CHECK_EQ(static_cast(0x3344CCDD), t.lwl_2); - CHECK_EQ(static_cast(0x44BBCCDD), t.lwl_3); - - CHECK_EQ(static_cast(0xAABBCC11), t.lwr_0); - CHECK_EQ(static_cast(0xAABB1122), t.lwr_1); - CHECK_EQ(static_cast(0xAA112233), t.lwr_2); - CHECK_EQ(static_cast(0x11223344), t.lwr_3); - - CHECK_EQ(static_cast(0xAABBCCDD), t.swl_0); - CHECK_EQ(static_cast(0x11AABBCC), t.swl_1); - CHECK_EQ(static_cast(0x1122AABB), t.swl_2); - CHECK_EQ(static_cast(0x112233AA), t.swl_3); - - CHECK_EQ(static_cast(0xDD223344), t.swr_0); - CHECK_EQ(static_cast(0xCCDD3344), t.swr_1); - CHECK_EQ(static_cast(0xBBCCDD44), t.swr_2); - CHECK_EQ(static_cast(0xAABBCCDD), t.swr_3); -#else -#error Unknown endianness -#endif -} - - -TEST(MIPS12) { - CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - struct T { - int32_t x; - int32_t y; - int32_t y1; - int32_t y2; - int32_t y3; - int32_t y4; - }; - T t; - - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - - __ mov(t6, fp); // Save frame pointer. - __ mov(fp, a0); // Access struct T by fp. - __ lw(t0, MemOperand(a0, offsetof(T, y)) ); - __ lw(t3, MemOperand(a0, offsetof(T, y4)) ); - - __ addu(t1, t0, t3); - __ subu(t4, t0, t3); - __ nop(); - __ push(t0); // These instructions disappear after opt. - __ Pop(); - __ addu(t0, t0, t0); - __ nop(); - __ Pop(); // These instructions disappear after opt. - __ push(t3); - __ nop(); - __ push(t3); // These instructions disappear after opt. 
- __ pop(t3); - __ nop(); - __ push(t3); - __ pop(t4); - __ nop(); - __ sw(t0, MemOperand(fp, offsetof(T, y)) ); - __ lw(t0, MemOperand(fp, offsetof(T, y)) ); - __ nop(); - __ sw(t0, MemOperand(fp, offsetof(T, y)) ); - __ lw(t1, MemOperand(fp, offsetof(T, y)) ); - __ nop(); - __ push(t1); - __ lw(t1, MemOperand(fp, offsetof(T, y)) ); - __ pop(t1); - __ nop(); - __ push(t1); - __ lw(t2, MemOperand(fp, offsetof(T, y)) ); - __ pop(t1); - __ nop(); - __ push(t1); - __ lw(t2, MemOperand(fp, offsetof(T, y)) ); - __ pop(t2); - __ nop(); - __ push(t2); - __ lw(t2, MemOperand(fp, offsetof(T, y)) ); - __ pop(t1); - __ nop(); - __ push(t1); - __ lw(t2, MemOperand(fp, offsetof(T, y)) ); - __ pop(t3); - __ nop(); - - __ mov(fp, t6); - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - auto f = GeneratedCode::FromCode(*code); - t.x = 1; - t.y = 2; - t.y1 = 3; - t.y2 = 4; - t.y3 = 0XBABA; - t.y4 = 0xDEDA; - - f.Call(&t, 0, 0, 0, 0); - - CHECK_EQ(3, t.y1); -} - - -TEST(MIPS13) { - // Test Cvt_d_uw and Trunc_uw_d macros. - CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - struct T { - double cvt_big_out; - double cvt_small_out; - uint32_t trunc_big_out; - uint32_t trunc_small_out; - uint32_t cvt_big_in; - uint32_t cvt_small_in; - }; - T t; - - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - - __ sw(t0, MemOperand(a0, offsetof(T, cvt_small_in))); - __ Cvt_d_uw(f10, t0, f4); - __ Sdc1(f10, MemOperand(a0, offsetof(T, cvt_small_out))); - - __ Trunc_uw_d(f10, f10, f4); - __ swc1(f10, MemOperand(a0, offsetof(T, trunc_small_out))); - - __ sw(t0, MemOperand(a0, offsetof(T, cvt_big_in))); - __ Cvt_d_uw(f8, t0, f4); - __ Sdc1(f8, MemOperand(a0, offsetof(T, cvt_big_out))); - - __ Trunc_uw_d(f8, f8, f4); - __ swc1(f8, MemOperand(a0, offsetof(T, trunc_big_out))); - - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - auto f = GeneratedCode::FromCode(*code); - - t.cvt_big_in = 0xFFFFFFFF; - t.cvt_small_in = 333; - - f.Call(&t, 0, 0, 0, 0); - - CHECK_EQ(t.cvt_big_out, static_cast(t.cvt_big_in)); - CHECK_EQ(t.cvt_small_out, static_cast(t.cvt_small_in)); - - CHECK_EQ(static_cast(t.trunc_big_out), static_cast(t.cvt_big_in)); - CHECK_EQ(static_cast(t.trunc_small_out), - static_cast(t.cvt_small_in)); -} - - -TEST(MIPS14) { - // Test round, floor, ceil, trunc, cvt. - CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - -#define ROUND_STRUCT_ELEMENT(x) \ - uint32_t x##_isNaN2008; \ - int32_t x##_up_out; \ - int32_t x##_down_out; \ - int32_t neg_##x##_up_out; \ - int32_t neg_##x##_down_out; \ - uint32_t x##_err1_out; \ - uint32_t x##_err2_out; \ - uint32_t x##_err3_out; \ - uint32_t x##_err4_out; \ - int32_t x##_invalid_result; - - struct T { - double round_up_in; - double round_down_in; - double neg_round_up_in; - double neg_round_down_in; - double err1_in; - double err2_in; - double err3_in; - double err4_in; - - ROUND_STRUCT_ELEMENT(round) - ROUND_STRUCT_ELEMENT(floor) - ROUND_STRUCT_ELEMENT(ceil) - ROUND_STRUCT_ELEMENT(trunc) - ROUND_STRUCT_ELEMENT(cvt) - }; - T t; - -#undef ROUND_STRUCT_ELEMENT - - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - - // Save FCSR. - __ cfc1(a1, FCSR); - // Disable FPU exceptions. 
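  // (ctc1 of zero_reg clears every FCSR field at once: the sticky flag
  //  bits, the cause bits, all exception enables, and the rounding mode,
  //  which falls back to round-to-nearest. Each RUN_ROUND_TEST below
  //  re-clears FCSR so it reads back only the flags of its own operation.)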
- __ ctc1(zero_reg, FCSR); -#define RUN_ROUND_TEST(x) \ - __ cfc1(t0, FCSR); \ - __ sw(t0, MemOperand(a0, offsetof(T, x##_isNaN2008))); \ - __ Ldc1(f0, MemOperand(a0, offsetof(T, round_up_in))); \ - __ x##_w_d(f0, f0); \ - __ swc1(f0, MemOperand(a0, offsetof(T, x##_up_out))); \ - \ - __ Ldc1(f0, MemOperand(a0, offsetof(T, round_down_in))); \ - __ x##_w_d(f0, f0); \ - __ swc1(f0, MemOperand(a0, offsetof(T, x##_down_out))); \ - \ - __ Ldc1(f0, MemOperand(a0, offsetof(T, neg_round_up_in))); \ - __ x##_w_d(f0, f0); \ - __ swc1(f0, MemOperand(a0, offsetof(T, neg_##x##_up_out))); \ - \ - __ Ldc1(f0, MemOperand(a0, offsetof(T, neg_round_down_in))); \ - __ x##_w_d(f0, f0); \ - __ swc1(f0, MemOperand(a0, offsetof(T, neg_##x##_down_out))); \ - \ - __ Ldc1(f0, MemOperand(a0, offsetof(T, err1_in))); \ - __ ctc1(zero_reg, FCSR); \ - __ x##_w_d(f0, f0); \ - __ cfc1(a2, FCSR); \ - __ sw(a2, MemOperand(a0, offsetof(T, x##_err1_out))); \ - \ - __ Ldc1(f0, MemOperand(a0, offsetof(T, err2_in))); \ - __ ctc1(zero_reg, FCSR); \ - __ x##_w_d(f0, f0); \ - __ cfc1(a2, FCSR); \ - __ sw(a2, MemOperand(a0, offsetof(T, x##_err2_out))); \ - \ - __ Ldc1(f0, MemOperand(a0, offsetof(T, err3_in))); \ - __ ctc1(zero_reg, FCSR); \ - __ x##_w_d(f0, f0); \ - __ cfc1(a2, FCSR); \ - __ sw(a2, MemOperand(a0, offsetof(T, x##_err3_out))); \ - \ - __ Ldc1(f0, MemOperand(a0, offsetof(T, err4_in))); \ - __ ctc1(zero_reg, FCSR); \ - __ x##_w_d(f0, f0); \ - __ cfc1(a2, FCSR); \ - __ sw(a2, MemOperand(a0, offsetof(T, x##_err4_out))); \ - __ swc1(f0, MemOperand(a0, offsetof(T, x##_invalid_result))); - - RUN_ROUND_TEST(round) - RUN_ROUND_TEST(floor) - RUN_ROUND_TEST(ceil) - RUN_ROUND_TEST(trunc) - RUN_ROUND_TEST(cvt) - - // Restore FCSR. - __ ctc1(a1, FCSR); - - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - auto f = GeneratedCode::FromCode(*code); - - t.round_up_in = 123.51; - t.round_down_in = 123.49; - t.neg_round_up_in = -123.5; - t.neg_round_down_in = -123.49; - t.err1_in = 123.51; - t.err2_in = 1; - t.err3_in = static_cast(1) + 0xFFFFFFFF; - t.err4_in = NAN; - - f.Call(&t, 0, 0, 0, 0); - -#define GET_FPU_ERR(x) (static_cast(x & kFCSRFlagMask)) -#define CHECK_NAN2008(x) (x & kFCSRNaN2008FlagMask) -#define CHECK_ROUND_RESULT(type) \ - CHECK(GET_FPU_ERR(t.type##_err1_out) & kFCSRInexactFlagMask); \ - CHECK_EQ(0, GET_FPU_ERR(t.type##_err2_out)); \ - CHECK(GET_FPU_ERR(t.type##_err3_out) & kFCSRInvalidOpFlagMask); \ - CHECK(GET_FPU_ERR(t.type##_err4_out) & kFCSRInvalidOpFlagMask); \ - if (CHECK_NAN2008(t.type##_isNaN2008) && kArchVariant == kMips32r6) {\ - CHECK_EQ(static_cast(0), t.type##_invalid_result);\ - } else {\ - CHECK_EQ(static_cast(kFPUInvalidResult), t.type##_invalid_result);\ - } - - - CHECK_ROUND_RESULT(round); - CHECK_ROUND_RESULT(floor); - CHECK_ROUND_RESULT(ceil); - CHECK_ROUND_RESULT(cvt); -} - - -TEST(MIPS15) { - // Test chaining of label usages within instructions (issue 1644). 
- CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - Assembler assm(AssemblerOptions{}); - - Label target; - __ beq(v0, v1, &target); - __ nop(); - __ bne(v0, v1, &target); - __ nop(); - __ bind(&target); - __ nop(); -} - - -// ----------------------mips32r6 specific tests---------------------- -TEST(seleqz_selnez) { - if (IsMipsArchVariant(kMips32r6)) { - CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - - struct Test { - int a; - int b; - int c; - int d; - double e; - double f; - double g; - double h; - float i; - float j; - float k; - float l; - }; - - Test test; - // Integer part of test. - __ addiu(t1, zero_reg, 1); // t1 = 1 - __ seleqz(t3, t1, zero_reg); // t3 = 1 - __ sw(t3, MemOperand(a0, offsetof(Test, a))); // a = 1 - __ seleqz(t2, t1, t1); // t2 = 0 - __ sw(t2, MemOperand(a0, offsetof(Test, b))); // b = 0 - __ selnez(t3, t1, zero_reg); // t3 = 1; - __ sw(t3, MemOperand(a0, offsetof(Test, c))); // c = 0 - __ selnez(t3, t1, t1); // t3 = 1 - __ sw(t3, MemOperand(a0, offsetof(Test, d))); // d = 1 - // Floating point part of test. - __ Ldc1(f0, MemOperand(a0, offsetof(Test, e))); // src - __ Ldc1(f2, MemOperand(a0, offsetof(Test, f))); // test - __ lwc1(f8, MemOperand(a0, offsetof(Test, i)) ); // src - __ lwc1(f10, MemOperand(a0, offsetof(Test, j)) ); // test - __ seleqz_d(f4, f0, f2); - __ selnez_d(f6, f0, f2); - __ seleqz_s(f12, f8, f10); - __ selnez_s(f14, f8, f10); - __ Sdc1(f4, MemOperand(a0, offsetof(Test, g))); // src - __ Sdc1(f6, MemOperand(a0, offsetof(Test, h))); // src - __ swc1(f12, MemOperand(a0, offsetof(Test, k)) ); // src - __ swc1(f14, MemOperand(a0, offsetof(Test, l)) ); // src - __ jr(ra); - __ nop(); - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - auto f = GeneratedCode::FromCode(*code); - - (f.Call(&test, 0, 0, 0, 0)); - - CHECK_EQ(1, test.a); - CHECK_EQ(0, test.b); - CHECK_EQ(0, test.c); - CHECK_EQ(1, test.d); - - const int test_size = 3; - const int input_size = 5; - - double inputs_D[input_size] = {0.0, 65.2, -70.32, - 18446744073709551621.0, -18446744073709551621.0}; - double outputs_D[input_size] = {0.0, 65.2, -70.32, - 18446744073709551621.0, -18446744073709551621.0}; - double tests_D[test_size*2] = {2.8, 2.9, -2.8, -2.9, - 18446744073709551616.0, 18446744073709555712.0}; - float inputs_S[input_size] = {0.0, 65.2, -70.32, - 18446744073709551621.0, -18446744073709551621.0}; - float outputs_S[input_size] = {0.0, 65.2, -70.32, - 18446744073709551621.0, -18446744073709551621.0}; - float tests_S[test_size*2] = {2.9, 2.8, -2.9, -2.8, - 18446744073709551616.0, 18446746272732807168.0}; - for (int j=0; j < test_size; j+=2) { - for (int i=0; i < input_size; i++) { - test.e = inputs_D[i]; - test.f = tests_D[j]; - test.i = inputs_S[i]; - test.j = tests_S[j]; - (f.Call(&test, 0, 0, 0, 0)); - CHECK_EQ(outputs_D[i], test.g); - CHECK_EQ(0, test.h); - CHECK_EQ(outputs_S[i], test.k); - CHECK_EQ(0, test.l); - - test.f = tests_D[j+1]; - test.j = tests_S[j+1]; - (f.Call(&test, 0, 0, 0, 0)); - CHECK_EQ(0, test.g); - CHECK_EQ(outputs_D[i], test.h); - CHECK_EQ(0, test.k); - CHECK_EQ(outputs_S[i], test.l); - } - } - } -} - - -TEST(min_max) { - if (IsMipsArchVariant(kMips32r6)) { - CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - MacroAssembler assm(isolate, 
v8::internal::CodeObjectRequired::kYes); - - struct TestFloat { - double a; - double b; - double c; - double d; - float e; - float f; - float g; - float h; - }; - - TestFloat test; - const double dnan = std::numeric_limits::quiet_NaN(); - const double dinf = std::numeric_limits::infinity(); - const double dminf = -std::numeric_limits::infinity(); - const float fnan = std::numeric_limits::quiet_NaN(); - const float finf = std::numeric_limits::infinity(); - const float fminf = std::numeric_limits::infinity(); - const int kTableLength = 13; - double inputsa[kTableLength] = {2.0, 3.0, dnan, 3.0, -0.0, 0.0, dinf, - dnan, 42.0, dinf, dminf, dinf, dnan}; - double inputsb[kTableLength] = {3.0, 2.0, 3.0, dnan, 0.0, -0.0, dnan, - dinf, dinf, 42.0, dinf, dminf, dnan}; - double outputsdmin[kTableLength] = {2.0, 2.0, 3.0, 3.0, -0.0, - -0.0, dinf, dinf, 42.0, 42.0, - dminf, dminf, dnan}; - double outputsdmax[kTableLength] = {3.0, 3.0, 3.0, 3.0, 0.0, 0.0, dinf, - dinf, dinf, dinf, dinf, dinf, dnan}; - - float inputse[kTableLength] = {2.0, 3.0, fnan, 3.0, -0.0, 0.0, finf, - fnan, 42.0, finf, fminf, finf, fnan}; - float inputsf[kTableLength] = {3.0, 2.0, 3.0, fnan, 0.0, -0.0, fnan, - finf, finf, 42.0, finf, fminf, fnan}; - float outputsfmin[kTableLength] = {2.0, 2.0, 3.0, 3.0, -0.0, - -0.0, finf, finf, 42.0, 42.0, - fminf, fminf, fnan}; - float outputsfmax[kTableLength] = {3.0, 3.0, 3.0, 3.0, 0.0, 0.0, finf, - finf, finf, finf, finf, finf, fnan}; - - __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, a))); - __ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, b))); - __ lwc1(f2, MemOperand(a0, offsetof(TestFloat, e))); - __ lwc1(f6, MemOperand(a0, offsetof(TestFloat, f))); - __ min_d(f10, f4, f8); - __ max_d(f12, f4, f8); - __ min_s(f14, f2, f6); - __ max_s(f16, f2, f6); - __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, c))); - __ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, d))); - __ swc1(f14, MemOperand(a0, offsetof(TestFloat, g))); - __ swc1(f16, MemOperand(a0, offsetof(TestFloat, h))); - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - auto f = GeneratedCode::FromCode(*code); - for (int i = 0; i < kTableLength; i++) { - test.a = inputsa[i]; - test.b = inputsb[i]; - test.e = inputse[i]; - test.f = inputsf[i]; - - f.Call(&test, 0, 0, 0, 0); - - CHECK_EQ(0, memcmp(&test.c, &outputsdmin[i], sizeof(test.c))); - CHECK_EQ(0, memcmp(&test.d, &outputsdmax[i], sizeof(test.d))); - CHECK_EQ(0, memcmp(&test.g, &outputsfmin[i], sizeof(test.g))); - CHECK_EQ(0, memcmp(&test.h, &outputsfmax[i], sizeof(test.h))); - } - } -} - - -TEST(rint_d) { - if (IsMipsArchVariant(kMips32r6)) { - const int kTableLength = 30; - CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - - struct TestFloat { - double a; - double b; - int fcsr; - }; - - TestFloat test; - double inputs[kTableLength] = {18446744073709551617.0, - 4503599627370496.0, -4503599627370496.0, - 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147, - 1.7976931348623157E+308, 6.27463370218383111104242366943E-307, - 309485009821345068724781056.89, - 2.1, 2.6, 2.5, 3.1, 3.6, 3.5, - -2.1, -2.6, -2.5, -3.1, -3.6, -3.5, - 37778931862957161709568.0, 37778931862957161709569.0, - 37778931862957161709580.0, 37778931862957161709581.0, - 37778931862957161709582.0, 37778931862957161709583.0, - 37778931862957161709584.0, 37778931862957161709585.0, 
- 37778931862957161709586.0, 37778931862957161709587.0}; - double outputs_RN[kTableLength] = {18446744073709551617.0, - 4503599627370496.0, -4503599627370496.0, - 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147, - 1.7976931348623157E308, 0, - 309485009821345068724781057.0, - 2.0, 3.0, 2.0, 3.0, 4.0, 4.0, - -2.0, -3.0, -2.0, -3.0, -4.0, -4.0, - 37778931862957161709568.0, 37778931862957161709569.0, - 37778931862957161709580.0, 37778931862957161709581.0, - 37778931862957161709582.0, 37778931862957161709583.0, - 37778931862957161709584.0, 37778931862957161709585.0, - 37778931862957161709586.0, 37778931862957161709587.0}; - double outputs_RZ[kTableLength] = {18446744073709551617.0, - 4503599627370496.0, -4503599627370496.0, - 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147, - 1.7976931348623157E308, 0, - 309485009821345068724781057.0, - 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, - -2.0, -2.0, -2.0, -3.0, -3.0, -3.0, - 37778931862957161709568.0, 37778931862957161709569.0, - 37778931862957161709580.0, 37778931862957161709581.0, - 37778931862957161709582.0, 37778931862957161709583.0, - 37778931862957161709584.0, 37778931862957161709585.0, - 37778931862957161709586.0, 37778931862957161709587.0}; - double outputs_RP[kTableLength] = {18446744073709551617.0, - 4503599627370496.0, -4503599627370496.0, - 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147, - 1.7976931348623157E308, 1, - 309485009821345068724781057.0, - 3.0, 3.0, 3.0, 4.0, 4.0, 4.0, - -2.0, -2.0, -2.0, -3.0, -3.0, -3.0, - 37778931862957161709568.0, 37778931862957161709569.0, - 37778931862957161709580.0, 37778931862957161709581.0, - 37778931862957161709582.0, 37778931862957161709583.0, - 37778931862957161709584.0, 37778931862957161709585.0, - 37778931862957161709586.0, 37778931862957161709587.0}; - double outputs_RM[kTableLength] = {18446744073709551617.0, - 4503599627370496.0, -4503599627370496.0, - 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147, - 1.7976931348623157E308, 0, - 309485009821345068724781057.0, - 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, - -3.0, -3.0, -3.0, -4.0, -4.0, -4.0, - 37778931862957161709568.0, 37778931862957161709569.0, - 37778931862957161709580.0, 37778931862957161709581.0, - 37778931862957161709582.0, 37778931862957161709583.0, - 37778931862957161709584.0, 37778931862957161709585.0, - 37778931862957161709586.0, 37778931862957161709587.0}; - int fcsr_inputs[4] = - {kRoundToNearest, kRoundToZero, kRoundToPlusInf, kRoundToMinusInf}; - double* outputs[4] = {outputs_RN, outputs_RZ, outputs_RP, outputs_RM}; - __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, a))); - __ lw(t0, MemOperand(a0, offsetof(TestFloat, fcsr)) ); - __ cfc1(t1, FCSR); - __ ctc1(t0, FCSR); - __ rint_d(f8, f4); - __ Sdc1(f8, MemOperand(a0, offsetof(TestFloat, b))); - __ ctc1(t1, FCSR); - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - auto f = GeneratedCode::FromCode(*code); - - for (int j = 0; j < 4; j++) { - test.fcsr = fcsr_inputs[j]; - for (int i = 0; i < kTableLength; i++) { - test.a = inputs[i]; - (f.Call(&test, 0, 0, 0, 0)); - CHECK_EQ(test.b, outputs[j][i]); - } - } - } -} - - -TEST(sel) { - if (IsMipsArchVariant(kMips32r6)) { - CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - - struct Test { - double dd; - double ds; - double dt; - float fd; - 
-      float fs;
-      float ft;
-    };
-
-    Test test;
-    __ Ldc1(f0, MemOperand(a0, offsetof(Test, dd)));  // test
-    __ Ldc1(f2, MemOperand(a0, offsetof(Test, ds)));  // src1
-    __ Ldc1(f4, MemOperand(a0, offsetof(Test, dt)));  // src2
-    __ lwc1(f6, MemOperand(a0, offsetof(Test, fd)) );  // test
-    __ lwc1(f8, MemOperand(a0, offsetof(Test, fs)) );  // src1
-    __ lwc1(f10, MemOperand(a0, offsetof(Test, ft)) );  // src2
-    __ sel_d(f0, f2, f4);
-    __ sel_s(f6, f8, f10);
-    __ Sdc1(f0, MemOperand(a0, offsetof(Test, dd)));
-    __ swc1(f6, MemOperand(a0, offsetof(Test, fd)) );
-    __ jr(ra);
-    __ nop();
-    CodeDesc desc;
-    assm.GetCode(isolate, &desc);
-    Handle<Code> code =
-        Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-    auto f = GeneratedCode<F3>::FromCode(*code);
-
-    const int test_size = 3;
-    const int input_size = 5;
-
-    double inputs_dt[input_size] = {0.0, 65.2, -70.32,
-        18446744073709551621.0, -18446744073709551621.0};
-    double inputs_ds[input_size] = {0.1, 69.88, -91.325,
-        18446744073709551625.0, -18446744073709551625.0};
-    float inputs_ft[input_size] = {0.0, 65.2, -70.32,
-        18446744073709551621.0, -18446744073709551621.0};
-    float inputs_fs[input_size] = {0.1, 69.88, -91.325,
-        18446744073709551625.0, -18446744073709551625.0};
-    double tests_D[test_size*2] = {2.8, 2.9, -2.8, -2.9,
-        18446744073709551616.0, 18446744073709555712.0};
-    float tests_S[test_size*2] = {2.9, 2.8, -2.9, -2.8,
-        18446744073709551616.0, 18446746272732807168.0};
-    for (int j=0; j < test_size; j+=2) {
-      for (int i=0; i < input_size; i++) {
-        test.dt = inputs_dt[i];
-        test.dd = tests_D[j];
-        test.ds = inputs_ds[i];
-        test.ft = inputs_ft[i];
-        test.fd = tests_S[j];
-        test.fs = inputs_fs[i];
-        (f.Call(&test, 0, 0, 0, 0));
-        CHECK_EQ(test.dd, inputs_ds[i]);
-        CHECK_EQ(test.fd, inputs_fs[i]);
-
-        test.dd = tests_D[j+1];
-        test.fd = tests_S[j+1];
-        (f.Call(&test, 0, 0, 0, 0));
-        CHECK_EQ(test.dd, inputs_dt[i]);
-        CHECK_EQ(test.fd, inputs_ft[i]);
-      }
-    }
-  }
-}
-
-
-TEST(rint_s) {
-  if (IsMipsArchVariant(kMips32r6)) {
-    const int kTableLength = 30;
-    CcTest::InitializeVM();
-    Isolate* isolate = CcTest::i_isolate();
-    HandleScope scope(isolate);
-    MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-
-    struct TestFloat {
-      float a;
-      float b;
-      int fcsr;
-    };
-
-    TestFloat test;
-    float inputs[kTableLength] = {18446744073709551617.0,
-        4503599627370496.0, -4503599627370496.0,
-        1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37,
-        1.7976931348623157E+38, 6.27463370218383111104242366943E-37,
-        309485009821345068724781056.89,
-        2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-        -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
-        37778931862957161709568.0, 37778931862957161709569.0,
-        37778931862957161709580.0, 37778931862957161709581.0,
-        37778931862957161709582.0, 37778931862957161709583.0,
-        37778931862957161709584.0, 37778931862957161709585.0,
-        37778931862957161709586.0, 37778931862957161709587.0};
-    float outputs_RN[kTableLength] = {18446744073709551617.0,
-        4503599627370496.0, -4503599627370496.0,
-        1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37,
-        1.7976931348623157E38, 0,
-        309485009821345068724781057.0,
-        2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
-        -2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
-        37778931862957161709568.0, 37778931862957161709569.0,
-        37778931862957161709580.0, 37778931862957161709581.0,
-        37778931862957161709582.0, 37778931862957161709583.0,
-        37778931862957161709584.0, 37778931862957161709585.0,
-        37778931862957161709586.0, 37778931862957161709587.0};
-    float outputs_RZ[kTableLength] =
-        {18446744073709551617.0,
-        4503599627370496.0, -4503599627370496.0,
-        1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37,
-        1.7976931348623157E38, 0,
-        309485009821345068724781057.0,
-        2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
-        -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
-        37778931862957161709568.0, 37778931862957161709569.0,
-        37778931862957161709580.0, 37778931862957161709581.0,
-        37778931862957161709582.0, 37778931862957161709583.0,
-        37778931862957161709584.0, 37778931862957161709585.0,
-        37778931862957161709586.0, 37778931862957161709587.0};
-    float outputs_RP[kTableLength] = {18446744073709551617.0,
-        4503599627370496.0, -4503599627370496.0,
-        1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37,
-        1.7976931348623157E38, 1,
-        309485009821345068724781057.0,
-        3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
-        -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
-        37778931862957161709568.0, 37778931862957161709569.0,
-        37778931862957161709580.0, 37778931862957161709581.0,
-        37778931862957161709582.0, 37778931862957161709583.0,
-        37778931862957161709584.0, 37778931862957161709585.0,
-        37778931862957161709586.0, 37778931862957161709587.0};
-    float outputs_RM[kTableLength] = {18446744073709551617.0,
-        4503599627370496.0, -4503599627370496.0,
-        1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37,
-        1.7976931348623157E38, 0,
-        309485009821345068724781057.0,
-        2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
-        -3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
-        37778931862957161709568.0, 37778931862957161709569.0,
-        37778931862957161709580.0, 37778931862957161709581.0,
-        37778931862957161709582.0, 37778931862957161709583.0,
-        37778931862957161709584.0, 37778931862957161709585.0,
-        37778931862957161709586.0, 37778931862957161709587.0};
-    int fcsr_inputs[4] =
-        {kRoundToNearest, kRoundToZero, kRoundToPlusInf, kRoundToMinusInf};
-    float* outputs[4] = {outputs_RN, outputs_RZ, outputs_RP, outputs_RM};
-    __ lwc1(f4, MemOperand(a0, offsetof(TestFloat, a)) );
-    __ lw(t0, MemOperand(a0, offsetof(TestFloat, fcsr)) );
-    __ cfc1(t1, FCSR);
-    __ ctc1(t0, FCSR);
-    __ rint_s(f8, f4);
-    __ swc1(f8, MemOperand(a0, offsetof(TestFloat, b)) );
-    __ ctc1(t1, FCSR);
-    __ jr(ra);
-    __ nop();
-
-    CodeDesc desc;
-    assm.GetCode(isolate, &desc);
-    Handle<Code> code =
-        Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-    auto f = GeneratedCode<F3>::FromCode(*code);
-
-    for (int j = 0; j < 4; j++) {
-      test.fcsr = fcsr_inputs[j];
-      for (int i = 0; i < kTableLength; i++) {
-        test.a = inputs[i];
-        (f.Call(&test, 0, 0, 0, 0));
-        CHECK_EQ(test.b, outputs[j][i]);
-      }
-    }
-  }
-}
-
-
-TEST(Cvt_d_uw) {
-  CcTest::InitializeVM();
-  Isolate* isolate = CcTest::i_isolate();
-  HandleScope scope(isolate);
-  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-
-  struct TestStruct {
-    unsigned input;
-    uint64_t output;
-  };
-
-  unsigned inputs[] = {0x0, 0xFFFFFFFF, 0x80000000, 0x7FFFFFFF};
-
-  uint64_t outputs[] = {0x0, 0x41EFFFFFFFE00000, 0x41E0000000000000,
-                        0x41DFFFFFFFC00000};
-
-  int kTableLength = sizeof(inputs)/sizeof(inputs[0]);
-
-  TestStruct test;
-
-  __ lw(t1, MemOperand(a0, offsetof(TestStruct, input)));
-  __ Cvt_d_uw(f4, t1, f6);
-  __ Sdc1(f4, MemOperand(a0, offsetof(TestStruct, output)));
-  __ jr(ra);
-  __ nop();
-
-  CodeDesc desc;
-  assm.GetCode(isolate, &desc);
-  Handle<Code> code =
-      Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-  auto f = GeneratedCode<F3>::FromCode(*code);
-  for (int i = 0; i < kTableLength; i++) {
-    test.input = inputs[i];
-    (f.Call(&test, 0, 0, 0, 0));
-    // Check outputs
-    CHECK_EQ(test.output, outputs[i]);
-  }
-}
-
-
-TEST(mina_maxa) {
-  if (IsMipsArchVariant(kMips32r6)) {
-    const int kTableLength = 23;
-    CcTest::InitializeVM();
-    Isolate* isolate = CcTest::i_isolate();
-    HandleScope scope(isolate);
-    MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-    const double dnan = std::numeric_limits<double>::quiet_NaN();
-    const double dinf = std::numeric_limits<double>::infinity();
-    const double dminf = -std::numeric_limits<double>::infinity();
-    const float fnan = std::numeric_limits<float>::quiet_NaN();
-    const float finf = std::numeric_limits<float>::infinity();
-    const float fminf = -std::numeric_limits<float>::infinity();
-
-    struct TestFloat {
-      double a;
-      double b;
-      double resd;
-      double resd1;
-      float c;
-      float d;
-      float resf;
-      float resf1;
-    };
-
-    TestFloat test;
-    double inputsa[kTableLength] = {
-        5.3, 4.8, 6.1, 9.8, 9.8, 9.8, -10.0, -8.9, -9.8, -10.0, -8.9, -9.8,
-        dnan, 3.0, -0.0, 0.0, dinf, dnan, 42.0, dinf, dminf, dinf, dnan};
-    double inputsb[kTableLength] = {
-        4.8, 5.3, 6.1, -10.0, -8.9, -9.8, 9.8, 9.8, 9.8, -9.8, -11.2, -9.8,
-        3.0, dnan, 0.0, -0.0, dnan, dinf, dinf, 42.0, dinf, dminf, dnan};
-    double resd[kTableLength] = {
-        4.8, 4.8, 6.1, 9.8, -8.9, -9.8, 9.8, -8.9, -9.8, -9.8, -8.9, -9.8,
-        3.0, 3.0, -0.0, -0.0, dinf, dinf, 42.0, 42.0, dminf, dminf, dnan};
-    double resd1[kTableLength] = {
-        5.3, 5.3, 6.1, -10.0, 9.8, 9.8, -10.0, 9.8, 9.8, -10.0, -11.2, -9.8,
-        3.0, 3.0, 0.0, 0.0, dinf, dinf, dinf, dinf, dinf, dinf, dnan};
-    float inputsc[kTableLength] = {
-        5.3, 4.8, 6.1, 9.8, 9.8, 9.8, -10.0, -8.9, -9.8, -10.0, -8.9, -9.8,
-        fnan, 3.0, -0.0, 0.0, finf, fnan, 42.0, finf, fminf, finf, fnan};
-    float inputsd[kTableLength] = {4.8, 5.3, 6.1, -10.0, -8.9, -9.8,
-                                   9.8, 9.8, 9.8, -9.8, -11.2, -9.8,
-                                   3.0, fnan, -0.0, 0.0, fnan, finf,
-                                   finf, 42.0, finf, fminf, fnan};
-    float resf[kTableLength] = {
-        4.8, 4.8, 6.1, 9.8, -8.9, -9.8, 9.8, -8.9, -9.8, -9.8, -8.9, -9.8,
-        3.0, 3.0, -0.0, -0.0, finf, finf, 42.0, 42.0, fminf, fminf, fnan};
-    float resf1[kTableLength] = {
-        5.3, 5.3, 6.1, -10.0, 9.8, 9.8, -10.0, 9.8, 9.8, -10.0, -11.2, -9.8,
-        3.0, 3.0, 0.0, 0.0, finf, finf, finf, finf, finf, finf, fnan};
-
-    __ Ldc1(f2, MemOperand(a0, offsetof(TestFloat, a)));
-    __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, b)));
-    __ lwc1(f8, MemOperand(a0, offsetof(TestFloat, c)) );
-    __ lwc1(f10, MemOperand(a0, offsetof(TestFloat, d)) );
-    __ mina_d(f6, f2, f4);
-    __ mina_s(f12, f8, f10);
-    __ maxa_d(f14, f2, f4);
-    __ maxa_s(f16, f8, f10);
-    __ swc1(f12, MemOperand(a0, offsetof(TestFloat, resf)) );
-    __ Sdc1(f6, MemOperand(a0, offsetof(TestFloat, resd)));
-    __ swc1(f16, MemOperand(a0, offsetof(TestFloat, resf1)) );
-    __ Sdc1(f14, MemOperand(a0, offsetof(TestFloat, resd1)));
-    __ jr(ra);
-    __ nop();
-
-    CodeDesc desc;
-    assm.GetCode(isolate, &desc);
-    Handle<Code> code =
-        Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-    auto f = GeneratedCode<F3>::FromCode(*code);
-    for (int i = 0; i < kTableLength; i++) {
-      test.a = inputsa[i];
-      test.b = inputsb[i];
-      test.c = inputsc[i];
-      test.d = inputsd[i];
-      (f.Call(&test, 0, 0, 0, 0));
-      if (i < kTableLength - 1) {
-        CHECK_EQ(test.resd, resd[i]);
-        CHECK_EQ(test.resf, resf[i]);
-        CHECK_EQ(test.resd1, resd1[i]);
-        CHECK_EQ(test.resf1, resf1[i]);
-      } else {
-        CHECK(std::isnan(test.resd));
-        CHECK(std::isnan(test.resf));
-        CHECK(std::isnan(test.resd1));
-        CHECK(std::isnan(test.resf1));
-      }
-    }
-  }
-}
-
-
-// ----------------------mips32r2 specific tests----------------------
-TEST(trunc_l) {
-  if (IsMipsArchVariant(kMips32r2) &&
-      IsFp64Mode()) {
-    CcTest::InitializeVM();
-    Isolate* isolate = CcTest::i_isolate();
-    HandleScope scope(isolate);
-    MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-    const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
-    struct Test {
-      uint32_t isNaN2008;
-      double a;
-      float b;
-      int64_t c;  // a trunc result
-      int64_t d;  // b trunc result
-    };
-    const int kTableLength = 15;
-    double inputs_D[kTableLength] = {
-        2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-        -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
-        2147483648.0,
-        std::numeric_limits<double>::quiet_NaN(),
-        std::numeric_limits<double>::infinity()
-        };
-    float inputs_S[kTableLength] = {
-        2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-        -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
-        2147483648.0,
-        std::numeric_limits<float>::quiet_NaN(),
-        std::numeric_limits<float>::infinity()
-        };
-    double outputs[kTableLength] = {
-        2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
-        -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
-        2147483648.0, dFPU64InvalidResult,
-        dFPU64InvalidResult};
-    double outputsNaN2008[kTableLength] = {
-        2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
-        -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
-        2147483648.0,
-        0,
-        dFPU64InvalidResult};
-
-    __ cfc1(t1, FCSR);
-    __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
-    __ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
-    __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
-    __ trunc_l_d(f8, f4);
-    __ trunc_l_s(f10, f6);
-    __ Sdc1(f8, MemOperand(a0, offsetof(Test, c)));
-    __ Sdc1(f10, MemOperand(a0, offsetof(Test, d)));
-    __ jr(ra);
-    __ nop();
-    Test test;
-    CodeDesc desc;
-    assm.GetCode(isolate, &desc);
-    Handle<Code> code =
-        Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-    auto f = GeneratedCode<F3>::FromCode(*code);
-    for (int i = 0; i < kTableLength; i++) {
-      test.a = inputs_D[i];
-      test.b = inputs_S[i];
-      (f.Call(&test, 0, 0, 0, 0));
-      if ((test.isNaN2008 & kFCSRNaN2008FlagMask) &&
-              kArchVariant == kMips32r6) {
-        CHECK_EQ(test.c, outputsNaN2008[i]);
-      } else {
-        CHECK_EQ(test.c, outputs[i]);
-      }
-      CHECK_EQ(test.d, test.c);
-    }
-  }
-}
-
-
-TEST(movz_movn) {
-  if (IsMipsArchVariant(kMips32r2)) {
-    const int kTableLength = 4;
-    CcTest::InitializeVM();
-    Isolate* isolate = CcTest::i_isolate();
-    HandleScope scope(isolate);
-    MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-
-    struct TestFloat {
-      int32_t rt;
-      double a;
-      double b;
-      double bold;
-      double b1;
-      double bold1;
-      float c;
-      float d;
-      float dold;
-      float d1;
-      float dold1;
-    };
-
-    TestFloat test;
-    double inputs_D[kTableLength] = {
-        5.3, -5.3, 5.3, -2.9
-    };
-    double inputs_S[kTableLength] = {
-        4.8, 4.8, -4.8, -0.29
-    };
-
-    float outputs_S[kTableLength] = {
-        4.8, 4.8, -4.8, -0.29
-    };
-    double outputs_D[kTableLength] = {
-        5.3, -5.3, 5.3, -2.9
-    };
-
-    __ Ldc1(f2, MemOperand(a0, offsetof(TestFloat, a)));
-    __ lwc1(f6, MemOperand(a0, offsetof(TestFloat, c)) );
-    __ lw(t0, MemOperand(a0, offsetof(TestFloat, rt)) );
-    __ Move(f12, 0.0);
-    __ Move(f10, 0.0);
-    __ Move(f16, 0.0);
-    __ Move(f14, 0.0);
-    __ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, bold)));
-    __ swc1(f10, MemOperand(a0, offsetof(TestFloat, dold)) );
-    __ Sdc1(f16, MemOperand(a0, offsetof(TestFloat, bold1)));
-    __ swc1(f14, MemOperand(a0, offsetof(TestFloat, dold1)) );
-    __ movz_s(f10, f6, t0);
-    __ movz_d(f12, f2, t0);
-    __ movn_s(f14, f6, t0);
-    __ movn_d(f16, f2, t0);
-    __ swc1(f10, MemOperand(a0, offsetof(TestFloat, d)) );
-    __ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, b)));
-    __ swc1(f14, MemOperand(a0, offsetof(TestFloat, d1)) );
-    __ Sdc1(f16, MemOperand(a0, offsetof(TestFloat, b1)));
-    __ jr(ra);
-    __ nop();
-
-    CodeDesc desc;
-    assm.GetCode(isolate, &desc);
-    Handle<Code> code =
-        Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-    auto f = GeneratedCode<F3>::FromCode(*code);
-    for (int i = 0; i < kTableLength; i++) {
-      test.a = inputs_D[i];
-      test.c = inputs_S[i];
-
-      test.rt = 1;
-      (f.Call(&test, 0, 0, 0, 0));
-      CHECK_EQ(test.b, test.bold);
-      CHECK_EQ(test.d, test.dold);
-      CHECK_EQ(test.b1, outputs_D[i]);
-      CHECK_EQ(test.d1, outputs_S[i]);
-
-      test.rt = 0;
-      (f.Call(&test, 0, 0, 0, 0));
-      CHECK_EQ(test.b, outputs_D[i]);
-      CHECK_EQ(test.d, outputs_S[i]);
-      CHECK_EQ(test.b1, test.bold1);
-      CHECK_EQ(test.d1, test.dold1);
-    }
-  }
-}
-
-
-TEST(movt_movd) {
-  if (IsMipsArchVariant(kMips32r2)) {
-    const int kTableLength = 4;
-    CcTest::InitializeVM();
-    Isolate* isolate = CcTest::i_isolate();
-
-    struct TestFloat {
-      double srcd;
-      double dstd;
-      double dstdold;
-      double dstd1;
-      double dstdold1;
-      float srcf;
-      float dstf;
-      float dstfold;
-      float dstf1;
-      float dstfold1;
-      int32_t cc;
-      int32_t fcsr;
-    };
-
-    TestFloat test;
-    double inputs_D[kTableLength] = {
-        5.3, -5.3, 20.8, -2.9
-    };
-    double inputs_S[kTableLength] = {
-        4.88, 4.8, -4.8, -0.29
-    };
-
-    float outputs_S[kTableLength] = {
-        4.88, 4.8, -4.8, -0.29
-    };
-    double outputs_D[kTableLength] = {
-        5.3, -5.3, 20.8, -2.9
-    };
-    int condition_flags[8] = {0, 1, 2, 3, 4, 5, 6, 7};
-
-    for (int i = 0; i < kTableLength; i++) {
-      test.srcd = inputs_D[i];
-      test.srcf = inputs_S[i];
-
-      for (int j = 0; j< 8; j++) {
-        test.cc = condition_flags[j];
-        if (test.cc == 0) {
-          test.fcsr = 1 << 23;
-        } else {
-          test.fcsr = 1 << (24+condition_flags[j]);
-        }
-        HandleScope scope(isolate);
-        MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-        __ Ldc1(f2, MemOperand(a0, offsetof(TestFloat, srcd)));
-        __ lwc1(f4, MemOperand(a0, offsetof(TestFloat, srcf)) );
-        __ lw(t1, MemOperand(a0, offsetof(TestFloat, fcsr)) );
-        __ cfc1(t0, FCSR);
-        __ ctc1(t1, FCSR);
-        __ li(t2, 0x0);
-        __ mtc1(t2, f12);
-        __ mtc1(t2, f10);
-        __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstdold)));
-        __ swc1(f12, MemOperand(a0, offsetof(TestFloat, dstfold)) );
-        __ movt_s(f12, f4, test.cc);
-        __ movt_d(f10, f2, test.cc);
-        __ swc1(f12, MemOperand(a0, offsetof(TestFloat, dstf)) );
-        __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstd)));
-        __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstdold1)));
-        __ swc1(f12, MemOperand(a0, offsetof(TestFloat, dstfold1)) );
-        __ movf_s(f12, f4, test.cc);
-        __ movf_d(f10, f2, test.cc);
-        __ swc1(f12, MemOperand(a0, offsetof(TestFloat, dstf1)) );
-        __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, dstd1)));
-        __ ctc1(t0, FCSR);
-        __ jr(ra);
-        __ nop();
-
-        CodeDesc desc;
-        assm.GetCode(isolate, &desc);
-        Handle<Code> code =
-            Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-        auto f = GeneratedCode<F3>::FromCode(*code);
-
-        (f.Call(&test, 0, 0, 0, 0));
-        CHECK_EQ(test.dstf, outputs_S[i]);
-        CHECK_EQ(test.dstd, outputs_D[i]);
-        CHECK_EQ(test.dstf1, test.dstfold1);
-        CHECK_EQ(test.dstd1, test.dstdold1);
-        test.fcsr = 0;
-        (f.Call(&test, 0, 0, 0, 0));
-        CHECK_EQ(test.dstf, test.dstfold);
-        CHECK_EQ(test.dstd, test.dstdold);
-        CHECK_EQ(test.dstf1, outputs_S[i]);
-        CHECK_EQ(test.dstd1, outputs_D[i]);
-      }
-    }
-  }
-}
-
-
-// ----------------------tests for all archs--------------------------
-TEST(cvt_w_d) {
-  CcTest::InitializeVM();
-  Isolate* isolate = CcTest::i_isolate();
-  HandleScope scope(isolate);
-  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-
-  struct Test {
-    double a;
-    int32_t b;
-    int32_t fcsr;
-  };
-  const int kTableLength = 24;
-  double inputs[kTableLength] = {
-      2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-      -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
-      2147483637.0, 2147483638.0, 2147483639.0,
-      2147483640.0, 2147483641.0, 2147483642.0,
-      2147483643.0, 2147483644.0, 2147483645.0,
-      2147483646.0, 2147483647.0, 2147483653.0
-      };
-  double outputs_RN[kTableLength] = {
-      2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
-      -2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
-      2147483637.0, 2147483638.0, 2147483639.0,
-      2147483640.0, 2147483641.0, 2147483642.0,
-      2147483643.0, 2147483644.0, 2147483645.0,
-      2147483646.0, 2147483647.0, kFPUInvalidResult};
-  double outputs_RZ[kTableLength] = {
-      2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
-      -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
-      2147483637.0, 2147483638.0, 2147483639.0,
-      2147483640.0, 2147483641.0, 2147483642.0,
-      2147483643.0, 2147483644.0, 2147483645.0,
-      2147483646.0, 2147483647.0, kFPUInvalidResult};
-  double outputs_RP[kTableLength] = {
-      3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
-      -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
-      2147483637.0, 2147483638.0, 2147483639.0,
-      2147483640.0, 2147483641.0, 2147483642.0,
-      2147483643.0, 2147483644.0, 2147483645.0,
-      2147483646.0, 2147483647.0, kFPUInvalidResult};
-  double outputs_RM[kTableLength] = {
-      2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
-      -3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
-      2147483637.0, 2147483638.0, 2147483639.0,
-      2147483640.0, 2147483641.0, 2147483642.0,
-      2147483643.0, 2147483644.0, 2147483645.0,
-      2147483646.0, 2147483647.0, kFPUInvalidResult};
-  int fcsr_inputs[4] =
-      {kRoundToNearest, kRoundToZero, kRoundToPlusInf, kRoundToMinusInf};
-  double* outputs[4] = {outputs_RN, outputs_RZ, outputs_RP, outputs_RM};
-  __ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
-  __ lw(t0, MemOperand(a0, offsetof(Test, fcsr)) );
-  __ cfc1(t1, FCSR);
-  __ ctc1(t0, FCSR);
-  __ cvt_w_d(f8, f4);
-  __ swc1(f8, MemOperand(a0, offsetof(Test, b)) );
-  __ ctc1(t1, FCSR);
-  __ jr(ra);
-  __ nop();
-  Test test;
-  CodeDesc desc;
-  assm.GetCode(isolate, &desc);
-  Handle<Code> code =
-      Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-  auto f = GeneratedCode<F3>::FromCode(*code);
-  for (int j = 0; j < 4; j++) {
-    test.fcsr = fcsr_inputs[j];
-    for (int i = 0; i < kTableLength; i++) {
-      test.a = inputs[i];
-      (f.Call(&test, 0, 0, 0, 0));
-      CHECK_EQ(test.b, outputs[j][i]);
-    }
-  }
-}
-
-
-TEST(trunc_w) {
-  CcTest::InitializeVM();
-  Isolate* isolate = CcTest::i_isolate();
-  HandleScope scope(isolate);
-  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-
-  struct Test {
-    uint32_t isNaN2008;
-    double a;
-    float b;
-    int32_t c;  // a trunc result
-    int32_t d;  // b trunc result
-  };
-  const int kTableLength = 15;
-  double inputs_D[kTableLength] = {
-      2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-      -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
-      2147483648.0,
-      std::numeric_limits<double>::quiet_NaN(),
-      std::numeric_limits<double>::infinity()
-      };
-  float inputs_S[kTableLength] = {
-      2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-      -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
-      2147483648.0,
-      std::numeric_limits<float>::quiet_NaN(),
-      std::numeric_limits<float>::infinity()
-      };
-  double outputs[kTableLength] = {
-      2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
-      -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
-      kFPUInvalidResult, kFPUInvalidResult,
-      kFPUInvalidResult};
-  double outputsNaN2008[kTableLength] = {
-      2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
-      -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
-      kFPUInvalidResult,
-      0,
-      kFPUInvalidResult};
-
-  __ cfc1(t1, FCSR);
-  __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
-  __ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
-  __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
-  __ trunc_w_d(f8, f4);
-  __ trunc_w_s(f10, f6);
-  __ swc1(f8, MemOperand(a0, offsetof(Test, c)) );
-  __ swc1(f10, MemOperand(a0, offsetof(Test, d)) );
-  __ jr(ra);
-  __ nop();
-  Test test;
-  CodeDesc desc;
-  assm.GetCode(isolate, &desc);
-  Handle<Code> code =
-      Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-  auto f = GeneratedCode<F3>::FromCode(*code);
-  for (int i = 0; i < kTableLength; i++) {
-    test.a = inputs_D[i];
-    test.b = inputs_S[i];
-    (f.Call(&test, 0, 0, 0, 0));
-    if ((test.isNaN2008 & kFCSRNaN2008FlagMask) && kArchVariant == kMips32r6) {
-      CHECK_EQ(test.c, outputsNaN2008[i]);
-    } else {
-      CHECK_EQ(test.c, outputs[i]);
-    }
-    CHECK_EQ(test.d, test.c);
-  }
-}
-
-
-TEST(round_w) {
-  CcTest::InitializeVM();
-  Isolate* isolate = CcTest::i_isolate();
-  HandleScope scope(isolate);
-  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-
-  struct Test {
-    uint32_t isNaN2008;
-    double a;
-    float b;
-    int32_t c;  // a trunc result
-    int32_t d;  // b trunc result
-  };
-  const int kTableLength = 15;
-  double inputs_D[kTableLength] = {
-      2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-      -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
-      2147483648.0,
-      std::numeric_limits<double>::quiet_NaN(),
-      std::numeric_limits<double>::infinity()
-      };
-  float inputs_S[kTableLength] = {
-      2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-      -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
-      2147483648.0,
-      std::numeric_limits<float>::quiet_NaN(),
-      std::numeric_limits<float>::infinity()
-      };
-  double outputs[kTableLength] = {
-      2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
-      -2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
-      kFPUInvalidResult, kFPUInvalidResult,
-      kFPUInvalidResult};
-  double outputsNaN2008[kTableLength] = {
-      2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
-      -2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
-      kFPUInvalidResult, 0,
-      kFPUInvalidResult};
-
-  __ cfc1(t1, FCSR);
-  __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
-  __ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
-  __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
-  __ round_w_d(f8, f4);
-  __ round_w_s(f10, f6);
-  __ swc1(f8, MemOperand(a0, offsetof(Test, c)) );
-  __ swc1(f10, MemOperand(a0, offsetof(Test, d)) );
-  __ jr(ra);
-  __ nop();
-  Test test;
-  CodeDesc desc;
-  assm.GetCode(isolate, &desc);
-  Handle<Code> code =
-      Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-  auto f = GeneratedCode<F3>::FromCode(*code);
-  for (int i = 0; i < kTableLength; i++) {
-    test.a = inputs_D[i];
-    test.b = inputs_S[i];
-    (f.Call(&test, 0, 0, 0, 0));
-    if ((test.isNaN2008 & kFCSRNaN2008FlagMask) && kArchVariant == kMips32r6) {
-      CHECK_EQ(test.c, outputsNaN2008[i]);
-    } else {
-      CHECK_EQ(test.c, outputs[i]);
-    }
-    CHECK_EQ(test.d, test.c);
-  }
-}
-
-
-TEST(round_l) {
-  if (IsFp64Mode()) {
-    CcTest::InitializeVM();
-    Isolate* isolate = CcTest::i_isolate();
-    HandleScope scope(isolate);
-    MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-    const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
-    struct Test {
-      uint32_t isNaN2008;
-      double a;
-      float b;
-      int64_t c;
-      int64_t d;
-    };
-    const int kTableLength = 15;
-    double inputs_D[kTableLength] = {
-        2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-        -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
-        2147483648.0,
-        std::numeric_limits<double>::quiet_NaN(),
-        std::numeric_limits<double>::infinity()
-        };
-    float inputs_S[kTableLength] = {
-        2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-        -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
-        2147483648.0,
-        std::numeric_limits<float>::quiet_NaN(),
-        std::numeric_limits<float>::infinity()
-        };
-    double outputs[kTableLength] = {
-        2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
-        -2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
-        2147483648.0, dFPU64InvalidResult,
-        dFPU64InvalidResult};
-    double outputsNaN2008[kTableLength] = {
-        2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
-        -2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
-        2147483648.0,
-        0,
-        dFPU64InvalidResult};
-
-    __ cfc1(t1, FCSR);
-    __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
-    __ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
-    __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
-    __ round_l_d(f8, f4);
-    __ round_l_s(f10, f6);
-    __ Sdc1(f8, MemOperand(a0, offsetof(Test, c)));
-    __ Sdc1(f10, MemOperand(a0, offsetof(Test, d)));
-    __ jr(ra);
-    __ nop();
-    Test test;
-    CodeDesc desc;
-    assm.GetCode(isolate, &desc);
-    Handle<Code> code =
-        Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-    auto f = GeneratedCode<F3>::FromCode(*code);
-    for (int i = 0; i < kTableLength; i++) {
-      test.a = inputs_D[i];
-      test.b = inputs_S[i];
-      (f.Call(&test, 0, 0, 0, 0));
-      if ((test.isNaN2008 & kFCSRNaN2008FlagMask) &&
-          kArchVariant == kMips32r6) {
-        CHECK_EQ(test.c, outputsNaN2008[i]);
-      } else {
-        CHECK_EQ(test.c, outputs[i]);
-      }
-      CHECK_EQ(test.d, test.c);
-    }
-  }
-}
-
-
-TEST(sub) {
-  const int kTableLength = 12;
-  CcTest::InitializeVM();
-  Isolate* isolate = CcTest::i_isolate();
-  HandleScope scope(isolate);
-  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-
-  struct TestFloat {
-    float a;
-    float b;
-    float resultS;
-    double c;
-    double d;
-    double resultD;
-  };
-
-  TestFloat test;
-  double inputfs_D[kTableLength] = {
-      5.3, 4.8, 2.9, -5.3, -4.8, -2.9,
-      5.3, 4.8, 2.9, -5.3, -4.8, -2.9
-  };
-  double inputft_D[kTableLength] = {
-      4.8, 5.3, 2.9, 4.8, 5.3, 2.9,
-      -4.8, -5.3, -2.9, -4.8, -5.3, -2.9
-  };
-  double outputs_D[kTableLength] = {
-      0.5, -0.5, 0.0, -10.1, -10.1, -5.8,
-      10.1, 10.1, 5.8, -0.5, 0.5, 0.0
-  };
-  float inputfs_S[kTableLength] = {
-      5.3, 4.8, 2.9, -5.3, -4.8, -2.9,
-      5.3, 4.8, 2.9, -5.3, -4.8, -2.9
-  };
-  float inputft_S[kTableLength] = {
-      4.8, 5.3, 2.9, 4.8, 5.3, 2.9,
-      -4.8, -5.3, -2.9, -4.8, -5.3, -2.9
-  };
-  float outputs_S[kTableLength] = {
-      0.5, -0.5, 0.0, -10.1, -10.1, -5.8,
-      10.1, 10.1, 5.8, -0.5, 0.5, 0.0
-  };
-  __ lwc1(f2, MemOperand(a0, offsetof(TestFloat, a)) );
-  __ lwc1(f4, MemOperand(a0, offsetof(TestFloat, b)) );
-  __ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, c)));
-  __ Ldc1(f10, MemOperand(a0, offsetof(TestFloat, d)));
-  __ sub_s(f6, f2, f4);
-  __ sub_d(f12, f8, f10);
-  __ swc1(f6, MemOperand(a0, offsetof(TestFloat, resultS)) );
-  __ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD)));
-  __ jr(ra);
-  __ nop();
-
-  CodeDesc desc;
-  assm.GetCode(isolate, &desc);
-  Handle<Code> code =
-      Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-  auto f = GeneratedCode<F3>::FromCode(*code);
-  for (int i = 0; i < kTableLength; i++) {
-    test.a = inputfs_S[i];
-    test.b = inputft_S[i];
-    test.c = inputfs_D[i];
-    test.d = inputft_D[i];
-    (f.Call(&test, 0, 0, 0, 0));
-    CHECK_EQ(test.resultS, outputs_S[i]);
-    CHECK_EQ(test.resultD, outputs_D[i]);
-  }
-}
-
-
-TEST(sqrt_rsqrt_recip) {
-  const int kTableLength = 4;
-  const double deltaDouble = 2E-15;
-  const float deltaFloat = 2E-7;
-  const float sqrt2_s = sqrt(2);
-  const double sqrt2_d = sqrt(2);
-  CcTest::InitializeVM();
-  Isolate* isolate = CcTest::i_isolate();
-  HandleScope scope(isolate);
-  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-
-  struct TestFloat {
-    float a;
-    float resultS;
-    float resultS1;
-    float resultS2;
-    double c;
-    double resultD;
-    double resultD1;
-    double resultD2;
-  };
-  TestFloat test;
-
-  double inputs_D[kTableLength] = {
-      0.0L, 4.0L, 2.0L, 4e-28L
-  };
-
-  double outputs_D[kTableLength] = {
-      0.0L, 2.0L, sqrt2_d, 2e-14L
-  };
-  float inputs_S[kTableLength] = {
-      0.0, 4.0, 2.0, 4e-28
-  };
-
-  float outputs_S[kTableLength] = {
-      0.0, 2.0, sqrt2_s, 2e-14
-  };
-
-
-  __ lwc1(f2, MemOperand(a0, offsetof(TestFloat, a)) );
-  __ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, c)));
-  __ sqrt_s(f6, f2);
-  __ sqrt_d(f12, f8);
-
-  if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
-    __ rsqrt_d(f14, f8);
-    __ rsqrt_s(f16, f2);
-    __ recip_d(f18, f8);
-    __ recip_s(f4, f2);
-  }
-  __ swc1(f6, MemOperand(a0, offsetof(TestFloat, resultS)) );
-  __ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD)));
-
-  if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
-    __ swc1(f16, MemOperand(a0, offsetof(TestFloat, resultS1)) );
-    __ Sdc1(f14, MemOperand(a0, offsetof(TestFloat, resultD1)));
-    __ swc1(f4, MemOperand(a0, offsetof(TestFloat, resultS2)) );
-    __ Sdc1(f18, MemOperand(a0, offsetof(TestFloat, resultD2)));
-  }
-  __ jr(ra);
-  __ nop();
-
-  CodeDesc desc;
-  assm.GetCode(isolate, &desc);
-  Handle<Code> code =
-      Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-  auto f = GeneratedCode<F3>::FromCode(*code);
-
-  for (int i = 0; i < kTableLength; i++) {
-    float f1;
-    double d1;
-    test.a = inputs_S[i];
-    test.c = inputs_D[i];
-
-    (f.Call(&test, 0, 0, 0, 0));
-
-    CHECK_EQ(test.resultS, outputs_S[i]);
-    CHECK_EQ(test.resultD, outputs_D[i]);
-
-    if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
-      if (i != 0) {
-        f1 = test.resultS1 - 1.0F/outputs_S[i];
-        f1 = (f1 < 0) ? f1 : -f1;
-        CHECK(f1 <= deltaFloat);
-        d1 = test.resultD1 - 1.0L/outputs_D[i];
-        d1 = (d1 < 0) ? d1 : -d1;
-        CHECK(d1 <= deltaDouble);
-        f1 = test.resultS2 - 1.0F/inputs_S[i];
-        f1 = (f1 < 0) ? f1 : -f1;
-        CHECK(f1 <= deltaFloat);
-        d1 = test.resultD2 - 1.0L/inputs_D[i];
-        d1 = (d1 < 0) ? d1 : -d1;
-        CHECK(d1 <= deltaDouble);
-      } else {
-        CHECK_EQ(test.resultS1, 1.0F/outputs_S[i]);
-        CHECK_EQ(test.resultD1, 1.0L/outputs_D[i]);
-        CHECK_EQ(test.resultS2, 1.0F/inputs_S[i]);
-        CHECK_EQ(test.resultD2, 1.0L/inputs_D[i]);
-      }
-    }
-  }
-}
-
-
-TEST(neg) {
-  const int kTableLength = 3;
-  CcTest::InitializeVM();
-  Isolate* isolate = CcTest::i_isolate();
-  HandleScope scope(isolate);
-  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-
-  struct TestFloat {
-    float a;
-    float resultS;
-    double c;
-    double resultD;
-  };
-
-  TestFloat test;
-  double inputs_D[kTableLength] = {
-      0.0, 4.0, -2.0
-  };
-
-  double outputs_D[kTableLength] = {
-      0.0, -4.0, 2.0
-  };
-  float inputs_S[kTableLength] = {
-      0.0, 4.0, -2.0
-  };
-
-  float outputs_S[kTableLength] = {
-      0.0, -4.0, 2.0
-  };
-  __ lwc1(f2, MemOperand(a0, offsetof(TestFloat, a)) );
-  __ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, c)));
-  __ neg_s(f6, f2);
-  __ neg_d(f12, f8);
-  __ swc1(f6, MemOperand(a0, offsetof(TestFloat, resultS)) );
-  __ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD)));
-  __ jr(ra);
-  __ nop();
-
-  CodeDesc desc;
-  assm.GetCode(isolate, &desc);
-  Handle<Code> code =
-      Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-  auto f = GeneratedCode<F3>::FromCode(*code);
-  for (int i = 0; i < kTableLength; i++) {
-    test.a = inputs_S[i];
-    test.c = inputs_D[i];
-    (f.Call(&test, 0, 0, 0, 0));
-    CHECK_EQ(test.resultS, outputs_S[i]);
-    CHECK_EQ(test.resultD, outputs_D[i]);
-  }
-}
-
-
-TEST(mul) {
-  const int kTableLength = 4;
-  CcTest::InitializeVM();
-  Isolate* isolate = CcTest::i_isolate();
-  HandleScope scope(isolate);
-  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-
-  struct TestFloat {
-    float a;
-    float b;
-    float resultS;
-    double c;
-    double d;
-    double resultD;
-  };
-
-  TestFloat test;
-  double inputfs_D[kTableLength] = {
-      5.3, -5.3, 5.3, -2.9
-  };
-  double inputft_D[kTableLength] = {
-      4.8, 4.8, -4.8, -0.29
-  };
-
-  float inputfs_S[kTableLength] = {
-      5.3, -5.3, 5.3, -2.9
-  };
-  float inputft_S[kTableLength] = {
-      4.8, 4.8, -4.8, -0.29
-  };
-
-  __ lwc1(f2, MemOperand(a0, offsetof(TestFloat, a)) );
-  __ lwc1(f4, MemOperand(a0, offsetof(TestFloat, b)) );
-  __ Ldc1(f6, MemOperand(a0, offsetof(TestFloat, c)));
-  __ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, d)));
-  __ mul_s(f10, f2, f4);
-  __ mul_d(f12, f6, f8);
-  __ swc1(f10, MemOperand(a0, offsetof(TestFloat, resultS)) );
-  __ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, resultD)));
-  __ jr(ra);
-  __ nop();
-
-  CodeDesc desc;
-  assm.GetCode(isolate, &desc);
-  Handle<Code> code =
-      Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-  auto f = GeneratedCode<F3>::FromCode(*code);
-  for (int i = 0; i < kTableLength; i++) {
-    test.a = inputfs_S[i];
-    test.b = inputft_S[i];
-    test.c = inputfs_D[i];
-    test.d = inputft_D[i];
-    (f.Call(&test, 0, 0, 0, 0));
-    CHECK_EQ(test.resultS, inputfs_S[i]*inputft_S[i]);
-    CHECK_EQ(test.resultD, inputfs_D[i]*inputft_D[i]);
-  }
-}
-
-
-TEST(mov) {
-  const int kTableLength = 4;
-  CcTest::InitializeVM();
-  Isolate* isolate = CcTest::i_isolate();
-  HandleScope scope(isolate);
-  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-
-  struct TestFloat {
-    double a;
-    double b;
-    float c;
-    float d;
-  };
-
-  TestFloat test;
-  double inputs_D[kTableLength] = {
-      5.3, -5.3, 5.3, -2.9
-  };
-  double inputs_S[kTableLength] = {
-      4.8, 4.8, -4.8, -0.29
-  };
-
-  float outputs_S[kTableLength] = {
-      4.8, 4.8, -4.8, -0.29
-  };
-  double outputs_D[kTableLength] = {
-      5.3, -5.3,
-      5.3, -2.9
-  };
-
-  __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
-  __ lwc1(f6, MemOperand(a0, offsetof(TestFloat, c)) );
-  __ mov_s(f8, f6);
-  __ mov_d(f10, f4);
-  __ swc1(f8, MemOperand(a0, offsetof(TestFloat, d)) );
-  __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, b)));
-
-  __ jr(ra);
-  __ nop();
-
-  CodeDesc desc;
-  assm.GetCode(isolate, &desc);
-  Handle<Code> code =
-      Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-  auto f = GeneratedCode<F3>::FromCode(*code);
-  for (int i = 0; i < kTableLength; i++) {
-    test.a = inputs_D[i];
-    test.c = inputs_S[i];
-
-    (f.Call(&test, 0, 0, 0, 0));
-    CHECK_EQ(test.b, outputs_D[i]);
-    CHECK_EQ(test.d, outputs_S[i]);
-  }
-}
-
-
-TEST(floor_w) {
-  CcTest::InitializeVM();
-  Isolate* isolate = CcTest::i_isolate();
-  HandleScope scope(isolate);
-  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-
-  struct Test {
-    uint32_t isNaN2008;
-    double a;
-    float b;
-    int32_t c;  // a floor result
-    int32_t d;  // b floor result
-  };
-  const int kTableLength = 15;
-  double inputs_D[kTableLength] = {
-      2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-      -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
-      2147483648.0,
-      std::numeric_limits<double>::quiet_NaN(),
-      std::numeric_limits<double>::infinity()
-      };
-  float inputs_S[kTableLength] = {
-      2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-      -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
-      2147483648.0,
-      std::numeric_limits<float>::quiet_NaN(),
-      std::numeric_limits<float>::infinity()
-      };
-  double outputs[kTableLength] = {
-      2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
-      -3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
-      kFPUInvalidResult, kFPUInvalidResult,
-      kFPUInvalidResult};
-  double outputsNaN2008[kTableLength] = {
-      2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
-      -3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
-      kFPUInvalidResult,
-      0,
-      kFPUInvalidResult};
-
-  __ cfc1(t1, FCSR);
-  __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
-  __ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
-  __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
-  __ floor_w_d(f8, f4);
-  __ floor_w_s(f10, f6);
-  __ swc1(f8, MemOperand(a0, offsetof(Test, c)) );
-  __ swc1(f10, MemOperand(a0, offsetof(Test, d)) );
-  __ jr(ra);
-  __ nop();
-  Test test;
-  CodeDesc desc;
-  assm.GetCode(isolate, &desc);
-  Handle<Code> code =
-      Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-  auto f = GeneratedCode<F3>::FromCode(*code);
-  for (int i = 0; i < kTableLength; i++) {
-    test.a = inputs_D[i];
-    test.b = inputs_S[i];
-    (f.Call(&test, 0, 0, 0, 0));
-    if ((test.isNaN2008 & kFCSRNaN2008FlagMask) && kArchVariant == kMips32r6) {
-      CHECK_EQ(test.c, outputsNaN2008[i]);
-    } else {
-      CHECK_EQ(test.c, outputs[i]);
-    }
-    CHECK_EQ(test.d, test.c);
-  }
-}
-
-
-TEST(floor_l) {
-  if (IsFp64Mode()) {
-    CcTest::InitializeVM();
-    Isolate* isolate = CcTest::i_isolate();
-    HandleScope scope(isolate);
-    MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-    const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
-    struct Test {
-      uint32_t isNaN2008;
-      double a;
-      float b;
-      int64_t c;
-      int64_t d;
-    };
-    const int kTableLength = 15;
-    double inputs_D[kTableLength] = {
-        2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-        -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
-        2147483648.0,
-        std::numeric_limits<double>::quiet_NaN(),
-        std::numeric_limits<double>::infinity()
-        };
-    float inputs_S[kTableLength] = {
-        2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-        -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
-        2147483648.0,
-        std::numeric_limits<float>::quiet_NaN(),
-        std::numeric_limits<float>::infinity()
-        };
-    double outputs[kTableLength] = {
-        2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
-        -3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
-        2147483648.0,
-        dFPU64InvalidResult,
-        dFPU64InvalidResult};
-    double outputsNaN2008[kTableLength] = {
-        2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
-        -3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
-        2147483648.0,
-        0,
-        dFPU64InvalidResult};
-
-    __ cfc1(t1, FCSR);
-    __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
-    __ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
-    __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
-    __ floor_l_d(f8, f4);
-    __ floor_l_s(f10, f6);
-    __ Sdc1(f8, MemOperand(a0, offsetof(Test, c)));
-    __ Sdc1(f10, MemOperand(a0, offsetof(Test, d)));
-    __ jr(ra);
-    __ nop();
-    Test test;
-    CodeDesc desc;
-    assm.GetCode(isolate, &desc);
-    Handle<Code> code =
-        Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-    auto f = GeneratedCode<F3>::FromCode(*code);
-    for (int i = 0; i < kTableLength; i++) {
-      test.a = inputs_D[i];
-      test.b = inputs_S[i];
-      (f.Call(&test, 0, 0, 0, 0));
-      if ((test.isNaN2008 & kFCSRNaN2008FlagMask) &&
-          kArchVariant == kMips32r6) {
-        CHECK_EQ(test.c, outputsNaN2008[i]);
-      } else {
-        CHECK_EQ(test.c, outputs[i]);
-      }
-      CHECK_EQ(test.d, test.c);
-    }
-  }
-}
-
-
-TEST(ceil_w) {
-  CcTest::InitializeVM();
-  Isolate* isolate = CcTest::i_isolate();
-  HandleScope scope(isolate);
-  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-
-  struct Test {
-    uint32_t isNaN2008;
-    double a;
-    float b;
-    int32_t c;  // a floor result
-    int32_t d;  // b floor result
-  };
-  const int kTableLength = 15;
-  double inputs_D[kTableLength] = {
-      2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-      -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
-      2147483648.0,
-      std::numeric_limits<double>::quiet_NaN(),
-      std::numeric_limits<double>::infinity()
-      };
-  float inputs_S[kTableLength] = {
-      2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-      -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
-      2147483648.0,
-      std::numeric_limits<float>::quiet_NaN(),
-      std::numeric_limits<float>::infinity()
-      };
-  double outputs[kTableLength] = {
-      3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
-      -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
-      kFPUInvalidResult, kFPUInvalidResult,
-      kFPUInvalidResult};
-  double outputsNaN2008[kTableLength] = {
-      3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
-      -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
-      kFPUInvalidResult,
-      0,
-      kFPUInvalidResult};
-
-  __ cfc1(t1, FCSR);
-  __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
-  __ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
-  __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
-  __ ceil_w_d(f8, f4);
-  __ ceil_w_s(f10, f6);
-  __ swc1(f8, MemOperand(a0, offsetof(Test, c)) );
-  __ swc1(f10, MemOperand(a0, offsetof(Test, d)) );
-  __ jr(ra);
-  __ nop();
-  Test test;
-  CodeDesc desc;
-  assm.GetCode(isolate, &desc);
-  Handle<Code> code =
-      Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-  auto f = GeneratedCode<F3>::FromCode(*code);
-  for (int i = 0; i < kTableLength; i++) {
-    test.a = inputs_D[i];
-    test.b = inputs_S[i];
-    (f.Call(&test, 0, 0, 0, 0));
-    if ((test.isNaN2008 & kFCSRNaN2008FlagMask) && kArchVariant == kMips32r6) {
-      CHECK_EQ(test.c, outputsNaN2008[i]);
-    } else {
-      CHECK_EQ(test.c, outputs[i]);
-    }
-    CHECK_EQ(test.d, test.c);
-  }
-}
-
-
-TEST(ceil_l) {
-  if (IsFp64Mode()) {
-    CcTest::InitializeVM();
-    Isolate* isolate = CcTest::i_isolate();
-    HandleScope scope(isolate);
-    MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-    const double dFPU64InvalidResult = static_cast<double>(kFPU64InvalidResult);
-    struct Test {
-      uint32_t isNaN2008;
-      double a;
-      float b;
-      int64_t c;
-      int64_t d;
-    };
-    const int kTableLength = 15;
-    double inputs_D[kTableLength] = {
-        2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-        -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
-        2147483648.0,
-        std::numeric_limits<double>::quiet_NaN(),
-        std::numeric_limits<double>::infinity()
-        };
-    float inputs_S[kTableLength] = {
-        2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
-        -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
-        2147483648.0,
-        std::numeric_limits<float>::quiet_NaN(),
-        std::numeric_limits<float>::infinity()
-        };
-    double outputs[kTableLength] = {
-        3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
-        -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
-        2147483648.0, dFPU64InvalidResult,
-        dFPU64InvalidResult};
-    double outputsNaN2008[kTableLength] = {
-        3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
-        -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
-        2147483648.0,
-        0,
-        dFPU64InvalidResult};
-
-    __ cfc1(t1, FCSR);
-    __ sw(t1, MemOperand(a0, offsetof(Test, isNaN2008)));
-    __ Ldc1(f4, MemOperand(a0, offsetof(Test, a)));
-    __ lwc1(f6, MemOperand(a0, offsetof(Test, b)) );
-    __ ceil_l_d(f8, f4);
-    __ ceil_l_s(f10, f6);
-    __ Sdc1(f8, MemOperand(a0, offsetof(Test, c)));
-    __ Sdc1(f10, MemOperand(a0, offsetof(Test, d)));
-    __ jr(ra);
-    __ nop();
-    Test test;
-    CodeDesc desc;
-    assm.GetCode(isolate, &desc);
-    Handle<Code> code =
-        Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-    auto f = GeneratedCode<F3>::FromCode(*code);
-    for (int i = 0; i < kTableLength; i++) {
-      test.a = inputs_D[i];
-      test.b = inputs_S[i];
-      (f.Call(&test, 0, 0, 0, 0));
-      if ((test.isNaN2008 & kFCSRNaN2008FlagMask) &&
-          kArchVariant == kMips32r6) {
-        CHECK_EQ(test.c, outputsNaN2008[i]);
-      } else {
-        CHECK_EQ(test.c, outputs[i]);
-      }
-      CHECK_EQ(test.d, test.c);
-    }
-  }
-}
-
-
-TEST(jump_tables1) {
-  // Test jump tables with forward jumps.
-  CcTest::InitializeVM();
-  Isolate* isolate = CcTest::i_isolate();
-  HandleScope scope(isolate);
-  Assembler assm(AssemblerOptions{});
-
-  const int kNumCases = 512;
-  int values[kNumCases];
-  isolate->random_number_generator()->NextBytes(values, sizeof(values));
-  Label labels[kNumCases];
-
-  __ addiu(sp, sp, -4);
-  __ sw(ra, MemOperand(sp));
-
-  Label done;
-  {
-    __ BlockTrampolinePoolFor(kNumCases + 7);
-
-    __ nal();
-    __ nop();
-    __ sll(at, a0, 2);
-    __ addu(at, at, ra);
-    __ lw(at, MemOperand(at, 5 * kInstrSize));
-    __ jr(at);
-    __ nop();
-    for (int i = 0; i < kNumCases; ++i) {
-      __ dd(&labels[i]);
-    }
-  }
-
-  for (int i = 0; i < kNumCases; ++i) {
-    __ bind(&labels[i]);
-    __ lui(v0, (values[i] >> 16) & 0xFFFF);
-    __ ori(v0, v0, values[i] & 0xFFFF);
-    __ b(&done);
-    __ nop();
-  }
-
-  __ bind(&done);
-  __ lw(ra, MemOperand(sp));
-  __ addiu(sp, sp, 4);
-  __ jr(ra);
-  __ nop();
-
-  CHECK_EQ(0, assm.UnboundLabelsCount());
-
-  CodeDesc desc;
-  assm.GetCode(isolate, &desc);
-  Handle<Code> code =
-      Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-#ifdef OBJECT_PRINT
-  code->Print(std::cout);
-#endif
-  auto f = GeneratedCode<F1>::FromCode(*code);
-  for (int i = 0; i < kNumCases; ++i) {
-    int res = reinterpret_cast<int>(f.Call(i, 0, 0, 0, 0));
-    ::printf("f(%d) = %d\n", i, res);
-    CHECK_EQ(values[i], res);
-  }
-}
-
-
-TEST(jump_tables2) {
-  // Test jump tables with backward jumps.
-  CcTest::InitializeVM();
-  Isolate* isolate = CcTest::i_isolate();
-  HandleScope scope(isolate);
-  Assembler assm(AssemblerOptions{});
-
-  const int kNumCases = 512;
-  int values[kNumCases];
-  isolate->random_number_generator()->NextBytes(values, sizeof(values));
-  Label labels[kNumCases];
-
-  __ addiu(sp, sp, -4);
-  __ sw(ra, MemOperand(sp));
-
-  Label done, dispatch;
-  __ b(&dispatch);
-  __ nop();
-
-  for (int i = 0; i < kNumCases; ++i) {
-    __ bind(&labels[i]);
-    __ lui(v0, (values[i] >> 16) & 0xFFFF);
-    __ ori(v0, v0, values[i] & 0xFFFF);
-    __ b(&done);
-    __ nop();
-  }
-
-  __ bind(&dispatch);
-  {
-    __ BlockTrampolinePoolFor(kNumCases + 7);
-
-    __ nal();
-    __ nop();
-    __ sll(at, a0, 2);
-    __ addu(at, at, ra);
-    __ lw(at, MemOperand(at, 5 * kInstrSize));
-    __ jr(at);
-    __ nop();
-    for (int i = 0; i < kNumCases; ++i) {
-      __ dd(&labels[i]);
-    }
-  }
-
-  __ bind(&done);
-  __ lw(ra, MemOperand(sp));
-  __ addiu(sp, sp, 4);
-  __ jr(ra);
-  __ nop();
-
-  CodeDesc desc;
-  assm.GetCode(isolate, &desc);
-  Handle<Code> code =
-      Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-#ifdef OBJECT_PRINT
-  code->Print(std::cout);
-#endif
-  auto f = GeneratedCode<F1>::FromCode(*code);
-  for (int i = 0; i < kNumCases; ++i) {
-    int res = reinterpret_cast<int>(f.Call(i, 0, 0, 0, 0));
-    ::printf("f(%d) = %d\n", i, res);
-    CHECK_EQ(values[i], res);
-  }
-}
-
-
-TEST(jump_tables3) {
-  // Test jump tables with backward jumps and embedded heap objects.
-  CcTest::InitializeVM();
-  Isolate* isolate = CcTest::i_isolate();
-  HandleScope scope(isolate);
-  Assembler assm(AssemblerOptions{});
-
-  const int kNumCases = 256;
-  Handle<Object> values[kNumCases];
-  for (int i = 0; i < kNumCases; ++i) {
-    double value = isolate->random_number_generator()->NextDouble();
-    values[i] = isolate->factory()->NewHeapNumber(value);
-  }
-  Label labels[kNumCases];
-  Object obj;
-  int32_t imm32;
-
-  __ addiu(sp, sp, -4);
-  __ sw(ra, MemOperand(sp));
-
-  Label done, dispatch;
-  __ b(&dispatch);
-
-
-  for (int i = 0; i < kNumCases; ++i) {
-    __ bind(&labels[i]);
-    obj = *values[i];
-    imm32 = obj.ptr();
-    __ lui(v0, (imm32 >> 16) & 0xFFFF);
-    __ ori(v0, v0, imm32 & 0xFFFF);
-    __ b(&done);
-    __ nop();
-  }
-
-  __ bind(&dispatch);
-  {
-    __ BlockTrampolinePoolFor(kNumCases + 7);
-
-    __ nal();
-    __ nop();
-    __ sll(at, a0, 2);
-    __ addu(at, at, ra);
-    __ lw(at, MemOperand(at, 5 * kInstrSize));
-    __ jr(at);
-    __ nop();
-    for (int i = 0; i < kNumCases; ++i) {
-      __ dd(&labels[i]);
-    }
-  }
-
-  __ bind(&done);
-  __ lw(ra, MemOperand(sp));
-  __ addiu(sp, sp, 4);
-  __ jr(ra);
-  __ nop();
-
-  CodeDesc desc;
-  assm.GetCode(isolate, &desc);
-  Handle<Code> code =
-      Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-#ifdef OBJECT_PRINT
-  code->Print(std::cout);
-#endif
-  auto f = GeneratedCode<F1>::FromCode(*code);
-  for (int i = 0; i < kNumCases; ++i) {
-    Handle<Object> result(
-        Object(reinterpret_cast<Address>(f.Call(i, 0, 0, 0, 0))), isolate);
-#ifdef OBJECT_PRINT
-    ::printf("f(%d) = ", i);
-    result->Print(std::cout);
-    ::printf("\n");
-#endif
-    CHECK(values[i].is_identical_to(result));
-  }
-}
-
-
-TEST(BITSWAP) {
-  // Test BITSWAP
-  if (IsMipsArchVariant(kMips32r6)) {
-    CcTest::InitializeVM();
-    Isolate* isolate = CcTest::i_isolate();
-    HandleScope scope(isolate);
-
-    struct T {
-      int32_t r1;
-      int32_t r2;
-      int32_t r3;
-      int32_t r4;
-    };
-    T t;
-
-    Assembler assm(AssemblerOptions{});
-
-    __ lw(a2, MemOperand(a0, offsetof(T, r1)));
-    __ nop();
-    __ bitswap(a1, a2);
-    __ sw(a1, MemOperand(a0, offsetof(T, r1)));
-
-    __ lw(a2, MemOperand(a0, offsetof(T, r2)));
-    __ nop();
-    __ bitswap(a1, a2);
-    __ sw(a1, MemOperand(a0, offsetof(T, r2)));
-
-    __ jr(ra);
-    __ nop();
-
-    CodeDesc desc;
-    assm.GetCode(isolate, &desc);
-    Handle<Code> code =
-        Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-    auto f = GeneratedCode<F3>::FromCode(*code);
-    t.r1 = 0x781A15C3;
-    t.r2 = 0x8B71FCDE;
-    f.Call(&t, 0, 0, 0, 0);
-
-    CHECK_EQ(static_cast<int32_t>(0x1E58A8C3), t.r1);
-    CHECK_EQ(static_cast<int32_t>(0xD18E3F7B), t.r2);
-  }
-}
-
-
-TEST(class_fmt) {
-  if (IsMipsArchVariant(kMips32r6)) {
-    // Test CLASS.fmt instruction.
-    CcTest::InitializeVM();
-    Isolate* isolate = CcTest::i_isolate();
-    HandleScope scope(isolate);
-
-    struct T {
-      double dSignalingNan;
-      double dQuietNan;
-      double dNegInf;
-      double dNegNorm;
-      double dNegSubnorm;
-      double dNegZero;
-      double dPosInf;
-      double dPosNorm;
-      double dPosSubnorm;
-      double dPosZero;
-      float fSignalingNan;
-      float fQuietNan;
-      float fNegInf;
-      float fNegNorm;
-      float fNegSubnorm;
-      float fNegZero;
-      float fPosInf;
-      float fPosNorm;
-      float fPosSubnorm;
-      float fPosZero;
-    };
-    T t;
-
-    // Create a function that accepts &t, and loads, manipulates, and stores
-    // the doubles t.a ... t.f.
-    MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-
-    __ Ldc1(f4, MemOperand(a0, offsetof(T, dSignalingNan)));
-    __ class_d(f6, f4);
-    __ Sdc1(f6, MemOperand(a0, offsetof(T, dSignalingNan)));
-
-    __ Ldc1(f4, MemOperand(a0, offsetof(T, dQuietNan)));
-    __ class_d(f6, f4);
-    __ Sdc1(f6, MemOperand(a0, offsetof(T, dQuietNan)));
-
-    __ Ldc1(f4, MemOperand(a0, offsetof(T, dNegInf)));
-    __ class_d(f6, f4);
-    __ Sdc1(f6, MemOperand(a0, offsetof(T, dNegInf)));
-
-    __ Ldc1(f4, MemOperand(a0, offsetof(T, dNegNorm)));
-    __ class_d(f6, f4);
-    __ Sdc1(f6, MemOperand(a0, offsetof(T, dNegNorm)));
-
-    __ Ldc1(f4, MemOperand(a0, offsetof(T, dNegSubnorm)));
-    __ class_d(f6, f4);
-    __ Sdc1(f6, MemOperand(a0, offsetof(T, dNegSubnorm)));
-
-    __ Ldc1(f4, MemOperand(a0, offsetof(T, dNegZero)));
-    __ class_d(f6, f4);
-    __ Sdc1(f6, MemOperand(a0, offsetof(T, dNegZero)));
-
-    __ Ldc1(f4, MemOperand(a0, offsetof(T, dPosInf)));
-    __ class_d(f6, f4);
-    __ Sdc1(f6, MemOperand(a0, offsetof(T, dPosInf)));
-
-    __ Ldc1(f4, MemOperand(a0, offsetof(T, dPosNorm)));
-    __ class_d(f6, f4);
-    __ Sdc1(f6, MemOperand(a0, offsetof(T, dPosNorm)));
-
-    __ Ldc1(f4, MemOperand(a0, offsetof(T, dPosSubnorm)));
-    __ class_d(f6, f4);
-    __ Sdc1(f6, MemOperand(a0, offsetof(T, dPosSubnorm)));
-
-    __ Ldc1(f4, MemOperand(a0, offsetof(T, dPosZero)));
-    __ class_d(f6, f4);
-    __ Sdc1(f6, MemOperand(a0, offsetof(T, dPosZero)));
-
-    // Testing instruction CLASS.S
-    __ lwc1(f4, MemOperand(a0, offsetof(T, fSignalingNan)));
-    __ class_s(f6, f4);
-    __ swc1(f6, MemOperand(a0, offsetof(T, fSignalingNan)));
-
-    __ lwc1(f4, MemOperand(a0, offsetof(T, fQuietNan)));
-    __ class_s(f6, f4);
-    __ swc1(f6, MemOperand(a0, offsetof(T, fQuietNan)));
-
-    __ lwc1(f4, MemOperand(a0, offsetof(T, fNegInf)));
-    __ class_s(f6, f4);
-    __ swc1(f6, MemOperand(a0, offsetof(T, fNegInf)));
-
-    __ lwc1(f4, MemOperand(a0, offsetof(T, fNegNorm)));
-    __ class_s(f6, f4);
-    __ swc1(f6, MemOperand(a0, offsetof(T, fNegNorm)));
-
-    __ lwc1(f4, MemOperand(a0, offsetof(T, fNegSubnorm)));
-    __ class_s(f6, f4);
-    __ swc1(f6, MemOperand(a0, offsetof(T, fNegSubnorm)));
-
-    __ lwc1(f4, MemOperand(a0, offsetof(T, fNegZero)));
-    __ class_s(f6, f4);
-    __ swc1(f6, MemOperand(a0, offsetof(T, fNegZero)));
-
-    __ lwc1(f4, MemOperand(a0, offsetof(T, fPosInf)));
-    __ class_s(f6, f4);
-    __ swc1(f6, MemOperand(a0, offsetof(T, fPosInf)));
-
-    __ lwc1(f4, MemOperand(a0, offsetof(T, fPosNorm)));
-    __ class_s(f6, f4);
-    __ swc1(f6, MemOperand(a0, offsetof(T, fPosNorm)));
-
-    __ lwc1(f4, MemOperand(a0, offsetof(T, fPosSubnorm)));
-    __ class_s(f6, f4);
-    __ swc1(f6, MemOperand(a0, offsetof(T, fPosSubnorm)));
-
-    __ lwc1(f4, MemOperand(a0, offsetof(T, fPosZero)));
-    __ class_s(f6, f4);
-    __ swc1(f6, MemOperand(a0, offsetof(T, fPosZero)));
-
-    __ jr(ra);
-    __ nop();
-
-    CodeDesc desc;
-    assm.GetCode(isolate, &desc);
-    Handle<Code> code =
-        Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-    auto f = GeneratedCode<F3>::FromCode(*code);
-
-    t.dSignalingNan = std::numeric_limits<double>::signaling_NaN();
-    t.dQuietNan = std::numeric_limits<double>::quiet_NaN();
-    t.dNegInf = -1.0 / 0.0;
-    t.dNegNorm = -5.0;
-    t.dNegSubnorm = -DBL_MIN / 2.0;
-    t.dNegZero = -0.0;
-    t.dPosInf = 2.0 / 0.0;
-    t.dPosNorm = 275.35;
-    t.dPosSubnorm = DBL_MIN / 2.0;
-    t.dPosZero = +0.0;
-    // Float test values
-
-    t.fSignalingNan = std::numeric_limits<float>::signaling_NaN();
-    t.fQuietNan = std::numeric_limits<float>::quiet_NaN();
-    t.fNegInf = -0.5/0.0;
-    t.fNegNorm = -FLT_MIN;
-    t.fNegSubnorm = -FLT_MIN / 1.5;
-    t.fNegZero = -0.0;
-    t.fPosInf = 100000.0 / 0.0;
-    t.fPosNorm = FLT_MAX;
-    t.fPosSubnorm = FLT_MIN / 20.0;
-    t.fPosZero = +0.0;
-
-    f.Call(&t, 0, 0, 0, 0);
-    // Expected double results.
-    CHECK_EQ(base::bit_cast<int64_t>(t.dSignalingNan), 0x001);
-    CHECK_EQ(base::bit_cast<int64_t>(t.dQuietNan), 0x002);
-    CHECK_EQ(base::bit_cast<int64_t>(t.dNegInf), 0x004);
-    CHECK_EQ(base::bit_cast<int64_t>(t.dNegNorm), 0x008);
-    CHECK_EQ(base::bit_cast<int64_t>(t.dNegSubnorm), 0x010);
-    CHECK_EQ(base::bit_cast<int64_t>(t.dNegZero), 0x020);
-    CHECK_EQ(base::bit_cast<int64_t>(t.dPosInf), 0x040);
-    CHECK_EQ(base::bit_cast<int64_t>(t.dPosNorm), 0x080);
-    CHECK_EQ(base::bit_cast<int64_t>(t.dPosSubnorm), 0x100);
-    CHECK_EQ(base::bit_cast<int64_t>(t.dPosZero), 0x200);
-
-    // Expected float results.
-    CHECK_EQ(base::bit_cast<int32_t>(t.fSignalingNan), 0x001);
-    CHECK_EQ(base::bit_cast<int32_t>(t.fQuietNan), 0x002);
-    CHECK_EQ(base::bit_cast<int32_t>(t.fNegInf), 0x004);
-    CHECK_EQ(base::bit_cast<int32_t>(t.fNegNorm), 0x008);
-    CHECK_EQ(base::bit_cast<int32_t>(t.fNegSubnorm), 0x010);
-    CHECK_EQ(base::bit_cast<int32_t>(t.fNegZero), 0x020);
-    CHECK_EQ(base::bit_cast<int32_t>(t.fPosInf), 0x040);
-    CHECK_EQ(base::bit_cast<int32_t>(t.fPosNorm), 0x080);
-    CHECK_EQ(base::bit_cast<int32_t>(t.fPosSubnorm), 0x100);
-    CHECK_EQ(base::bit_cast<int32_t>(t.fPosZero), 0x200);
-  }
-}
-
-
-TEST(ABS) {
-  CcTest::InitializeVM();
-  Isolate* isolate = CcTest::i_isolate();
-  HandleScope scope(isolate);
-  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-
-  struct TestFloat {
-    int64_t fir;
-    double a;
-    float b;
-    double fcsr;
-  };
-
-  TestFloat test;
-
-  // Save FIR.
-  __ cfc1(a1, FCSR);
-  // Disable FPU exceptions.
-  __ ctc1(zero_reg, FCSR);
-
-  __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
-  __ abs_d(f10, f4);
-  __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, a)));
-
-  __ lwc1(f4, MemOperand(a0, offsetof(TestFloat, b)));
-  __ abs_s(f10, f4);
-  __ swc1(f10, MemOperand(a0, offsetof(TestFloat, b)));
-
-  // Restore FCSR.
-  __ ctc1(a1, FCSR);
-
-  __ jr(ra);
-  __ nop();
-
-  CodeDesc desc;
-  assm.GetCode(isolate, &desc);
-  Handle<Code> code =
-      Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-  auto f = GeneratedCode<F3>::FromCode(*code);
-  test.a = -2.0;
-  test.b = -2.0;
-  (f.Call(&test, 0, 0, 0, 0));
-  CHECK_EQ(test.a, 2.0);
-  CHECK_EQ(test.b, 2.0);
-
-  test.a = 2.0;
-  test.b = 2.0;
-  (f.Call(&test, 0, 0, 0, 0));
-  CHECK_EQ(test.a, 2.0);
-  CHECK_EQ(test.b, 2.0);
-
-  // Testing biggest positive number
-  test.a = std::numeric_limits<double>::max();
-  test.b = std::numeric_limits<float>::max();
-  (f.Call(&test, 0, 0, 0, 0));
-  CHECK_EQ(test.a, std::numeric_limits<double>::max());
-  CHECK_EQ(test.b, std::numeric_limits<float>::max());
-
-  // Testing smallest negative number
-  test.a = -std::numeric_limits<double>::max();  // lowest()
-  test.b = -std::numeric_limits<float>::max();  // lowest()
-  (f.Call(&test, 0, 0, 0, 0));
-  CHECK_EQ(test.a, std::numeric_limits<double>::max());
-  CHECK_EQ(test.b, std::numeric_limits<float>::max());
-
-  // Testing smallest positive number
-  test.a = -std::numeric_limits<double>::min();
-  test.b = -std::numeric_limits<float>::min();
-  (f.Call(&test, 0, 0, 0, 0));
-  CHECK_EQ(test.a, std::numeric_limits<double>::min());
-  CHECK_EQ(test.b, std::numeric_limits<float>::min());
-
-  // Testing infinity
-  test.a = -std::numeric_limits<double>::max()
-          / std::numeric_limits<double>::min();
-  test.b = -std::numeric_limits<float>::max()
-          / std::numeric_limits<float>::min();
-  (f.Call(&test, 0, 0, 0, 0));
-  CHECK_EQ(test.a, std::numeric_limits<double>::max()
-                 / std::numeric_limits<double>::min());
-  CHECK_EQ(test.b, std::numeric_limits<float>::max()
-                 / std::numeric_limits<float>::min());
-
-  test.a = std::numeric_limits<double>::quiet_NaN();
-  test.b = std::numeric_limits<float>::quiet_NaN();
-  (f.Call(&test, 0, 0, 0, 0));
-  CHECK(std::isnan(test.a));
-  CHECK(std::isnan(test.b));
-
-  test.a = std::numeric_limits<double>::signaling_NaN();
-  test.b = std::numeric_limits<float>::signaling_NaN();
-  (f.Call(&test, 0, 0, 0, 0));
-  CHECK(std::isnan(test.a));
-  CHECK(std::isnan(test.b));
-}
-
-
-TEST(ADD_FMT) {
-  CcTest::InitializeVM();
-  Isolate* isolate = CcTest::i_isolate();
-  HandleScope scope(isolate);
-  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-
-  struct TestFloat {
-    double a;
-    double b;
-    double c;
-    float fa;
-    float fb;
-    float fc;
-  };
-
-  TestFloat test;
-
-  __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, a)));
-  __ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, b)));
-  __ add_d(f10, f8, f4);
-  __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, c)));
-
-  __ lwc1(f4, MemOperand(a0, offsetof(TestFloat, fa)));
-  __ lwc1(f8, MemOperand(a0, offsetof(TestFloat, fb)));
-  __ add_s(f10, f8, f4);
-  __ swc1(f10, MemOperand(a0, offsetof(TestFloat, fc)));
-
-  __ jr(ra);
-  __ nop();
-
-  CodeDesc desc;
-  assm.GetCode(isolate, &desc);
-  Handle<Code> code =
-      Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-  auto f = GeneratedCode<F3>::FromCode(*code);
-  test.a = 2.0;
-  test.b = 3.0;
-  test.fa = 2.0;
-  test.fb = 3.0;
-  (f.Call(&test, 0, 0, 0, 0));
-  CHECK_EQ(test.c, 5.0);
-  CHECK_EQ(test.fc, 5.0);
-
-  test.a = std::numeric_limits<double>::max();
-  test.b = -std::numeric_limits<double>::max();  // lowest()
-  test.fa = std::numeric_limits<float>::max();
-  test.fb = -std::numeric_limits<float>::max();  // lowest()
-  (f.Call(&test, 0, 0, 0, 0));
-  CHECK_EQ(test.c, 0.0);
-  CHECK_EQ(test.fc, 0.0);
-
-  test.a = std::numeric_limits<double>::max();
-  test.b = std::numeric_limits<double>::max();
-  test.fa = std::numeric_limits<float>::max();
-  test.fb = std::numeric_limits<float>::max();
-  (f.Call(&test, 0, 0, 0, 0));
-  CHECK(!std::isfinite(test.c));
-  CHECK(!std::isfinite(test.fc));
-
-  test.a = 5.0;
-  test.b = std::numeric_limits<double>::signaling_NaN();
-  test.fa = 5.0;
-  test.fb = std::numeric_limits<float>::signaling_NaN();
-  (f.Call(&test, 0, 0, 0, 0));
-  CHECK(std::isnan(test.c));
-  CHECK(std::isnan(test.fc));
-}
-
-
-TEST(C_COND_FMT) {
-  if ((IsMipsArchVariant(kMips32r1)) || (IsMipsArchVariant(kMips32r2))) {
-    CcTest::InitializeVM();
-    Isolate* isolate = CcTest::i_isolate();
-    HandleScope scope(isolate);
-    MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-
-    struct TestFloat {
-      double dOp1;
-      double dOp2;
-      uint32_t dF;
-      uint32_t dUn;
-      uint32_t dEq;
-      uint32_t dUeq;
-      uint32_t dOlt;
-      uint32_t dUlt;
-      uint32_t dOle;
-      uint32_t dUle;
-      float fOp1;
-      float fOp2;
-      uint32_t fF;
-      uint32_t fUn;
-      uint32_t fEq;
-      uint32_t fUeq;
-      uint32_t fOlt;
-      uint32_t fUlt;
-      uint32_t fOle;
-      uint32_t fUle;
-    };
-
-    TestFloat test;
-
-    __ li(t1, 1);
-
-    __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, dOp1)));
-    __ Ldc1(f6, MemOperand(a0, offsetof(TestFloat, dOp2)));
-
-    __ lwc1(f14, MemOperand(a0, offsetof(TestFloat, fOp1)));
-    __ lwc1(f16, MemOperand(a0, offsetof(TestFloat, fOp2)));
-
-    __ mov(t2, zero_reg);
-    __ mov(t3, zero_reg);
-    __ c_d(F, f4, f6, 0);
-    __ c_s(F, f14, f16, 2);
-    __ movt(t2, t1, 0);
-    __ movt(t3, t1, 2);
-    __ sw(t2, MemOperand(a0, offsetof(TestFloat, dF)));
-    __ sw(t3, MemOperand(a0, offsetof(TestFloat, fF)));
-
-    __ mov(t2, zero_reg);
-    __ mov(t3, zero_reg);
-    __ c_d(UN, f4, f6, 2);
-    __ c_s(UN, f14, f16, 4);
-    __ movt(t2, t1, 2);
-    __ movt(t3, t1, 4);
-    __ sw(t2, MemOperand(a0, offsetof(TestFloat, dUn)));
-    __ sw(t3, MemOperand(a0, offsetof(TestFloat, fUn)));
-
-    __ mov(t2, zero_reg);
-    __ mov(t3, zero_reg);
-    __ c_d(EQ, f4, f6, 4);
-    __ c_s(EQ, f14, f16, 6);
-    __ movt(t2, t1, 4);
-    __ movt(t3, t1, 6);
-    __ sw(t2, MemOperand(a0, offsetof(TestFloat, dEq)));
-    __ sw(t3, MemOperand(a0, offsetof(TestFloat, fEq)));
-
-    __ mov(t2, zero_reg);
-    __ mov(t3, zero_reg);
-    __ c_d(UEQ, f4, f6, 6);
-    __ c_s(UEQ, f14, f16, 0);
-    __ movt(t2, t1, 6);
-    __ movt(t3, t1, 0);
-    __ sw(t2, MemOperand(a0, offsetof(TestFloat, dUeq)));
-    __ sw(t3, MemOperand(a0, offsetof(TestFloat, fUeq)));
-
-    __ mov(t2, zero_reg);
-    __ mov(t3, zero_reg);
-    __ c_d(OLT, f4, f6, 0);
-    __ c_s(OLT, f14, f16, 2);
-    __ movt(t2, t1, 0);
-    __ movt(t3, t1, 2);
-    __ sw(t2, MemOperand(a0, offsetof(TestFloat, dOlt)));
-    __ sw(t3, MemOperand(a0, offsetof(TestFloat, fOlt)));
-
-    __ mov(t2, zero_reg);
-    __ mov(t3, zero_reg);
-    __ c_d(ULT, f4, f6, 2);
-    __ c_s(ULT, f14, f16, 4);
-    __ movt(t2, t1, 2);
-    __ movt(t3, t1, 4);
-    __ sw(t2, MemOperand(a0, offsetof(TestFloat, dUlt)));
-    __ sw(t3, MemOperand(a0, offsetof(TestFloat, fUlt)));
-
-    __ mov(t2, zero_reg);
-    __ mov(t3, zero_reg);
-    __ c_d(OLE, f4, f6, 4);
-    __ c_s(OLE, f14, f16, 6);
-    __ movt(t2, t1, 4);
-    __ movt(t3, t1, 6);
-    __ sw(t2, MemOperand(a0, offsetof(TestFloat, dOle)));
-    __ sw(t3, MemOperand(a0, offsetof(TestFloat, fOle)));
-
-    __ mov(t2, zero_reg);
-    __ mov(t3, zero_reg);
-    __ c_d(ULE, f4, f6, 6);
-    __ c_s(ULE, f14, f16, 0);
-    __ movt(t2, t1, 6);
-    __ movt(t3, t1, 0);
-    __ sw(t2, MemOperand(a0, offsetof(TestFloat, dUle)));
-    __ sw(t3, MemOperand(a0, offsetof(TestFloat, fUle)));
-
-    __ jr(ra);
-    __ nop();
-
-    CodeDesc desc;
-    assm.GetCode(isolate, &desc);
-    Handle<Code> code =
-        Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-    auto f = GeneratedCode<F3>::FromCode(*code);
-    test.dOp1 = 2.0;
-    test.dOp2 = 3.0;
-    test.fOp1 = 2.0;
-    test.fOp2 = 3.0;
-    (f.Call(&test, 0, 0, 0, 0));
-    CHECK_EQ(test.dF, 0U);
-    CHECK_EQ(test.dUn, 0U);
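-    // With ordered, unequal operands (2.0 vs. 3.0) only the less-than and
-    // less-or-equal predicates below should set the condition flag.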
CHECK_EQ(test.dEq, 0U); - CHECK_EQ(test.dUeq, 0U); - CHECK_EQ(test.dOlt, 1U); - CHECK_EQ(test.dUlt, 1U); - CHECK_EQ(test.dOle, 1U); - CHECK_EQ(test.dUle, 1U); - CHECK_EQ(test.fF, 0U); - CHECK_EQ(test.fUn, 0U); - CHECK_EQ(test.fEq, 0U); - CHECK_EQ(test.fUeq, 0U); - CHECK_EQ(test.fOlt, 1U); - CHECK_EQ(test.fUlt, 1U); - CHECK_EQ(test.fOle, 1U); - CHECK_EQ(test.fUle, 1U); - - test.dOp1 = std::numeric_limits::max(); - test.dOp2 = std::numeric_limits::min(); - test.fOp1 = std::numeric_limits::min(); - test.fOp2 = -std::numeric_limits::max(); // lowest() - (f.Call(&test, 0, 0, 0, 0)); - CHECK_EQ(test.dF, 0U); - CHECK_EQ(test.dUn, 0U); - CHECK_EQ(test.dEq, 0U); - CHECK_EQ(test.dUeq, 0U); - CHECK_EQ(test.dOlt, 0U); - CHECK_EQ(test.dUlt, 0U); - CHECK_EQ(test.dOle, 0U); - CHECK_EQ(test.dUle, 0U); - CHECK_EQ(test.fF, 0U); - CHECK_EQ(test.fUn, 0U); - CHECK_EQ(test.fEq, 0U); - CHECK_EQ(test.fUeq, 0U); - CHECK_EQ(test.fOlt, 0U); - CHECK_EQ(test.fUlt, 0U); - CHECK_EQ(test.fOle, 0U); - CHECK_EQ(test.fUle, 0U); - - test.dOp1 = -std::numeric_limits::max(); // lowest() - test.dOp2 = -std::numeric_limits::max(); // lowest() - test.fOp1 = std::numeric_limits::max(); - test.fOp2 = std::numeric_limits::max(); - (f.Call(&test, 0, 0, 0, 0)); - CHECK_EQ(test.dF, 0U); - CHECK_EQ(test.dUn, 0U); - CHECK_EQ(test.dEq, 1U); - CHECK_EQ(test.dUeq, 1U); - CHECK_EQ(test.dOlt, 0U); - CHECK_EQ(test.dUlt, 0U); - CHECK_EQ(test.dOle, 1U); - CHECK_EQ(test.dUle, 1U); - CHECK_EQ(test.fF, 0U); - CHECK_EQ(test.fUn, 0U); - CHECK_EQ(test.fEq, 1U); - CHECK_EQ(test.fUeq, 1U); - CHECK_EQ(test.fOlt, 0U); - CHECK_EQ(test.fUlt, 0U); - CHECK_EQ(test.fOle, 1U); - CHECK_EQ(test.fUle, 1U); - - test.dOp1 = std::numeric_limits::quiet_NaN(); - test.dOp2 = 0.0; - test.fOp1 = std::numeric_limits::quiet_NaN(); - test.fOp2 = 0.0; - (f.Call(&test, 0, 0, 0, 0)); - CHECK_EQ(test.dF, 0U); - CHECK_EQ(test.dUn, 1U); - CHECK_EQ(test.dEq, 0U); - CHECK_EQ(test.dUeq, 1U); - CHECK_EQ(test.dOlt, 0U); - CHECK_EQ(test.dUlt, 1U); - CHECK_EQ(test.dOle, 0U); - CHECK_EQ(test.dUle, 1U); - CHECK_EQ(test.fF, 0U); - CHECK_EQ(test.fUn, 1U); - CHECK_EQ(test.fEq, 0U); - CHECK_EQ(test.fUeq, 1U); - CHECK_EQ(test.fOlt, 0U); - CHECK_EQ(test.fUlt, 1U); - CHECK_EQ(test.fOle, 0U); - CHECK_EQ(test.fUle, 1U); - } -} - - -TEST(CMP_COND_FMT) { - if (IsMipsArchVariant(kMips32r6)) { - CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - - struct TestFloat { - double dOp1; - double dOp2; - double dF; - double dUn; - double dEq; - double dUeq; - double dOlt; - double dUlt; - double dOle; - double dUle; - double dOr; - double dUne; - double dNe; - float fOp1; - float fOp2; - float fF; - float fUn; - float fEq; - float fUeq; - float fOlt; - float fUlt; - float fOle; - float fUle; - float fOr; - float fUne; - float fNe; - }; - - TestFloat test; - - __ li(t1, 1); - - __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, dOp1))); - __ Ldc1(f6, MemOperand(a0, offsetof(TestFloat, dOp2))); - - __ lwc1(f14, MemOperand(a0, offsetof(TestFloat, fOp1))); - __ lwc1(f16, MemOperand(a0, offsetof(TestFloat, fOp2))); - - __ cmp_d(F, f2, f4, f6); - __ cmp_s(F, f12, f14, f16); - __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dF))); - __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fF)) ); - - __ cmp_d(UN, f2, f4, f6); - __ cmp_s(UN, f12, f14, f16); - __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUn))); - __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fUn)) ); - - __ cmp_d(EQ, f2, f4, f6); - __ 
cmp_s(EQ, f12, f14, f16); - __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dEq))); - __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fEq)) ); - - __ cmp_d(UEQ, f2, f4, f6); - __ cmp_s(UEQ, f12, f14, f16); - __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUeq))); - __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fUeq)) ); - - __ cmp_d(LT, f2, f4, f6); - __ cmp_s(LT, f12, f14, f16); - __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dOlt))); - __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fOlt)) ); - - __ cmp_d(ULT, f2, f4, f6); - __ cmp_s(ULT, f12, f14, f16); - __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUlt))); - __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fUlt)) ); - - __ cmp_d(LE, f2, f4, f6); - __ cmp_s(LE, f12, f14, f16); - __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dOle))); - __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fOle)) ); - - __ cmp_d(ULE, f2, f4, f6); - __ cmp_s(ULE, f12, f14, f16); - __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUle))); - __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fUle)) ); - - __ cmp_d(ORD, f2, f4, f6); - __ cmp_s(ORD, f12, f14, f16); - __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dOr))); - __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fOr)) ); - - __ cmp_d(UNE, f2, f4, f6); - __ cmp_s(UNE, f12, f14, f16); - __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dUne))); - __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fUne)) ); - - __ cmp_d(NE, f2, f4, f6); - __ cmp_s(NE, f12, f14, f16); - __ Sdc1(f2, MemOperand(a0, offsetof(TestFloat, dNe))); - __ swc1(f12, MemOperand(a0, offsetof(TestFloat, fNe)) ); - - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - auto f = GeneratedCode::FromCode(*code); - uint64_t dTrue = 0xFFFFFFFFFFFFFFFF; - uint64_t dFalse = 0x0000000000000000; - uint32_t fTrue = 0xFFFFFFFF; - uint32_t fFalse = 0x00000000; - - test.dOp1 = 2.0; - test.dOp2 = 3.0; - test.fOp1 = 2.0; - test.fOp2 = 3.0; - (f.Call(&test, 0, 0, 0, 0)); - CHECK_EQ(base::bit_cast(test.dF), dFalse); - CHECK_EQ(base::bit_cast(test.dUn), dFalse); - CHECK_EQ(base::bit_cast(test.dEq), dFalse); - CHECK_EQ(base::bit_cast(test.dUeq), dFalse); - CHECK_EQ(base::bit_cast(test.dOlt), dTrue); - CHECK_EQ(base::bit_cast(test.dUlt), dTrue); - CHECK_EQ(base::bit_cast(test.dOle), dTrue); - CHECK_EQ(base::bit_cast(test.dUle), dTrue); - CHECK_EQ(base::bit_cast(test.dOr), dTrue); - CHECK_EQ(base::bit_cast(test.dUne), dTrue); - CHECK_EQ(base::bit_cast(test.dNe), dTrue); - CHECK_EQ(base::bit_cast(test.fF), fFalse); - CHECK_EQ(base::bit_cast(test.fUn), fFalse); - CHECK_EQ(base::bit_cast(test.fEq), fFalse); - CHECK_EQ(base::bit_cast(test.fUeq), fFalse); - CHECK_EQ(base::bit_cast(test.fOlt), fTrue); - CHECK_EQ(base::bit_cast(test.fUlt), fTrue); - CHECK_EQ(base::bit_cast(test.fOle), fTrue); - CHECK_EQ(base::bit_cast(test.fUle), fTrue); - - test.dOp1 = std::numeric_limits::max(); - test.dOp2 = std::numeric_limits::min(); - test.fOp1 = std::numeric_limits::min(); - test.fOp2 = -std::numeric_limits::max(); // lowest() - (f.Call(&test, 0, 0, 0, 0)); - CHECK_EQ(base::bit_cast(test.dF), dFalse); - CHECK_EQ(base::bit_cast(test.dUn), dFalse); - CHECK_EQ(base::bit_cast(test.dEq), dFalse); - CHECK_EQ(base::bit_cast(test.dUeq), dFalse); - CHECK_EQ(base::bit_cast(test.dOlt), dFalse); - CHECK_EQ(base::bit_cast(test.dUlt), dFalse); - CHECK_EQ(base::bit_cast(test.dOle), dFalse); - CHECK_EQ(base::bit_cast(test.dUle), dFalse); - CHECK_EQ(base::bit_cast(test.dOr), dTrue); - 
CHECK_EQ(base::bit_cast(test.dUne), dTrue); - CHECK_EQ(base::bit_cast(test.dNe), dTrue); - CHECK_EQ(base::bit_cast(test.fF), fFalse); - CHECK_EQ(base::bit_cast(test.fUn), fFalse); - CHECK_EQ(base::bit_cast(test.fEq), fFalse); - CHECK_EQ(base::bit_cast(test.fUeq), fFalse); - CHECK_EQ(base::bit_cast(test.fOlt), fFalse); - CHECK_EQ(base::bit_cast(test.fUlt), fFalse); - CHECK_EQ(base::bit_cast(test.fOle), fFalse); - CHECK_EQ(base::bit_cast(test.fUle), fFalse); - - test.dOp1 = -std::numeric_limits::max(); // lowest() - test.dOp2 = -std::numeric_limits::max(); // lowest() - test.fOp1 = std::numeric_limits::max(); - test.fOp2 = std::numeric_limits::max(); - (f.Call(&test, 0, 0, 0, 0)); - CHECK_EQ(base::bit_cast(test.dF), dFalse); - CHECK_EQ(base::bit_cast(test.dUn), dFalse); - CHECK_EQ(base::bit_cast(test.dEq), dTrue); - CHECK_EQ(base::bit_cast(test.dUeq), dTrue); - CHECK_EQ(base::bit_cast(test.dOlt), dFalse); - CHECK_EQ(base::bit_cast(test.dUlt), dFalse); - CHECK_EQ(base::bit_cast(test.dOle), dTrue); - CHECK_EQ(base::bit_cast(test.dUle), dTrue); - CHECK_EQ(base::bit_cast(test.dOr), dTrue); - CHECK_EQ(base::bit_cast(test.dUne), dFalse); - CHECK_EQ(base::bit_cast(test.dNe), dFalse); - CHECK_EQ(base::bit_cast(test.fF), fFalse); - CHECK_EQ(base::bit_cast(test.fUn), fFalse); - CHECK_EQ(base::bit_cast(test.fEq), fTrue); - CHECK_EQ(base::bit_cast(test.fUeq), fTrue); - CHECK_EQ(base::bit_cast(test.fOlt), fFalse); - CHECK_EQ(base::bit_cast(test.fUlt), fFalse); - CHECK_EQ(base::bit_cast(test.fOle), fTrue); - CHECK_EQ(base::bit_cast(test.fUle), fTrue); - - test.dOp1 = std::numeric_limits::quiet_NaN(); - test.dOp2 = 0.0; - test.fOp1 = std::numeric_limits::quiet_NaN(); - test.fOp2 = 0.0; - (f.Call(&test, 0, 0, 0, 0)); - CHECK_EQ(base::bit_cast(test.dF), dFalse); - CHECK_EQ(base::bit_cast(test.dUn), dTrue); - CHECK_EQ(base::bit_cast(test.dEq), dFalse); - CHECK_EQ(base::bit_cast(test.dUeq), dTrue); - CHECK_EQ(base::bit_cast(test.dOlt), dFalse); - CHECK_EQ(base::bit_cast(test.dUlt), dTrue); - CHECK_EQ(base::bit_cast(test.dOle), dFalse); - CHECK_EQ(base::bit_cast(test.dUle), dTrue); - CHECK_EQ(base::bit_cast(test.dOr), dFalse); - CHECK_EQ(base::bit_cast(test.dUne), dTrue); - CHECK_EQ(base::bit_cast(test.dNe), dFalse); - CHECK_EQ(base::bit_cast(test.fF), fFalse); - CHECK_EQ(base::bit_cast(test.fUn), fTrue); - CHECK_EQ(base::bit_cast(test.fEq), fFalse); - CHECK_EQ(base::bit_cast(test.fUeq), fTrue); - CHECK_EQ(base::bit_cast(test.fOlt), fFalse); - CHECK_EQ(base::bit_cast(test.fUlt), fTrue); - CHECK_EQ(base::bit_cast(test.fOle), fFalse); - CHECK_EQ(base::bit_cast(test.fUle), fTrue); - } -} - - -TEST(CVT) { - CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - - struct TestFloat { - float cvt_d_s_in; - double cvt_d_s_out; - int32_t cvt_d_w_in; - double cvt_d_w_out; - int64_t cvt_d_l_in; - double cvt_d_l_out; - - float cvt_l_s_in; - int64_t cvt_l_s_out; - double cvt_l_d_in; - int64_t cvt_l_d_out; - - double cvt_s_d_in; - float cvt_s_d_out; - int32_t cvt_s_w_in; - float cvt_s_w_out; - int64_t cvt_s_l_in; - float cvt_s_l_out; - - float cvt_w_s_in; - int32_t cvt_w_s_out; - double cvt_w_d_in; - int32_t cvt_w_d_out; - }; - - TestFloat test; - - // Save FCSR. - __ cfc1(a1, FCSR); - // Disable FPU exceptions. 
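-  // (Otherwise the deliberately out-of-range conversions in this test could
-  // raise Invalid Operation traps instead of producing default results.)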
- __ ctc1(zero_reg, FCSR); - -#define GENERATE_CVT_TEST(x, y, z) \ - __ y##c1(f0, MemOperand(a0, offsetof(TestFloat, x##_in))); \ - __ x(f0, f0); \ - __ nop(); \ - __ z##c1(f0, MemOperand(a0, offsetof(TestFloat, x##_out))); - - GENERATE_CVT_TEST(cvt_d_s, lw, Sd) - GENERATE_CVT_TEST(cvt_d_w, lw, Sd) - if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && - IsFp64Mode()) { - GENERATE_CVT_TEST(cvt_d_l, Ld, Sd) - } - - if (IsFp64Mode()) { - GENERATE_CVT_TEST(cvt_l_s, lw, Sd) - GENERATE_CVT_TEST(cvt_l_d, Ld, Sd) - } - - GENERATE_CVT_TEST(cvt_s_d, Ld, sw) - GENERATE_CVT_TEST(cvt_s_w, lw, sw) - if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && - IsFp64Mode()) { - GENERATE_CVT_TEST(cvt_s_l, Ld, sw) - } - - GENERATE_CVT_TEST(cvt_w_s, lw, sw) - GENERATE_CVT_TEST(cvt_w_d, Ld, sw) - - // Restore FCSR. - __ ctc1(a1, FCSR); - - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - auto f = GeneratedCode::FromCode(*code); - - test.cvt_d_s_in = -0.51; - test.cvt_d_w_in = -1; - test.cvt_d_l_in = -1; - test.cvt_l_s_in = -0.51; - test.cvt_l_d_in = -0.51; - test.cvt_s_d_in = -0.51; - test.cvt_s_w_in = -1; - test.cvt_s_l_in = -1; - test.cvt_w_s_in = -0.51; - test.cvt_w_d_in = -0.51; - - (f.Call(&test, 0, 0, 0, 0)); - CHECK_EQ(test.cvt_d_s_out, static_cast(test.cvt_d_s_in)); - CHECK_EQ(test.cvt_d_w_out, static_cast(test.cvt_d_w_in)); - if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && - IsFp64Mode()) { - CHECK_EQ(test.cvt_d_l_out, static_cast(test.cvt_d_l_in)); - } - if (IsFp64Mode()) { - CHECK_EQ(-1, test.cvt_l_s_out); - CHECK_EQ(-1, test.cvt_l_d_out); - } - CHECK_EQ(test.cvt_s_d_out, static_cast(test.cvt_s_d_in)); - CHECK_EQ(test.cvt_s_w_out, static_cast(test.cvt_s_w_in)); - if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && - IsFp64Mode()) { - CHECK_EQ(test.cvt_s_l_out, static_cast(test.cvt_s_l_in)); - } - CHECK_EQ(-1, test.cvt_w_s_out); - CHECK_EQ(-1, test.cvt_w_d_out); - - test.cvt_d_s_in = 0.49; - test.cvt_d_w_in = 1; - test.cvt_d_l_in = 1; - test.cvt_l_s_in = 0.49; - test.cvt_l_d_in = 0.49; - test.cvt_s_d_in = 0.49; - test.cvt_s_w_in = 1; - test.cvt_s_l_in = 1; - test.cvt_w_s_in = 0.49; - test.cvt_w_d_in = 0.49; - - (f.Call(&test, 0, 0, 0, 0)); - CHECK_EQ(test.cvt_d_s_out, static_cast(test.cvt_d_s_in)); - CHECK_EQ(test.cvt_d_w_out, static_cast(test.cvt_d_w_in)); - if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && - IsFp64Mode()) { - CHECK_EQ(test.cvt_d_l_out, static_cast(test.cvt_d_l_in)); - } - if (IsFp64Mode()) { - CHECK_EQ(0, test.cvt_l_s_out); - CHECK_EQ(0, test.cvt_l_d_out); - } - CHECK_EQ(test.cvt_s_d_out, static_cast(test.cvt_s_d_in)); - CHECK_EQ(test.cvt_s_w_out, static_cast(test.cvt_s_w_in)); - if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && - IsFp64Mode()) { - CHECK_EQ(test.cvt_s_l_out, static_cast(test.cvt_s_l_in)); - } - CHECK_EQ(0, test.cvt_w_s_out); - CHECK_EQ(0, test.cvt_w_d_out); - - test.cvt_d_s_in = std::numeric_limits::max(); - test.cvt_d_w_in = std::numeric_limits::max(); - test.cvt_d_l_in = std::numeric_limits::max(); - test.cvt_l_s_in = std::numeric_limits::max(); - test.cvt_l_d_in = std::numeric_limits::max(); - test.cvt_s_d_in = std::numeric_limits::max(); - test.cvt_s_w_in = std::numeric_limits::max(); - test.cvt_s_l_in = std::numeric_limits::max(); - test.cvt_w_s_in = std::numeric_limits::max(); - test.cvt_w_d_in = std::numeric_limits::max(); - - 
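-  // With every input at its type's maximum, the out-of-range
-  // float-to-integer conversions below are expected to saturate at the
-  // destination type's maximum value.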
(f.Call(&test, 0, 0, 0, 0)); - CHECK_EQ(test.cvt_d_s_out, static_cast(test.cvt_d_s_in)); - CHECK_EQ(test.cvt_d_w_out, static_cast(test.cvt_d_w_in)); - if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && - IsFp64Mode()) { - CHECK_EQ(test.cvt_d_l_out, static_cast(test.cvt_d_l_in)); - } - if (IsFp64Mode()) { - CHECK_EQ(test.cvt_l_s_out, std::numeric_limits::max()); - CHECK_EQ(test.cvt_l_d_out, std::numeric_limits::max()); - } - CHECK_EQ(test.cvt_s_d_out, static_cast(test.cvt_s_d_in)); - CHECK_EQ(test.cvt_s_w_out, static_cast(test.cvt_s_w_in)); - if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && - IsFp64Mode()) { - CHECK_EQ(test.cvt_s_l_out, static_cast(test.cvt_s_l_in)); - } - CHECK_EQ(test.cvt_w_s_out, std::numeric_limits::max()); - CHECK_EQ(test.cvt_w_d_out, std::numeric_limits::max()); - - - test.cvt_d_s_in = -std::numeric_limits::max(); // lowest() - test.cvt_d_w_in = std::numeric_limits::min(); // lowest() - test.cvt_d_l_in = std::numeric_limits::min(); // lowest() - test.cvt_l_s_in = -std::numeric_limits::max(); // lowest() - test.cvt_l_d_in = -std::numeric_limits::max(); // lowest() - test.cvt_s_d_in = -std::numeric_limits::max(); // lowest() - test.cvt_s_w_in = std::numeric_limits::min(); // lowest() - test.cvt_s_l_in = std::numeric_limits::min(); // lowest() - test.cvt_w_s_in = -std::numeric_limits::max(); // lowest() - test.cvt_w_d_in = -std::numeric_limits::max(); // lowest() - - (f.Call(&test, 0, 0, 0, 0)); - CHECK_EQ(test.cvt_d_s_out, static_cast(test.cvt_d_s_in)); - CHECK_EQ(test.cvt_d_w_out, static_cast(test.cvt_d_w_in)); - if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && - IsFp64Mode()) { - CHECK_EQ(test.cvt_d_l_out, static_cast(test.cvt_d_l_in)); - } - // The returned value when converting from fixed-point to float-point - // is not consistent between board, simulator and specification - // in this test case, therefore modifying the test - if (IsFp64Mode()) { - CHECK(test.cvt_l_s_out == std::numeric_limits::min() || - test.cvt_l_s_out == std::numeric_limits::max()); - CHECK(test.cvt_l_d_out == std::numeric_limits::min() || - test.cvt_l_d_out == std::numeric_limits::max()); - } - CHECK_EQ(test.cvt_s_d_out, static_cast(test.cvt_s_d_in)); - CHECK_EQ(test.cvt_s_w_out, static_cast(test.cvt_s_w_in)); - if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && - IsFp64Mode()) { - CHECK_EQ(test.cvt_s_l_out, static_cast(test.cvt_s_l_in)); - } - CHECK(test.cvt_w_s_out == std::numeric_limits::min() || - test.cvt_w_s_out == std::numeric_limits::max()); - CHECK(test.cvt_w_d_out == std::numeric_limits::min() || - test.cvt_w_d_out == std::numeric_limits::max()); - - - test.cvt_d_s_in = std::numeric_limits::min(); - test.cvt_d_w_in = std::numeric_limits::min(); - test.cvt_d_l_in = std::numeric_limits::min(); - test.cvt_l_s_in = std::numeric_limits::min(); - test.cvt_l_d_in = std::numeric_limits::min(); - test.cvt_s_d_in = std::numeric_limits::min(); - test.cvt_s_w_in = std::numeric_limits::min(); - test.cvt_s_l_in = std::numeric_limits::min(); - test.cvt_w_s_in = std::numeric_limits::min(); - test.cvt_w_d_in = std::numeric_limits::min(); - - (f.Call(&test, 0, 0, 0, 0)); - CHECK_EQ(test.cvt_d_s_out, static_cast(test.cvt_d_s_in)); - CHECK_EQ(test.cvt_d_w_out, static_cast(test.cvt_d_w_in)); - if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && - IsFp64Mode()) { - CHECK_EQ(test.cvt_d_l_out, static_cast(test.cvt_d_l_in)); - } - if (IsFp64Mode()) { - CHECK_EQ(0, test.cvt_l_s_out); - CHECK_EQ(0, 
test.cvt_l_d_out); - } - CHECK_EQ(test.cvt_s_d_out, static_cast(test.cvt_s_d_in)); - CHECK_EQ(test.cvt_s_w_out, static_cast(test.cvt_s_w_in)); - if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && - IsFp64Mode()) { - CHECK_EQ(test.cvt_s_l_out, static_cast(test.cvt_s_l_in)); - } - CHECK_EQ(0, test.cvt_w_s_out); - CHECK_EQ(0, test.cvt_w_d_out); -} - - -TEST(DIV_FMT) { - CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - - struct Test { - double dOp1; - double dOp2; - double dRes; - float fOp1; - float fOp2; - float fRes; - }; - - Test test; - - // Save FCSR. - __ cfc1(a1, FCSR); - // Disable FPU exceptions. - __ ctc1(zero_reg, FCSR); - - __ Ldc1(f4, MemOperand(a0, offsetof(Test, dOp1))); - __ Ldc1(f2, MemOperand(a0, offsetof(Test, dOp2))); - __ nop(); - __ div_d(f6, f4, f2); - __ Sdc1(f6, MemOperand(a0, offsetof(Test, dRes))); - - __ lwc1(f4, MemOperand(a0, offsetof(Test, fOp1)) ); - __ lwc1(f2, MemOperand(a0, offsetof(Test, fOp2)) ); - __ nop(); - __ div_s(f6, f4, f2); - __ swc1(f6, MemOperand(a0, offsetof(Test, fRes)) ); - - // Restore FCSR. - __ ctc1(a1, FCSR); - - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - - auto f = GeneratedCode::FromCode(*code); - - (f.Call(&test, 0, 0, 0, 0)); - - const int test_size = 3; - - double dOp1[test_size] = { - 5.0, - DBL_MAX, - DBL_MAX, - }; - double dOp2[test_size] = { - 2.0, - 2.0, - -DBL_MAX, - }; - double dRes[test_size] = { - 2.5, - DBL_MAX / 2.0, - -1.0, - }; - float fOp1[test_size] = { - 5.0, - FLT_MAX, - FLT_MAX, - }; - float fOp2[test_size] = { - 2.0, - 2.0, - -FLT_MAX, - }; - float fRes[test_size] = { - 2.5, - FLT_MAX / 2.0, - -1.0, - }; - - for (int i = 0; i < test_size; i++) { - test.dOp1 = dOp1[i]; - test.dOp2 = dOp2[i]; - test.fOp1 = fOp1[i]; - test.fOp2 = fOp2[i]; - - (f.Call(&test, 0, 0, 0, 0)); - CHECK_EQ(test.dRes, dRes[i]); - CHECK_EQ(test.fRes, fRes[i]); - } - - test.dOp1 = DBL_MAX; - test.dOp2 = -0.0; - test.fOp1 = FLT_MAX; - test.fOp2 = -0.0; - - (f.Call(&test, 0, 0, 0, 0)); - CHECK(!std::isfinite(test.dRes)); - CHECK(!std::isfinite(test.fRes)); - - test.dOp1 = 0.0; - test.dOp2 = -0.0; - test.fOp1 = 0.0; - test.fOp2 = -0.0; - - (f.Call(&test, 0, 0, 0, 0)); - CHECK(std::isnan(test.dRes)); - CHECK(std::isnan(test.fRes)); - - test.dOp1 = std::numeric_limits::quiet_NaN(); - test.dOp2 = -5.0; - test.fOp1 = std::numeric_limits::quiet_NaN(); - test.fOp2 = -5.0; - - (f.Call(&test, 0, 0, 0, 0)); - CHECK(std::isnan(test.dRes)); - CHECK(std::isnan(test.fRes)); -} - - -uint32_t run_align(uint32_t rs_value, uint32_t rt_value, uint8_t bp) { - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - - __ align(v0, a0, a1, bp); - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - - auto f = GeneratedCode::FromCode(*code); - - uint32_t res = - reinterpret_cast(f.Call(rs_value, rt_value, 0, 0, 0)); - - return res; -} - - -TEST(r6_align) { - if (IsMipsArchVariant(kMips32r6)) { - CcTest::InitializeVM(); - - struct TestCaseAlign { - uint32_t rs_value; - uint32_t rt_value; - uint8_t bp; - uint32_t expected_res; - }; - - // clang-format off - struct TestCaseAlign tc[] = { - // rs_value, rt_value, bp, 
expected_res - {0x11223344, 0xAABBCCDD, 0, 0xAABBCCDD}, - {0x11223344, 0xAABBCCDD, 1, 0xBBCCDD11}, - {0x11223344, 0xAABBCCDD, 2, 0xCCDD1122}, - {0x11223344, 0xAABBCCDD, 3, 0xDD112233}, - }; - // clang-format on - - size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseAlign); - for (size_t i = 0; i < nr_test_cases; ++i) { - CHECK_EQ(tc[i].expected_res, run_align(tc[i].rs_value, - tc[i].rt_value, tc[i].bp)); - } - } -} - -uint32_t PC; // The program counter. - -uint32_t run_aluipc(int16_t offset) { - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - - __ aluipc(v0, offset); - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - - auto f = GeneratedCode::FromCode(*code); - PC = (uint32_t)code->entry(); // Set the program counter. - - uint32_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); - - return res; -} - - -TEST(r6_aluipc) { - if (IsMipsArchVariant(kMips32r6)) { - CcTest::InitializeVM(); - - struct TestCaseAluipc { - int16_t offset; - }; - - struct TestCaseAluipc tc[] = { - // offset - { -32768 }, // 0x8000 - { -1 }, // 0xFFFF - { 0 }, - { 1 }, - { 32767 }, // 0x7FFF - }; - - size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseAluipc); - for (size_t i = 0; i < nr_test_cases; ++i) { - PC = 0; - uint32_t res = run_aluipc(tc[i].offset); - // Now, the program_counter (PC) is set. - uint32_t expected_res = ~0x0FFFF & (PC + (tc[i].offset << 16)); - CHECK_EQ(expected_res, res); - } - } -} - - -uint32_t run_auipc(int16_t offset) { - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - - __ auipc(v0, offset); - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - - auto f = GeneratedCode::FromCode(*code); - PC = (uint32_t)code->entry(); // Set the program counter. - - uint32_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); - - return res; -} - - -TEST(r6_auipc) { - if (IsMipsArchVariant(kMips32r6)) { - CcTest::InitializeVM(); - - struct TestCaseAuipc { - int16_t offset; - }; - - struct TestCaseAuipc tc[] = { - // offset - { -32768 }, // 0x8000 - { -1 }, // 0xFFFF - { 0 }, - { 1 }, - { 32767 }, // 0x7FFF - }; - - size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseAuipc); - for (size_t i = 0; i < nr_test_cases; ++i) { - PC = 0; - uint32_t res = run_auipc(tc[i].offset); - // Now, the program_counter (PC) is set. - uint32_t expected_res = PC + (tc[i].offset << 16); - CHECK_EQ(expected_res, res); - } - } -} - - -uint32_t run_lwpc(int offset) { - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - - // 256k instructions; 2^8k - // addiu t7, t0, 0xFFFF; (0x250FFFFF) - // ... - // addiu t4, t0, 0x0000; (0x250C0000) - uint32_t addiu_start_1 = 0x25000000; - for (int32_t i = 0xFFFFF; i >= 0xC0000; --i) { - uint32_t addiu_new = addiu_start_1 + i; - __ dd(addiu_new); - } - - __ lwpc(t8, offset); // offset 0; 0xEF080000 (t8 register) - __ mov(v0, t8); - - // 256k instructions; 2^8k - // addiu t0, t0, 0x0000; (0x25080000) - // ... 
- // addiu t3, t0, 0xFFFF; (0x250BFFFF) - uint32_t addiu_start_2 = 0x25000000; - for (int32_t i = 0x80000; i <= 0xBFFFF; ++i) { - uint32_t addiu_new = addiu_start_2 + i; - __ dd(addiu_new); - } - - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - - auto f = GeneratedCode::FromCode(*code); - - uint32_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); - - return res; -} - - -TEST(r6_lwpc) { - if (IsMipsArchVariant(kMips32r6)) { - CcTest::InitializeVM(); - - struct TestCaseLwpc { - int offset; - uint32_t expected_res; - }; - - // clang-format off - struct TestCaseLwpc tc[] = { - // offset, expected_res - { -262144, 0x250FFFFF }, // offset 0x40000 - { -4, 0x250C0003 }, - { -1, 0x250C0000 }, - { 0, 0xEF080000 }, - { 1, 0x03001025 }, // mov(v0, t8) - { 2, 0x25080000 }, - { 4, 0x25080002 }, - { 262143, 0x250BFFFD }, // offset 0x3FFFF - }; - // clang-format on - - size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseLwpc); - for (size_t i = 0; i < nr_test_cases; ++i) { - uint32_t res = run_lwpc(tc[i].offset); - CHECK_EQ(tc[i].expected_res, res); - } - } -} - - -uint32_t run_jic(int16_t offset) { - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - - Label stop_execution; - __ push(ra); - __ li(v0, 0); - __ li(t1, 0x66); - - __ addiu(v0, v0, 0x1); // <-- offset = -32 - __ addiu(v0, v0, 0x2); - __ addiu(v0, v0, 0x10); - __ addiu(v0, v0, 0x20); - __ beq(v0, t1, &stop_execution); - __ nop(); - - __ nal(); // t0 <- program counter - __ mov(t0, ra); - __ jic(t0, offset); - - __ addiu(v0, v0, 0x100); - __ addiu(v0, v0, 0x200); - __ addiu(v0, v0, 0x1000); - __ addiu(v0, v0, 0x2000); // <--- offset = 16 - __ pop(ra); - __ jr(ra); - __ nop(); - - __ bind(&stop_execution); - __ pop(ra); - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - - auto f = GeneratedCode::FromCode(*code); - - uint32_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); - - return res; -} - - -TEST(r6_jic) { - if (IsMipsArchVariant(kMips32r6)) { - CcTest::InitializeVM(); - - struct TestCaseJic { - // As rt will be used t0 register which will have value of - // the program counter for the jic instruction. 
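-      // jic jumps to rt + the sign-extended 16-bit offset; here rt is the
-      // return address captured by nal, so expected_res depends on which
-      // addiu block that target lands in.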
- int16_t offset; - uint32_t expected_res; - }; - - struct TestCaseJic tc[] = { - // offset, expected_result - { 16, 0x2033 }, - { 4, 0x3333 }, - { -32, 0x66 }, - }; - - size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseJic); - for (size_t i = 0; i < nr_test_cases; ++i) { - uint32_t res = run_jic(tc[i].offset); - CHECK_EQ(tc[i].expected_res, res); - } - } -} - - -uint64_t run_beqzc(int32_t value, int32_t offset) { - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - - Label stop_execution; - __ li(v0, 0); - __ li(t1, 0x66); - - __ addiu(v0, v0, 0x1); // <-- offset = -32 - __ addiu(v0, v0, 0x2); - __ addiu(v0, v0, 0x10); - __ addiu(v0, v0, 0x20); - __ beq(v0, t1, &stop_execution); - __ nop(); - - __ beqzc(a0, offset); // BEQZC rs, offset - - __ addiu(v0, v0, 0x1); - __ addiu(v0, v0, 0x100); - __ addiu(v0, v0, 0x200); - __ addiu(v0, v0, 0x1000); - __ addiu(v0, v0, 0x2000); // <--- offset = 16 - __ jr(ra); - __ nop(); - - __ bind(&stop_execution); - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - - auto f = GeneratedCode::FromCode(*code); - - uint32_t res = reinterpret_cast(f.Call(value, 0, 0, 0, 0)); - - return res; -} - - -TEST(r6_beqzc) { - if (IsMipsArchVariant(kMips32r6)) { - CcTest::InitializeVM(); - - struct TestCaseBeqzc { - uint32_t value; - int32_t offset; - uint32_t expected_res; - }; - - // clang-format off - struct TestCaseBeqzc tc[] = { - // value, offset, expected_res - { 0x0, -8, 0x66 }, - { 0x0, 0, 0x3334 }, - { 0x0, 1, 0x3333 }, - { 0xABC, 1, 0x3334 }, - { 0x0, 4, 0x2033 }, - }; - // clang-format on - - size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBeqzc); - for (size_t i = 0; i < nr_test_cases; ++i) { - uint32_t res = run_beqzc(tc[i].value, tc[i].offset); - CHECK_EQ(tc[i].expected_res, res); - } - } -} - -void load_elements_of_vector(MacroAssembler* assm_ptr, - const uint64_t elements[], MSARegister w, - Register t0, Register t1) { - MacroAssembler& assm = *assm_ptr; - __ li(t0, static_cast(elements[0] & 0xFFFFFFFF)); - __ li(t1, static_cast((elements[0] >> 32) & 0xFFFFFFFF)); - __ insert_w(w, 0, t0); - __ insert_w(w, 1, t1); - __ li(t0, static_cast(elements[1] & 0xFFFFFFFF)); - __ li(t1, static_cast((elements[1] >> 32) & 0xFFFFFFFF)); - __ insert_w(w, 2, t0); - __ insert_w(w, 3, t1); -} - -inline void store_elements_of_vector(MacroAssembler* assm_ptr, MSARegister w, - Register a) { - MacroAssembler& assm = *assm_ptr; - __ st_d(w, MemOperand(a, 0)); -} - -union msa_reg_t { - uint8_t b[16]; - uint16_t h[8]; - uint32_t w[4]; - uint64_t d[2]; -}; - -struct TestCaseMsaBranch { - uint64_t wt_lo; - uint64_t wt_hi; -}; - -template -void run_bz_bnz(TestCaseMsaBranch* input, Branch GenerateBranch, - bool branched) { - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - CpuFeatureScope fscope(&assm, MIPS_SIMD); - - struct T { - uint64_t ws_lo; - uint64_t ws_hi; - uint64_t wd_lo; - uint64_t wd_hi; - }; - T t = {0x20B9CC4F1A83E0C5, 0xA27E1B5F2F5BB18A, 0x0000000000000000, - 0x0000000000000000}; - msa_reg_t res; - Label do_not_move_w0_to_w2; - - load_elements_of_vector(&assm, &t.ws_lo, w0, t0, t1); - load_elements_of_vector(&assm, &t.wd_lo, w2, t0, t1); - load_elements_of_vector(&assm, &input->wt_lo, w1, t0, t1); - GenerateBranch(assm, do_not_move_w0_to_w2); - __ nop(); - __ 
move_v(w2, w0); - - __ bind(&do_not_move_w0_to_w2); - store_elements_of_vector(&assm, w2, a0); - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); -#ifdef OBJECT_PRINT - code->Print(std::cout); -#endif - auto f = GeneratedCode::FromCode(*code); - - (f.Call(&res, 0, 0, 0, 0)); - if (branched) { - CHECK_EQ(t.wd_lo, res.d[0]); - CHECK_EQ(t.wd_hi, res.d[1]); - } else { - CHECK_EQ(t.ws_lo, res.d[0]); - CHECK_EQ(t.ws_hi, res.d[1]); - } -} - -TEST(MSA_bz_bnz) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - TestCaseMsaBranch tz_v[] = { - {0x0, 0x0}, {0xABC, 0x0}, {0x0, 0xABC}, {0xABC, 0xABC}}; - for (unsigned i = 0; i < arraysize(tz_v); ++i) { - run_bz_bnz( - &tz_v[i], - [](MacroAssembler& assm, Label& br_target) { __ bz_v(w1, &br_target); }, - tz_v[i].wt_lo == 0 && tz_v[i].wt_hi == 0); - } - -#define TEST_BZ_DF(input_array, lanes, instruction, int_type) \ - for (unsigned i = 0; i < arraysize(input_array); ++i) { \ - int j; \ - int_type* element = reinterpret_cast(&input_array[i]); \ - for (j = 0; j < lanes; ++j) { \ - if (element[j] == 0) { \ - break; \ - } \ - } \ - run_bz_bnz(&input_array[i], \ - [](MacroAssembler& assm, Label& br_target) { \ - __ instruction(w1, &br_target); \ - }, \ - j != lanes); \ - } - TestCaseMsaBranch tz_b[] = {{0x0, 0x0}, - {0xBC0000, 0x0}, - {0x0, 0xAB000000000000CD}, - {0x123456789ABCDEF0, 0xAAAAAAAAAAAAAAAA}}; - TEST_BZ_DF(tz_b, kMSALanesByte, bz_b, int8_t) - - TestCaseMsaBranch tz_h[] = {{0x0, 0x0}, - {0xBCDE0000, 0x0}, - {0x0, 0xABCD00000000ABCD}, - {0x123456789ABCDEF0, 0xAAAAAAAAAAAAAAAA}}; - TEST_BZ_DF(tz_h, kMSALanesHalf, bz_h, int16_t) - - TestCaseMsaBranch tz_w[] = {{0x0, 0x0}, - {0xBCDE123400000000, 0x0}, - {0x0, 0x000000001234ABCD}, - {0x123456789ABCDEF0, 0xAAAAAAAAAAAAAAAA}}; - TEST_BZ_DF(tz_w, kMSALanesWord, bz_w, int32_t) - - TestCaseMsaBranch tz_d[] = {{0x0, 0x0}, - {0xBCDE0000, 0x0}, - {0x0, 0xABCD00000000ABCD}, - {0x123456789ABCDEF0, 0xAAAAAAAAAAAAAAAA}}; - TEST_BZ_DF(tz_d, kMSALanesDword, bz_d, int64_t) -#undef TEST_BZ_DF - - TestCaseMsaBranch tnz_v[] = { - {0x0, 0x0}, {0xABC, 0x0}, {0x0, 0xABC}, {0xABC, 0xABC}}; - for (unsigned i = 0; i < arraysize(tnz_v); ++i) { - run_bz_bnz(&tnz_v[i], - [](MacroAssembler& assm, Label& br_target) { - __ bnz_v(w1, &br_target); - }, - tnz_v[i].wt_lo != 0 || tnz_v[i].wt_hi != 0); - } - -#define TEST_BNZ_DF(input_array, lanes, instruction, int_type) \ - for (unsigned i = 0; i < arraysize(input_array); ++i) { \ - int j; \ - int_type* element = reinterpret_cast(&input_array[i]); \ - for (j = 0; j < lanes; ++j) { \ - if (element[j] == 0) { \ - break; \ - } \ - } \ - run_bz_bnz(&input_array[i], \ - [](MacroAssembler& assm, Label& br_target) { \ - __ instruction(w1, &br_target); \ - }, \ - j == lanes); \ - } - TestCaseMsaBranch tnz_b[] = {{0x0, 0x0}, - {0xBC0000, 0x0}, - {0x0, 0xAB000000000000CD}, - {0x123456789ABCDEF0, 0xAAAAAAAAAAAAAAAA}}; - TEST_BNZ_DF(tnz_b, 16, bnz_b, int8_t) - - TestCaseMsaBranch tnz_h[] = {{0x0, 0x0}, - {0xBCDE0000, 0x0}, - {0x0, 0xABCD00000000ABCD}, - {0x123456789ABCDEF0, 0xAAAAAAAAAAAAAAAA}}; - TEST_BNZ_DF(tnz_h, 8, bnz_h, int16_t) - - TestCaseMsaBranch tnz_w[] = {{0x0, 0x0}, - {0xBCDE123400000000, 0x0}, - {0x0, 0x000000001234ABCD}, - {0x123456789ABCDEF0, 0xAAAAAAAAAAAAAAAA}}; - TEST_BNZ_DF(tnz_w, 4, bnz_w, int32_t) - - TestCaseMsaBranch tnz_d[] = {{0x0, 0x0}, - {0xBCDE0000, 0x0}, - {0x0, 0xABCD00000000ABCD}, - {0x123456789ABCDEF0, 
0xAAAAAAAAAAAAAAAA}}; - TEST_BNZ_DF(tnz_d, 2, bnz_d, int64_t) -#undef TEST_BNZ_DF -} - -uint32_t run_jialc(int16_t offset) { - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - - Label main_block; - __ push(ra); - __ li(v0, 0); - __ beq(v0, v0, &main_block); - __ nop(); - - // Block 1 - __ addiu(v0, v0, 0x1); // <-- offset = -40 - __ addiu(v0, v0, 0x2); - __ jr(ra); - __ nop(); - - // Block 2 - __ addiu(v0, v0, 0x10); // <-- offset = -24 - __ addiu(v0, v0, 0x20); - __ jr(ra); - __ nop(); - - // Block 3 (Main) - __ bind(&main_block); - __ nal(); // t0 <- program counter - __ mov(t0, ra); - __ jialc(t0, offset); - __ addiu(v0, v0, 0x4); - __ pop(ra); - __ jr(ra); - __ nop(); - - // Block 4 - __ addiu(v0, v0, 0x100); // <-- offset = 20 - __ addiu(v0, v0, 0x200); - __ jr(ra); - __ nop(); - - // Block 5 - __ addiu(v0, v0, 0x1000); // <--- offset = 36 - __ addiu(v0, v0, 0x2000); - __ jr(ra); - __ nop(); - - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - - auto f = GeneratedCode::FromCode(*code); - - uint32_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); - - return res; -} - - -TEST(r6_jialc) { - if (IsMipsArchVariant(kMips32r6)) { - CcTest::InitializeVM(); - - struct TestCaseJialc { - int16_t offset; - uint32_t expected_res; - }; - - struct TestCaseJialc tc[] = { - // offset, expected_res - { -40, 0x7 }, - { -24, 0x34 }, - { 20, 0x304 }, - { 36, 0x3004 } - }; - - size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseJialc); - for (size_t i = 0; i < nr_test_cases; ++i) { - uint32_t res = run_jialc(tc[i].offset); - CHECK_EQ(tc[i].expected_res, res); - } - } -} - -static uint32_t run_addiupc(int32_t imm19) { - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - - __ addiupc(v0, imm19); - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - - auto f = GeneratedCode::FromCode(*code); - PC = (uint32_t)code->entry(); // Set the program counter. - - uint32_t rs = reinterpret_cast(f.Call(imm19, 0, 0, 0, 0)); - - return rs; -} - - -TEST(r6_addiupc) { - if (IsMipsArchVariant(kMips32r6)) { - CcTest::InitializeVM(); - - struct TestCaseAddiupc { - int32_t imm19; - }; - - TestCaseAddiupc tc[] = { - // imm19 - {-262144}, // 0x40000 - {-1}, // 0x7FFFF - {0}, - {1}, // 0x00001 - {262143} // 0x3FFFF - }; - - size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseAddiupc); - for (size_t i = 0; i < nr_test_cases; ++i) { - PC = 0; - uint32_t res = run_addiupc(tc[i].imm19); - // Now, the program_counter (PC) is set. - uint32_t expected_res = PC + (tc[i].imm19 << 2); - CHECK_EQ(expected_res, res); - } - } -} - - -int32_t run_bc(int32_t offset) { - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - - Label continue_1, stop_execution; - __ push(ra); - __ li(v0, 0); - __ li(t8, 0); - __ li(t9, 2); // A condition for stopping execution. 
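-  // The 90 addiu instructions emitted below give negative bc offsets a
-  // well-defined landing area; v0 counts how many of them execute.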
- - for (int32_t i = -100; i <= -11; ++i) { - __ addiu(v0, v0, 1); - } - - __ addiu(t8, t8, 1); // -10 - - __ beq(t8, t9, &stop_execution); // -9 - __ nop(); // -8 - __ beq(t8, t8, &continue_1); // -7 - __ nop(); // -6 - - __ bind(&stop_execution); - __ pop(ra); // -5, -4 - __ jr(ra); // -3 - __ nop(); // -2 - - __ bind(&continue_1); - __ bc(offset); // -1 - - for (int32_t i = 0; i <= 99; ++i) { - __ addiu(v0, v0, 1); - } - - __ pop(ra); - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - - auto f = GeneratedCode::FromCode(*code); - - int32_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); - - return res; -} - - -TEST(r6_bc) { - if (IsMipsArchVariant(kMips32r6)) { - CcTest::InitializeVM(); - - struct TestCaseBc { - int32_t offset; - int32_t expected_res; - }; - - struct TestCaseBc tc[] = { - // offset, expected_result - { -100, (abs(-100) - 10) * 2 }, - { -11, (abs(-100) - 10 + 1) }, - { 0, (abs(-100) - 10 + 1 + 99) }, - { 1, (abs(-100) - 10 + 99) }, - { 99, (abs(-100) - 10 + 1) }, - }; - - size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBc); - for (size_t i = 0; i < nr_test_cases; ++i) { - int32_t res = run_bc(tc[i].offset); - CHECK_EQ(tc[i].expected_res, res); - } - } -} - - -int32_t run_balc(int32_t offset) { - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - - Label continue_1; - __ push(ra); - __ li(v0, 0); - __ li(t8, 0); - __ li(t9, 2); // A condition for stopping execution. - - __ beq(t8, t8, &continue_1); - __ nop(); - - uint32_t instruction_addiu = 0x24420001; // addiu v0, v0, 1 - for (int32_t i = -117; i <= -57; ++i) { - __ dd(instruction_addiu); - } - __ jr(ra); // -56 - __ nop(); // -55 - - for (int32_t i = -54; i <= -4; ++i) { - __ dd(instruction_addiu); - } - __ jr(ra); // -3 - __ nop(); // -2 - - __ bind(&continue_1); - __ balc(offset); // -1 - - __ pop(ra); // 0, 1 - __ jr(ra); // 2 - __ nop(); // 3 - - for (int32_t i = 4; i <= 44; ++i) { - __ dd(instruction_addiu); - } - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - - auto f = GeneratedCode::FromCode(*code); - - int32_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); - - return res; -} - - -uint32_t run_aui(uint32_t rs, uint16_t offset) { - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - - __ li(t0, rs); - __ aui(v0, t0, offset); - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - - auto f = GeneratedCode::FromCode(*code); - - uint32_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); - - return res; -} - - -TEST(r6_aui) { - if (IsMipsArchVariant(kMips32r6)) { - CcTest::InitializeVM(); - - struct TestCaseAui { - uint32_t rs; - uint16_t offset; - uint32_t ref_res; - }; - - struct TestCaseAui tc[] = { - // input, offset, result - {0xFFFEFFFF, 1, 0xFFFFFFFF}, - {0xFFFFFFFF, 0, 0xFFFFFFFF}, - {0, 0xFFFF, 0xFFFF0000}, - {0x0008FFFF, 0xFFF7, 0xFFFFFFFF}, - {32767, 32767, 0x7FFF7FFF}, - // overflow cases - {0xFFFFFFFF, 0x1, 0x0000FFFF}, - {0xFFFFFFFF, 0xFFFF, 0xFFFEFFFF}, - }; - - size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseAui); - for (size_t i = 0; i < nr_test_cases; ++i) { - PC = 0; - uint32_t 
res = run_aui(tc[i].rs, tc[i].offset); - CHECK_EQ(tc[i].ref_res, res); - } - } -} - - -TEST(r6_balc) { - if (IsMipsArchVariant(kMips32r6)) { - CcTest::InitializeVM(); - - struct TestCaseBalc { - int32_t offset; - int32_t expected_res; - }; - - struct TestCaseBalc tc[] = { - // offset, expected_result - { -117, 61 }, - { -54, 51 }, - { 0, 0 }, - { 4, 41 }, - }; - - size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBalc); - for (size_t i = 0; i < nr_test_cases; ++i) { - int32_t res = run_balc(tc[i].offset); - CHECK_EQ(tc[i].expected_res, res); - } - } -} - - -uint32_t run_bal(int16_t offset) { - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - - __ mov(t0, ra); - __ bal(offset); // Equivalent for "BGEZAL zero_reg, offset". - __ nop(); - - __ mov(ra, t0); - __ jr(ra); - __ nop(); - - __ li(v0, 1); - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - - auto f = GeneratedCode::FromCode(*code); - - uint32_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); - - return res; -} - - -TEST(bal) { - CcTest::InitializeVM(); - - struct TestCaseBal { - int16_t offset; - uint32_t expected_res; - }; - - struct TestCaseBal tc[] = { - // offset, expected_res - { 4, 1 }, - }; - - size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBal); - for (size_t i = 0; i < nr_test_cases; ++i) { - CHECK_EQ(tc[i].expected_res, run_bal(tc[i].offset)); - } -} - - -TEST(Trampoline) { - // Private member of Assembler class. - static const int kMaxBranchOffset = (1 << (18 - 1)) - 1; - - CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - Label done; - size_t nr_calls = kMaxBranchOffset / (2 * kInstrSize) + 2; - - for (size_t i = 0; i < nr_calls; ++i) { - __ BranchShort(&done, eq, a0, Operand(a1)); - } - __ bind(&done); - __ Ret(USE_DELAY_SLOT); - __ mov(v0, zero_reg); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - auto f = GeneratedCode::FromCode(*code); - - int32_t res = reinterpret_cast(f.Call(42, 42, 0, 0, 0)); - CHECK_EQ(0, res); -} - -TEST(Trampoline_with_massive_unbound_labels) { - CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - - const int kNumSlots = - TurboAssembler::kMaxBranchOffset / TurboAssembler::kTrampolineSlotsSize; - Label labels[kNumSlots]; - - { - TurboAssembler::BlockTrampolinePoolScope block_trampoline_pool(&assm); - for (int i = 0; i < kNumSlots; i++) { - __ Branch(&labels[i]); - } - } - - __ bind(&labels[0]); -} - -static void DummyFunction(Object result) {} - -TEST(Call_with_trampoline) { - CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - - int next_buffer_check_ = FLAG_force_long_branches - ? 
kMaxInt - : TurboAssembler::kMaxBranchOffset - - TurboAssembler::kTrampolineSlotsSize * 16; - - Label done; - __ Branch(&done); - next_buffer_check_ -= TurboAssembler::kTrampolineSlotsSize; - - int num_nops = (next_buffer_check_ - __ pc_offset()) / kInstrSize - 1; - for (int i = 0; i < num_nops; i++) { - __ nop(); - } - - int pc_offset_before = __ pc_offset(); - { - // There should be a trampoline after this Call - __ Call(FUNCTION_ADDR(DummyFunction), RelocInfo::RUNTIME_ENTRY); - } - int pc_offset_after = __ pc_offset(); - int safepoint_pc_offset = __ pc_offset_for_safepoint(); - - // Without trampoline, the Call emits no more than 6 instructions, otherwise - // more than 6 instructions will be generated. - int num_instrs = 6; - // pc_offset_after records the offset after trampoline. - CHECK_GT(pc_offset_after - pc_offset_before, num_instrs * kInstrSize); - // safepoint_pc_offset records the offset before trampoline. - CHECK_LE(safepoint_pc_offset - pc_offset_before, num_instrs * kInstrSize); - - __ bind(&done); -} - -template -struct TestCaseMaddMsub { - T fr, fs, ft, fd_add, fd_sub; -}; - -template -void helper_madd_msub_maddf_msubf(F func) { - CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - - T x = std::sqrt(static_cast(2.0)); - T y = std::sqrt(static_cast(3.0)); - T z = std::sqrt(static_cast(5.0)); - T x2 = 11.11, y2 = 22.22, z2 = 33.33; - TestCaseMaddMsub test_cases[] = { - {x, y, z, 0.0, 0.0}, - {x, y, -z, 0.0, 0.0}, - {x, -y, z, 0.0, 0.0}, - {x, -y, -z, 0.0, 0.0}, - {-x, y, z, 0.0, 0.0}, - {-x, y, -z, 0.0, 0.0}, - {-x, -y, z, 0.0, 0.0}, - {-x, -y, -z, 0.0, 0.0}, - {-3.14, 0.2345, -123.000056, 0.0, 0.0}, - {7.3, -23.257, -357.1357, 0.0, 0.0}, - {x2, y2, z2, 0.0, 0.0}, - {x2, y2, -z2, 0.0, 0.0}, - {x2, -y2, z2, 0.0, 0.0}, - {x2, -y2, -z2, 0.0, 0.0}, - {-x2, y2, z2, 0.0, 0.0}, - {-x2, y2, -z2, 0.0, 0.0}, - {-x2, -y2, z2, 0.0, 0.0}, - {-x2, -y2, -z2, 0.0, 0.0}, - }; - - if (std::is_same::value) { - __ lwc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub, fr))); - __ lwc1(f6, MemOperand(a0, offsetof(TestCaseMaddMsub, fs))); - __ lwc1(f8, MemOperand(a0, offsetof(TestCaseMaddMsub, ft))); - __ lwc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub, fr))); - } else if (std::is_same::value) { - __ Ldc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub, fr))); - __ Ldc1(f6, MemOperand(a0, offsetof(TestCaseMaddMsub, fs))); - __ Ldc1(f8, MemOperand(a0, offsetof(TestCaseMaddMsub, ft))); - __ Ldc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub, fr))); - } else { - UNREACHABLE(); - } - - func(assm); - - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - auto f = GeneratedCode::FromCode(*code); - - const size_t kTableLength = sizeof(test_cases) / sizeof(TestCaseMaddMsub); - TestCaseMaddMsub tc; - for (size_t i = 0; i < kTableLength; i++) { - tc.fr = test_cases[i].fr; - tc.fs = test_cases[i].fs; - tc.ft = test_cases[i].ft; - - (f.Call(&tc, 0, 0, 0, 0)); - - T res_add = 0; - T res_sub = 0; - if (IsMipsArchVariant(kMips32r2)) { - res_add = (tc.fs * tc.ft) + tc.fr; - res_sub = (tc.fs * tc.ft) - tc.fr; - } else if (IsMipsArchVariant(kMips32r6)) { - res_add = std::fma(tc.fs, tc.ft, tc.fr); - res_sub = std::fma(-tc.fs, tc.ft, tc.fr); - } else { - UNREACHABLE(); - } - - CHECK_EQ(tc.fd_add, res_add); - CHECK_EQ(tc.fd_sub, res_sub); - } -} - -TEST(madd_msub_s) { - if 
(!IsMipsArchVariant(kMips32r2)) return;
-  helper_madd_msub_maddf_msubf<float>([](MacroAssembler& assm) {
-    __ madd_s(f10, f4, f6, f8);
-    __ swc1(f10, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_add)));
-    __ msub_s(f16, f4, f6, f8);
-    __ swc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_sub)));
-  });
-}
-
-TEST(madd_msub_d) {
-  if (!IsMipsArchVariant(kMips32r2)) return;
-  helper_madd_msub_maddf_msubf<double>([](MacroAssembler& assm) {
-    __ madd_d(f10, f4, f6, f8);
-    __ Sdc1(f10, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_add)));
-    __ msub_d(f16, f4, f6, f8);
-    __ Sdc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_sub)));
-  });
-}
-
-TEST(maddf_msubf_s) {
-  if (!IsMipsArchVariant(kMips32r6)) return;
-  helper_madd_msub_maddf_msubf<float>([](MacroAssembler& assm) {
-    __ maddf_s(f4, f6, f8);
-    __ swc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_add)));
-    __ msubf_s(f16, f6, f8);
-    __ swc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_sub)));
-  });
-}
-
-TEST(maddf_msubf_d) {
-  if (!IsMipsArchVariant(kMips32r6)) return;
-  helper_madd_msub_maddf_msubf<double>([](MacroAssembler& assm) {
-    __ maddf_d(f4, f6, f8);
-    __ Sdc1(f4, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_add)));
-    __ msubf_d(f16, f6, f8);
-    __ Sdc1(f16, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_sub)));
-  });
-}
-
-uint32_t run_Subu(uint32_t imm, int32_t num_instr) {
-  Isolate* isolate = CcTest::i_isolate();
-  HandleScope scope(isolate);
-
-  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-
-  Label code_start;
-  __ bind(&code_start);
-  __ Subu(v0, zero_reg, imm);
-  CHECK_EQ(assm.SizeOfCodeGeneratedSince(&code_start), num_instr * kInstrSize);
-  __ jr(ra);
-  __ nop();
-
-  CodeDesc desc;
-  assm.GetCode(isolate, &desc);
-  Handle<Code> code =
-      Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-  auto f = GeneratedCode<F2>::FromCode(*code);
-
-  uint32_t res = reinterpret_cast<uint32_t>(f.Call(0, 0, 0, 0, 0));
-
-  return res;
-}
-
-TEST(Subu) {
-  CcTest::InitializeVM();
-
-  // Test Subu macro-instruction for min_int16 and max_int16 border cases.
-  // For subtracting int16 immediate values we use addiu.
-
-  struct TestCaseSubu {
-    uint32_t imm;
-    uint32_t expected_res;
-    int32_t num_instr;
-  };
-
-  // We call Subu(v0, zero_reg, imm) to test cases listed below.
-  // 0 - imm = expected_res
-  struct TestCaseSubu tc[] = {
-      // imm, expected_res, num_instr
-      {0xFFFF8000, 0x8000, 2},  // min_int16
-      // Generates ori + addu
-      // We can't have just addiu because -min_int16 > max_int16 so use
-      // register. We can load min_int16 to at register with addiu and then
-      // subtract at with subu, but now we use ori + addu because -min_int16
-      // can be loaded using ori.
-      {0x8000, 0xFFFF8000, 1},  // max_int16 + 1
-      // Generates addiu
-      // max_int16 + 1 is not int16 but -(max_int16 + 1) is, just use addiu.
-      {0xFFFF7FFF, 0x8001, 2},  // min_int16 - 1
-      // Generates ori + addu
-      // To load this value to at we need two instructions and another one to
-      // subtract, lui + ori + subu. But we can load -value to at using just
-      // ori and then add at register with addu.
-      {0x8001, 0xFFFF7FFF, 2},  // max_int16 + 2
-      // Generates ori + subu
-      // Not int16 but is uint16, load value to at with ori and subtract with
-      // subu.
-      {0x00010000, 0xFFFF0000, 2},
-      // Generates lui + subu
-      // Load value using lui to at and subtract with subu.
-      {0x00010001, 0xFFFEFFFF, 3},
-      // Generates lui + ori + subu
-      // We have to generate three instructions in this case.
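-      // In every case 0 - imm (mod 2^32) must equal expected_res (e.g.
-      // 0 - 0x00010001 = 0xFFFEFFFF), while num_instr checks that Subu
-      // picked the instruction sequence described above.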
-  };
-
-  size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseSubu);
-  for (size_t i = 0; i < nr_test_cases; ++i) {
-    CHECK_EQ(tc[i].expected_res, run_Subu(tc[i].imm, tc[i].num_instr));
-  }
-}
-
-TEST(MSA_fill_copy) {
-  CcTest::InitializeVM();
-  Isolate* isolate = CcTest::i_isolate();
-  HandleScope scope(isolate);
-
-  struct T {
-    uint32_t u8;
-    uint32_t u16;
-    uint32_t u32;
-    uint32_t s8;
-    uint32_t s16;
-    uint32_t s32;
-  };
-  T t;
-
-  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-  if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
-    return;
-
-  {
-    CpuFeatureScope fscope(&assm, MIPS_SIMD);
-
-    __ li(t0, 0xA512B683);
-
-    __ fill_b(w0, t0);
-    __ fill_h(w2, t0);
-    __ fill_w(w4, t0);
-    __ copy_u_b(t1, w0, 11);
-    __ sw(t1, MemOperand(a0, offsetof(T, u8)));
-    __ copy_u_h(t1, w2, 6);
-    __ sw(t1, MemOperand(a0, offsetof(T, u16)));
-    __ copy_u_w(t1, w4, 3);
-    __ sw(t1, MemOperand(a0, offsetof(T, u32)));
-
-    __ copy_s_b(t1, w0, 8);
-    __ sw(t1, MemOperand(a0, offsetof(T, s8)));
-    __ copy_s_h(t1, w2, 5);
-    __ sw(t1, MemOperand(a0, offsetof(T, s16)));
-    __ copy_s_w(t1, w4, 1);
-    __ sw(t1, MemOperand(a0, offsetof(T, s32)));
-
-    __ jr(ra);
-    __ nop();
-  }
-
-  CodeDesc desc;
-  assm.GetCode(isolate, &desc);
-  Handle<Code> code =
-      Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-#ifdef OBJECT_PRINT
-  code->Print(std::cout);
-#endif
-  auto f = GeneratedCode<F3>::FromCode(*code);
-
-  f.Call(&t, 0, 0, 0, 0);
-
-  CHECK_EQ(0x83u, t.u8);
-  CHECK_EQ(0xB683u, t.u16);
-  CHECK_EQ(0xA512B683u, t.u32);
-  CHECK_EQ(0xFFFFFF83u, t.s8);
-  CHECK_EQ(0xFFFFB683u, t.s16);
-  CHECK_EQ(0xA512B683u, t.s32);
-}
-
-TEST(MSA_fill_copy_2) {
-  // Similar to MSA_fill_copy test, but also check overlapping between MSA
-  // and FPU registers with the same numbers.
-  CcTest::InitializeVM();
-  Isolate* isolate = CcTest::i_isolate();
-  HandleScope scope(isolate);
-
-  struct T {
-    uint32_t w0;
-    uint32_t w1;
-    uint32_t w2;
-    uint32_t w3;
-  };
-  T t[2];
-
-  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-  if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
-    return;
-
-  {
-    CpuFeatureScope fscope(&assm, MIPS_SIMD);
-
-    __ li(t0, 0xAAAAAAAA);
-    __ li(t1, 0x55555555);
-
-    __ fill_w(w0, t0);
-    __ fill_w(w2, t0);
-
-    __ FmoveLow(f0, t1);
-    __ FmoveHigh(f2, t1);
-
-#define STORE_MSA_REG(w_reg, base, scratch)          \
-  __ copy_u_w(scratch, w_reg, 0);                    \
-  __ sw(scratch, MemOperand(base, offsetof(T, w0))); \
-  __ copy_u_w(scratch, w_reg, 1);                    \
-  __ sw(scratch, MemOperand(base, offsetof(T, w1))); \
-  __ copy_u_w(scratch, w_reg, 2);                    \
-  __ sw(scratch, MemOperand(base, offsetof(T, w2))); \
-  __ copy_u_w(scratch, w_reg, 3);                    \
-  __ sw(scratch, MemOperand(base, offsetof(T, w3)));
-
-    STORE_MSA_REG(w0, a0, t2)
-    STORE_MSA_REG(w2, a1, t2)
-#undef STORE_MSA_REG
-
-    __ jr(ra);
-    __ nop();
-  }
-
-  CodeDesc desc;
-  assm.GetCode(isolate, &desc);
-  Handle<Code> code =
-      Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-#ifdef OBJECT_PRINT
-  code->Print(std::cout);
-#endif
-  auto f = GeneratedCode<F4>::FromCode(*code);
-
-  f.Call(&t[0], &t[1], 0, 0, 0);
-
-  CHECK_EQ(0x55555555, t[0].w0);
-  CHECK_EQ(0xAAAAAAAA, t[0].w1);
-  CHECK_EQ(0xAAAAAAAA, t[0].w2);
-  CHECK_EQ(0xAAAAAAAA, t[0].w3);
-  CHECK_EQ(0xAAAAAAAA, t[1].w0);
-  CHECK_EQ(0x55555555, t[1].w1);
-  CHECK_EQ(0xAAAAAAAA, t[1].w2);
-  CHECK_EQ(0xAAAAAAAA, t[1].w3);
-}
-
-TEST(MSA_fill_copy_3) {
-  // Similar to MSA_fill_copy test, but also check overlapping between MSA
-  // and FPU registers with the same numbers.
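-  // (The FPU pair f0/f2 is written first via Move and then overwritten
-  // through the aliasing MSA registers w0/w2 by fill_w, so only the
-  // 0x55555555 pattern should survive in the stored doubles.)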
CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - struct T { - uint64_t d0; - uint64_t d1; - }; - T t[2]; - - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - { - CpuFeatureScope fscope(&assm, MIPS_SIMD); - - __ li(t0, 0xAAAAAAAA); - __ li(t1, 0x55555555); - - __ Move(f0, t0, t0); - __ Move(f2, t0, t0); - - __ fill_w(w0, t1); - __ fill_w(w2, t1); - - __ Sdc1(f0, MemOperand(a0, offsetof(T, d0))); - __ Sdc1(f2, MemOperand(a1, offsetof(T, d0))); - - __ jr(ra); - __ nop(); - } - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); -#ifdef OBJECT_PRINT - code->Print(std::cout); -#endif - auto f = GeneratedCode::FromCode(*code); - - f.Call(&t[0], &t[1], 0, 0, 0); - - CHECK_EQ(0x5555555555555555, t[0].d0); - CHECK_EQ(0x5555555555555555, t[1].d0); -} - -template -void run_msa_insert(int32_t rs_value, int n, msa_reg_t* w) { - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - CpuFeatureScope fscope(&assm, MIPS_SIMD); - - __ li(t0, -1); - __ li(t1, rs_value); - __ fill_w(w0, t0); - - if (std::is_same::value) { - DCHECK_LT(n, 16); - __ insert_b(w0, n, t1); - } else if (std::is_same::value) { - DCHECK_LT(n, 8); - __ insert_h(w0, n, t1); - } else if (std::is_same::value) { - DCHECK_LT(n, 4); - __ insert_w(w0, n, t1); - } else { - UNREACHABLE(); - } - - store_elements_of_vector(&assm, w0, a0); - - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); -#ifdef OBJECT_PRINT - code->Print(std::cout); -#endif - auto f = GeneratedCode::FromCode(*code); - - (f.Call(w, 0, 0, 0, 0)); -} - -TEST(MSA_insert) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - struct TestCaseInsert { - uint32_t input; - int n; - uint64_t exp_res_lo; - uint64_t exp_res_hi; - }; - - struct TestCaseInsert tc_b[] = { - // input, n, exp_res_lo, exp_res_hi - {0xA2, 13, 0xFFFFFFFFFFFFFFFFu, 0xFFFFA2FFFFFFFFFFu}, - {0x73, 10, 0xFFFFFFFFFFFFFFFFu, 0xFFFFFFFFFF73FFFFu}, - {0x3494, 5, 0xFFFF94FFFFFFFFFFu, 0xFFFFFFFFFFFFFFFFu}, - {0xA6B8, 1, 0xFFFFFFFFFFFFB8FFu, 0xFFFFFFFFFFFFFFFFu}}; - - for (size_t i = 0; i < sizeof(tc_b) / sizeof(TestCaseInsert); ++i) { - msa_reg_t res; - run_msa_insert(tc_b[i].input, tc_b[i].n, &res); - CHECK_EQ(tc_b[i].exp_res_lo, res.d[0]); - CHECK_EQ(tc_b[i].exp_res_hi, res.d[1]); - } - - struct TestCaseInsert tc_h[] = { - // input, n, exp_res_lo, exp_res_hi - {0x85A2, 7, 0xFFFFFFFFFFFFFFFFu, 0x85A2FFFFFFFFFFFFu}, - {0xE873, 5, 0xFFFFFFFFFFFFFFFFu, 0xFFFFFFFFE873FFFFu}, - {0x3494, 3, 0x3494FFFFFFFFFFFFu, 0xFFFFFFFFFFFFFFFFu}, - {0xA6B8, 1, 0xFFFFFFFFA6B8FFFFu, 0xFFFFFFFFFFFFFFFFu}}; - - for (size_t i = 0; i < sizeof(tc_h) / sizeof(TestCaseInsert); ++i) { - msa_reg_t res; - run_msa_insert(tc_h[i].input, tc_h[i].n, &res); - CHECK_EQ(tc_h[i].exp_res_lo, res.d[0]); - CHECK_EQ(tc_h[i].exp_res_hi, res.d[1]); - } - - struct TestCaseInsert tc_w[] = { - // input, n, exp_res_lo, exp_res_hi - {0xD2F085A2u, 3, 0xFFFFFFFFFFFFFFFFu, 0xD2F085A2FFFFFFFFu}, - {0x4567E873u, 2, 0xFFFFFFFFFFFFFFFFu, 0xFFFFFFFF4567E873u}, - {0xACDB3494u, 1, 0xACDB3494FFFFFFFFu, 0xFFFFFFFFFFFFFFFFu}, - {0x89ABA6B8u, 0, 0xFFFFFFFF89ABA6B8u, 
0xFFFFFFFFFFFFFFFFu}}; - - for (size_t i = 0; i < sizeof(tc_w) / sizeof(TestCaseInsert); ++i) { - msa_reg_t res; - run_msa_insert(tc_w[i].input, tc_w[i].n, &res); - CHECK_EQ(tc_w[i].exp_res_lo, res.d[0]); - CHECK_EQ(tc_w[i].exp_res_hi, res.d[1]); - } -} - -TEST(MSA_move_v) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - struct T { - uint64_t ws_lo; - uint64_t ws_hi; - uint64_t wd_lo; - uint64_t wd_hi; - }; - T t[] = {{0x20B9CC4F1A83E0C5, 0xA27E1B5F2F5BB18A, 0x1E86678B52F8E1FF, - 0x706E51290AC76FB9}, - {0x4414AED7883FFD18, 0x047D183A06B67016, 0x4EF258CF8D822870, - 0x2686B73484C2E843}, - {0xD38FF9D048884FFC, 0x6DC63A57C0943CA7, 0x8520CA2F3E97C426, - 0xA9913868FB819C59}}; - - for (unsigned i = 0; i < arraysize(t); ++i) { - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - CpuFeatureScope fscope(&assm, MIPS_SIMD); - - load_elements_of_vector(&assm, &t[i].ws_lo, w0, t0, t1); - load_elements_of_vector(&assm, &t[i].wd_lo, w2, t0, t1); - __ move_v(w2, w0); - store_elements_of_vector(&assm, w2, a0); - - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); -#ifdef OBJECT_PRINT - code->Print(std::cout); -#endif - auto f = GeneratedCode::FromCode(*code); - (f.Call(&t[i].wd_lo, 0, 0, 0, 0)); - CHECK_EQ(t[i].ws_lo, t[i].wd_lo); - CHECK_EQ(t[i].ws_hi, t[i].wd_hi); - } -} - -template -void run_msa_sldi(OperFunc GenerateOperation, - ExpectFunc GenerateExpectedResult) { - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - struct T { - uint64_t ws_lo; - uint64_t ws_hi; - uint64_t wd_lo; - uint64_t wd_hi; - }; - T t[] = {{0x20B9CC4F1A83E0C5, 0xA27E1B5F2F5BB18A, 0x1E86678B52F8E1FF, - 0x706E51290AC76FB9}, - {0x4414AED7883FFD18, 0x047D183A06B67016, 0x4EF258CF8D822870, - 0x2686B73484C2E843}, - {0xD38FF9D048884FFC, 0x6DC63A57C0943CA7, 0x8520CA2F3E97C426, - 0xA9913868FB819C59}}; - uint64_t res[2]; - - for (unsigned i = 0; i < arraysize(t); ++i) { - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - CpuFeatureScope fscope(&assm, MIPS_SIMD); - load_elements_of_vector(&assm, &t[i].ws_lo, w0, t0, t1); - load_elements_of_vector(&assm, &t[i].wd_lo, w2, t0, t1); - GenerateOperation(assm); - store_elements_of_vector(&assm, w2, a0); - - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); -#ifdef OBJECT_PRINT - code->Print(std::cout); -#endif - auto f = GeneratedCode::FromCode(*code); - (f.Call(&res[0], 0, 0, 0, 0)); - GenerateExpectedResult(reinterpret_cast(&t[i].ws_lo), - reinterpret_cast(&t[i].wd_lo)); - CHECK_EQ(res[0], t[i].wd_lo); - CHECK_EQ(res[1], t[i].wd_hi); - } -} - -TEST(MSA_sldi) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - CcTest::InitializeVM(); - -#define SLDI_DF(s, k) \ - uint8_t v[32]; \ - for (unsigned i = 0; i < s; i++) { \ - v[i] = ws[s * k + i]; \ - v[i + s] = wd[s * k + i]; \ - } \ - for (unsigned i = 0; i < s; i++) { \ - wd[s * k + i] = v[i + n]; \ - } - - for (int n = 0; n < 16; ++n) { - run_msa_sldi([n](MacroAssembler& assm) { __ sldi_b(w2, w0, n); }, - [n](uint8_t* ws, uint8_t* wd) { - SLDI_DF(kMSARegSize / sizeof(int8_t) / kBitsPerByte, 0) - }); - } - - for (int n = 0; n < 8; ++n) { - run_msa_sldi([n](MacroAssembler& assm) { __ 
sldi_h(w2, w0, n); }, - [n](uint8_t* ws, uint8_t* wd) { - for (int k = 0; k < 2; ++k) { - SLDI_DF(kMSARegSize / sizeof(int16_t) / kBitsPerByte, k) - } - }); - } - - for (int n = 0; n < 4; ++n) { - run_msa_sldi([n](MacroAssembler& assm) { __ sldi_w(w2, w0, n); }, - [n](uint8_t* ws, uint8_t* wd) { - for (int k = 0; k < 4; ++k) { - SLDI_DF(kMSARegSize / sizeof(int32_t) / kBitsPerByte, k) - } - }); - } - - for (int n = 0; n < 2; ++n) { - run_msa_sldi([n](MacroAssembler& assm) { __ sldi_d(w2, w0, n); }, - [n](uint8_t* ws, uint8_t* wd) { - for (int k = 0; k < 8; ++k) { - SLDI_DF(kMSARegSize / sizeof(int64_t) / kBitsPerByte, k) - } - }); - } -#undef SLDI_DF -} - -void run_msa_ctc_cfc(uint32_t value) { - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - CpuFeatureScope fscope(&assm, MIPS_SIMD); - - MSAControlRegister msareg = {kMSACSRRegister}; - __ li(t0, value); - __ li(t2, 0); - __ cfcmsa(t1, msareg); - __ ctcmsa(msareg, t0); - __ cfcmsa(t2, msareg); - __ ctcmsa(msareg, t1); - __ sw(t2, MemOperand(a0, 0)); - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); -#ifdef OBJECT_PRINT - code->Print(std::cout); -#endif - auto f = GeneratedCode::FromCode(*code); - - uint32_t res; - (f.Call(&res, 0, 0, 0, 0)); - - CHECK_EQ(value & 0x0167FFFF, res); -} - -TEST(MSA_cfc_ctc) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - const uint32_t mask_without_cause = 0xFF9C0FFF; - const uint32_t mask_always_zero = 0x0167FFFF; - const uint32_t mask_enables = 0x00000F80; - uint32_t test_case[] = {0x2D5EDE31, 0x07955425, 0x15B7DBE3, 0x2BF8BC37, - 0xE6AAE923, 0x24D0F68D, 0x41AFA84C, 0x2D6BF64F, - 0x925014BD, 0x4DBA7E61}; - for (unsigned i = 0; i < arraysize(test_case); i++) { - // Setting enable bits and corresponding cause bits could result in - // exception raised and this prevents that from happening - test_case[i] = (~test_case[i] & mask_enables) << 5 | - (test_case[i] & mask_without_cause); - run_msa_ctc_cfc(test_case[i] & mask_always_zero); - } -} - -struct ExpResShf { - uint8_t i8; - uint64_t lo; - uint64_t hi; -}; - -void run_msa_i8(SecondaryField opcode, uint64_t ws_lo, uint64_t ws_hi, - uint8_t i8) { - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - CpuFeatureScope fscope(&assm, MIPS_SIMD); - msa_reg_t res; - uint64_t wd_lo = 0xF35862E13E38F8B0; - uint64_t wd_hi = 0x4F41FFDEF2BFE636; - -#define LOAD_W_REG(lo, hi, w_reg) \ - __ li(t0, static_cast(lo & 0xFFFFFFFF)); \ - __ li(t1, static_cast((lo >> 32) & 0xFFFFFFFF)); \ - __ insert_w(w_reg, 0, t0); \ - __ insert_w(w_reg, 1, t1); \ - __ li(t0, static_cast(hi & 0xFFFFFFFF)); \ - __ li(t1, static_cast((hi >> 32) & 0xFFFFFFFF)); \ - __ insert_w(w_reg, 2, t0); \ - __ insert_w(w_reg, 3, t1); - - LOAD_W_REG(ws_lo, ws_hi, w0) - - switch (opcode) { - case ANDI_B: - __ andi_b(w2, w0, i8); - break; - case ORI_B: - __ ori_b(w2, w0, i8); - break; - case NORI_B: - __ nori_b(w2, w0, i8); - break; - case XORI_B: - __ xori_b(w2, w0, i8); - break; - case BMNZI_B: - LOAD_W_REG(wd_lo, wd_hi, w2); - __ bmnzi_b(w2, w0, i8); - break; - case BMZI_B: - LOAD_W_REG(wd_lo, wd_hi, w2); - __ bmzi_b(w2, w0, i8); - break; - case BSELI_B: - LOAD_W_REG(wd_lo, wd_hi, w2); - __ bseli_b(w2, w0, i8); - break; - case SHF_B: - __ 
shf_b(w2, w0, i8); - break; - case SHF_H: - __ shf_h(w2, w0, i8); - break; - case SHF_W: - __ shf_w(w2, w0, i8); - break; - default: - UNREACHABLE(); - } - - store_elements_of_vector(&assm, w2, a0); - - __ jr(ra); - __ nop(); - -#undef LOAD_W_REG - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); -#ifdef OBJECT_PRINT - code->Print(std::cout); -#endif - auto f = GeneratedCode::FromCode(*code); - - (f.Call(&res, 0, 0, 0, 0)); - - uint64_t mask = i8 * 0x0101010101010101ull; - switch (opcode) { - case ANDI_B: - CHECK_EQ(ws_lo & mask, res.d[0]); - CHECK_EQ(ws_hi & mask, res.d[1]); - break; - case ORI_B: - CHECK_EQ(ws_lo | mask, res.d[0]); - CHECK_EQ(ws_hi | mask, res.d[1]); - break; - case NORI_B: - CHECK_EQ(~(ws_lo | mask), res.d[0]); - CHECK_EQ(~(ws_hi | mask), res.d[1]); - break; - case XORI_B: - CHECK_EQ(ws_lo ^ mask, res.d[0]); - CHECK_EQ(ws_hi ^ mask, res.d[1]); - break; - case BMNZI_B: - CHECK_EQ((ws_lo & mask) | (wd_lo & ~mask), res.d[0]); - CHECK_EQ((ws_hi & mask) | (wd_hi & ~mask), res.d[1]); - break; - case BMZI_B: - CHECK_EQ((ws_lo & ~mask) | (wd_lo & mask), res.d[0]); - CHECK_EQ((ws_hi & ~mask) | (wd_hi & mask), res.d[1]); - break; - case BSELI_B: - CHECK_EQ((ws_lo & ~wd_lo) | (mask & wd_lo), res.d[0]); - CHECK_EQ((ws_hi & ~wd_hi) | (mask & wd_hi), res.d[1]); - break; - case SHF_B: { - struct ExpResShf exp_b[] = { - // i8, exp_lo, exp_hi - {0xFFu, 0x11111111B9B9B9B9, 0xF7F7F7F7C8C8C8C8}, - {0x0u, 0x62626262DFDFDFDF, 0xD6D6D6D6C8C8C8C8}, - {0xE4u, 0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636}, - {0x1Bu, 0x1B756911C3D9A7B9, 0xAE94A5F79C8AEFC8}, - {0xB1u, 0x662B6253E8C4DF12, 0x0D3AD6803F8BC88B}, - {0x4Eu, 0x62E1F358F8B03E38, 0xFFDE4F41E636F2BF}, - {0x27u, 0x1B697511C3A7D9B9, 0xAEA594F79CEF8AC8}}; - for (size_t i = 0; i < sizeof(exp_b) / sizeof(ExpResShf); ++i) { - if (exp_b[i].i8 == i8) { - CHECK_EQ(exp_b[i].lo, res.d[0]); - CHECK_EQ(exp_b[i].hi, res.d[1]); - } - } - } break; - case SHF_H: { - struct ExpResShf exp_h[] = { - // i8, exp_lo, exp_hi - {0xFFu, 0x1169116911691169, 0xF7A5F7A5F7A5F7A5}, - {0x0u, 0x12DF12DF12DF12DF, 0x8BC88BC88BC88BC8}, - {0xE4u, 0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636}, - {0x1Bu, 0xD9C3B9A7751B1169, 0x8A9CC8EF94AEF7A5}, - {0xB1u, 0x53622B6612DFC4E8, 0x80D63A0D8BC88B3F}, - {0x4Eu, 0x3E38F8B0F35862E1, 0xF2BFE6364F41FFDE}, - {0x27u, 0xD9C3751BB9A71169, 0x8A9C94AEC8EFF7A5}}; - for (size_t i = 0; i < sizeof(exp_h) / sizeof(ExpResShf); ++i) { - if (exp_h[i].i8 == i8) { - CHECK_EQ(exp_h[i].lo, res.d[0]); - CHECK_EQ(exp_h[i].hi, res.d[1]); - } - } - } break; - case SHF_W: { - struct ExpResShf exp_w[] = { - // i8, exp_lo, exp_hi - {0xFFu, 0xF7A594AEF7A594AE, 0xF7A594AEF7A594AE}, - {0x0u, 0xC4E812DFC4E812DF, 0xC4E812DFC4E812DF}, - {0xE4u, 0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636}, - {0x1Bu, 0xC8EF8A9CF7A594AE, 0xB9A7D9C31169751B}, - {0xB1u, 0xC4E812DF2B665362, 0x8B3F8BC83A0D80D6}, - {0x4Eu, 0x4F41FFDEF2BFE636, 0xF35862E13E38F8B0}, - {0x27u, 0x1169751BF7A594AE, 0xB9A7D9C3C8EF8A9C}}; - for (size_t i = 0; i < sizeof(exp_w) / sizeof(ExpResShf); ++i) { - if (exp_w[i].i8 == i8) { - CHECK_EQ(exp_w[i].lo, res.d[0]); - CHECK_EQ(exp_w[i].hi, res.d[1]); - } - } - } break; - default: - UNREACHABLE(); - } -} - -struct TestCaseMsaI8 { - uint64_t input_lo; - uint64_t input_hi; - uint8_t i8; -}; - -TEST(MSA_andi_ori_nori_xori) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - struct TestCaseMsaI8 tc[] = {// input_lo, input_hi, i8 - 
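      // run_msa_i8 replicates the 8-bit immediate across every byte lane
      // (mask = i8 * 0x0101010101010101) and applies the operation lane-wise.
      // E.g. for ANDI_B with i8 = 0x3B, the low byte 0xC3 of the first input
      // becomes 0xC3 & 0x3B = 0x03.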
{0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0xFFu}, - {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0x0u}, - {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x3Bu}, - {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0xD9u}}; - - for (size_t i = 0; i < sizeof(tc) / sizeof(TestCaseMsaI8); ++i) { - run_msa_i8(ANDI_B, tc[i].input_lo, tc[i].input_hi, tc[i].i8); - run_msa_i8(ORI_B, tc[i].input_lo, tc[i].input_hi, tc[i].i8); - run_msa_i8(NORI_B, tc[i].input_lo, tc[i].input_hi, tc[i].i8); - run_msa_i8(XORI_B, tc[i].input_lo, tc[i].input_hi, tc[i].i8); - } -} - -TEST(MSA_bmnzi_bmzi_bseli) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - struct TestCaseMsaI8 tc[] = {// input_lo, input_hi, i8 - {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0xFFu}, - {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0x0u}, - {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x3Bu}, - {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0xD9u}}; - - for (size_t i = 0; i < sizeof(tc) / sizeof(TestCaseMsaI8); ++i) { - run_msa_i8(BMNZI_B, tc[i].input_lo, tc[i].input_hi, tc[i].i8); - run_msa_i8(BMZI_B, tc[i].input_lo, tc[i].input_hi, tc[i].i8); - run_msa_i8(BSELI_B, tc[i].input_lo, tc[i].input_hi, tc[i].i8); - } -} - -TEST(MSA_shf) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - struct TestCaseMsaI8 tc[] = { - // input_lo, input_hi, i8 - {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0xFFu}, // 3333 - {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0x0u}, // 0000 - {0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, 0xE4u}, // 3210 - {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x1Bu}, // 0123 - {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0xB1u}, // 2301 - {0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, 0x4Eu}, // 1032 - {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x27u} // 0213 - }; - - for (size_t i = 0; i < sizeof(tc) / sizeof(TestCaseMsaI8); ++i) { - run_msa_i8(SHF_B, tc[i].input_lo, tc[i].input_hi, tc[i].i8); - run_msa_i8(SHF_H, tc[i].input_lo, tc[i].input_hi, tc[i].i8); - run_msa_i8(SHF_W, tc[i].input_lo, tc[i].input_hi, tc[i].i8); - } -} - -uint32_t run_Ins(uint32_t imm, uint32_t source, uint16_t pos, uint16_t size) { - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - - __ li(v0, imm); - __ li(t0, source); - __ Ins(v0, t0, pos, size); - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - auto f = GeneratedCode::FromCode(*code); - - uint32_t res = reinterpret_cast(f.Call(0, 0, 0, 0, 0)); - - return res; -} - -TEST(Ins) { - CcTest::InitializeVM(); - - // run_Ins(rt_value, rs_value, pos, size), expected_result - CHECK_EQ(run_Ins(0x55555555, 0xABCDEF01, 31, 1), 0xD5555555); - CHECK_EQ(run_Ins(0x55555555, 0xABCDEF02, 30, 2), 0x95555555); - CHECK_EQ(run_Ins(0x01234567, 0xFABCDEFF, 0, 32), 0xFABCDEFF); - - // Results with positive sign. - CHECK_EQ(run_Ins(0x55555550, 0x80000001, 0, 1), 0x55555551); - CHECK_EQ(run_Ins(0x55555555, 0x40000001, 0, 32), 0x40000001); - CHECK_EQ(run_Ins(0x55555555, 0x20000001, 1, 31), 0x40000003); - CHECK_EQ(run_Ins(0x55555555, 0x80700001, 8, 24), 0x70000155); - CHECK_EQ(run_Ins(0x55555555, 0x80007001, 16, 16), 0x70015555); - CHECK_EQ(run_Ins(0x55555555, 0x80000071, 24, 8), 0x71555555); - CHECK_EQ(run_Ins(0x75555555, 0x40000000, 31, 1), 0x75555555); - - // Results with negative sign. 
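  // Ins(rt, rs, pos, size) replaces the size-bit field of rt starting at bit
  // pos with the low size bits of rs, leaving the other bits of rt intact.
  // E.g. run_Ins(0x75555555, 0x00000001, 31, 1) inserts a 1 into bit 31:
  // 0x75555555 | (1 << 31) = 0xF5555555.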
-  CHECK_EQ(run_Ins(0x85555550, 0x80000001, 0, 1), 0x85555551);
-  CHECK_EQ(run_Ins(0x55555555, 0x80000001, 0, 32), 0x80000001);
-  CHECK_EQ(run_Ins(0x55555555, 0x40000001, 1, 31), 0x80000003);
-  CHECK_EQ(run_Ins(0x55555555, 0x80800001, 8, 24), 0x80000155);
-  CHECK_EQ(run_Ins(0x55555555, 0x80008001, 16, 16), 0x80015555);
-  CHECK_EQ(run_Ins(0x55555555, 0x80000081, 24, 8), 0x81555555);
-  CHECK_EQ(run_Ins(0x75555555, 0x00000001, 31, 1), 0xF5555555);
-}
-
-uint32_t run_Ext(uint32_t source, uint16_t pos, uint16_t size) {
-  Isolate* isolate = CcTest::i_isolate();
-  HandleScope scope(isolate);
-
-  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-
-  __ li(v0, 0xFFFFFFFF);
-  __ li(t0, source);
-  __ Ext(v0, t0, pos, size);
-  __ jr(ra);
-  __ nop();
-
-  CodeDesc desc;
-  assm.GetCode(isolate, &desc);
-  Handle<Code> code =
-      Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-  auto f = GeneratedCode::FromCode(*code);
-
-  uint32_t res = reinterpret_cast<uint32_t>(f.Call(0, 0, 0, 0, 0));
-
-  return res;
-}
-
-TEST(Ext) {
-  CcTest::InitializeVM();
-
-  // Source values with negative sign.
-  // run_Ext(rs_value, pos, size), expected_result
-  CHECK_EQ(run_Ext(0x80000001, 0, 1), 0x00000001);
-  CHECK_EQ(run_Ext(0x80000001, 0, 32), 0x80000001);
-  CHECK_EQ(run_Ext(0x80000002, 1, 31), 0x40000001);
-  CHECK_EQ(run_Ext(0x80000100, 8, 24), 0x00800001);
-  CHECK_EQ(run_Ext(0x80010000, 16, 16), 0x00008001);
-  CHECK_EQ(run_Ext(0x81000000, 24, 8), 0x00000081);
-  CHECK_EQ(run_Ext(0x80000000, 31, 1), 0x00000001);
-
-  // Source values with positive sign.
-  CHECK_EQ(run_Ext(0x00000001, 0, 1), 0x00000001);
-  CHECK_EQ(run_Ext(0x40000001, 0, 32), 0x40000001);
-  CHECK_EQ(run_Ext(0x40000002, 1, 31), 0x20000001);
-  CHECK_EQ(run_Ext(0x40000100, 8, 24), 0x00400001);
-  CHECK_EQ(run_Ext(0x40010000, 16, 16), 0x00004001);
-  CHECK_EQ(run_Ext(0x41000000, 24, 8), 0x00000041);
-  CHECK_EQ(run_Ext(0x40000000, 31, 1), 0x00000000);
-}
-
-struct TestCaseMsaI5 {
-  uint64_t ws_lo;
-  uint64_t ws_hi;
-  uint32_t i5;
-};
-
-template <typename InstFunc, typename OperFunc>
-void run_msa_i5(struct TestCaseMsaI5* input, bool i5_sign_ext,
-                InstFunc GenerateI5InstructionFunc,
-                OperFunc GenerateOperationFunc) {
-  Isolate* isolate = CcTest::i_isolate();
-  HandleScope scope(isolate);
-
-  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-  CpuFeatureScope fscope(&assm, MIPS_SIMD);
-  msa_reg_t res;
-  int32_t i5 =
-      i5_sign_ext ?
static_cast(input->i5 << 27) >> 27 : input->i5; - - load_elements_of_vector(&assm, &(input->ws_lo), w0, t0, t1); - - GenerateI5InstructionFunc(assm, i5); - - store_elements_of_vector(&assm, w2, a0); - - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); -#ifdef OBJECT_PRINT - code->Print(std::cout); -#endif - auto f = GeneratedCode::FromCode(*code); - - (f.Call(&res, 0, 0, 0, 0)); - - CHECK_EQ(GenerateOperationFunc(input->ws_lo, input->i5), res.d[0]); - CHECK_EQ(GenerateOperationFunc(input->ws_hi, input->i5), res.d[1]); -} - -TEST(MSA_addvi_subvi) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - struct TestCaseMsaI5 tc[] = { - // ws_lo, ws_hi, i5 - {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x0000001F}, - {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0x0000000F}, - {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x00000005}, - {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0x00000010}, - {0xFFAB807F807FFFCD, 0x7F23FF80FF567F80, 0x0000000F}, - {0x80FFEFFF7F12807F, 0x807F80FF7FDEFF78, 0x00000010}}; - -#define ADDVI_DF(lanes, mask) \ - uint64_t res = 0; \ - for (int i = 0; i < lanes / 2; ++i) { \ - int shift = (kMSARegSize / lanes) * i; \ - res |= ((((ws >> shift) & mask) + i5) & mask) << shift; \ - } \ - return res - -#define SUBVI_DF(lanes, mask) \ - uint64_t res = 0; \ - for (int i = 0; i < lanes / 2; ++i) { \ - int shift = (kMSARegSize / lanes) * i; \ - res |= ((((ws >> shift) & mask) - i5) & mask) << shift; \ - } \ - return res - - for (size_t i = 0; i < sizeof(tc) / sizeof(TestCaseMsaI5); ++i) { - run_msa_i5( - &tc[i], false, - [](MacroAssembler& assm, int32_t i5) { __ addvi_b(w2, w0, i5); }, - [](uint64_t ws, uint32_t i5) { ADDVI_DF(kMSALanesByte, UINT8_MAX); }); - - run_msa_i5( - &tc[i], false, - [](MacroAssembler& assm, int32_t i5) { __ addvi_h(w2, w0, i5); }, - [](uint64_t ws, uint32_t i5) { ADDVI_DF(kMSALanesHalf, UINT16_MAX); }); - - run_msa_i5( - &tc[i], false, - [](MacroAssembler& assm, int32_t i5) { __ addvi_w(w2, w0, i5); }, - [](uint64_t ws, uint32_t i5) { ADDVI_DF(kMSALanesWord, UINT32_MAX); }); - - run_msa_i5( - &tc[i], false, - [](MacroAssembler& assm, int32_t i5) { __ addvi_d(w2, w0, i5); }, - [](uint64_t ws, uint32_t i5) { ADDVI_DF(kMSALanesDword, UINT64_MAX); }); - - run_msa_i5( - &tc[i], false, - [](MacroAssembler& assm, int32_t i5) { __ subvi_b(w2, w0, i5); }, - [](uint64_t ws, uint32_t i5) { SUBVI_DF(kMSALanesByte, UINT8_MAX); }); - - run_msa_i5( - &tc[i], false, - [](MacroAssembler& assm, int32_t i5) { __ subvi_h(w2, w0, i5); }, - [](uint64_t ws, uint32_t i5) { SUBVI_DF(kMSALanesHalf, UINT16_MAX); }); - - run_msa_i5( - &tc[i], false, - [](MacroAssembler& assm, int32_t i5) { __ subvi_w(w2, w0, i5); }, - [](uint64_t ws, uint32_t i5) { SUBVI_DF(kMSALanesWord, UINT32_MAX); }); - - run_msa_i5( - &tc[i], false, - [](MacroAssembler& assm, int32_t i5) { __ subvi_d(w2, w0, i5); }, - [](uint64_t ws, uint32_t i5) { SUBVI_DF(kMSALanesDword, UINT64_MAX); }); - } -#undef ADDVI_DF -#undef SUBVI_DF -} - -TEST(MSA_maxi_mini) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - struct TestCaseMsaI5 tc[] = { - // ws_lo, ws_hi, i5 - {0x7F80FF3480FF7F00, 0x8D7FFF80FF7F6780, 0x0000001F}, - {0x7F80FF3480FF7F00, 0x8D7FFF80FF7F6780, 0x0000000F}, - {0x7F80FF3480FF7F00, 0x8D7FFF80FF7F6780, 0x00000010}, - {0x80007FFF91DAFFFF, 0x7FFF8000FFFF5678, 0x0000001F}, - 
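      // The i5 values 0x1F, 0x0F and 0x10 probe the immediate boundaries:
      // sign-extended they read as -1, +15 and -16 respectively.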
{0x80007FFF91DAFFFF, 0x7FFF8000FFFF5678, 0x0000000F}, - {0x80007FFF91DAFFFF, 0x7FFF8000FFFF5678, 0x00000010}, - {0x7FFFFFFF80000000, 0x12345678FFFFFFFF, 0x0000001F}, - {0x7FFFFFFF80000000, 0x12345678FFFFFFFF, 0x0000000F}, - {0x7FFFFFFF80000000, 0x12345678FFFFFFFF, 0x00000010}, - {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x0000001F}, - {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0x0000000F}, - {0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, 0x00000010}, - {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x00000015}, - {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0x00000009}, - {0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, 0x00000003}}; - -#define MAXI_MINI_S_DF(lanes, mask, func) \ - [](uint64_t ws, uint32_t ui5) { \ - uint64_t res = 0; \ - int64_t i5 = ArithmeticShiftRight(static_cast(ui5) << 59, 59); \ - int elem_size = kMSARegSize / lanes; \ - for (int i = 0; i < lanes / 2; ++i) { \ - int shift = elem_size * i; \ - int64_t elem = \ - static_cast(((ws >> shift) & mask) << (64 - elem_size)) >> \ - (64 - elem_size); \ - res |= static_cast(func(elem, i5) & mask) << shift; \ - } \ - return res; \ - } - -#define MAXI_MINI_U_DF(lanes, mask, func) \ - [](uint64_t ws, uint32_t ui5) { \ - uint64_t res = 0; \ - int elem_size = kMSARegSize / lanes; \ - for (int i = 0; i < lanes / 2; ++i) { \ - int shift = elem_size * i; \ - uint64_t elem = (ws >> shift) & mask; \ - res |= (func(elem, static_cast(ui5)) & mask) << shift; \ - } \ - return res; \ - } - - for (size_t i = 0; i < sizeof(tc) / sizeof(TestCaseMsaI5); ++i) { - run_msa_i5( - &tc[i], true, - [](MacroAssembler& assm, int32_t i5) { __ maxi_s_b(w2, w0, i5); }, - MAXI_MINI_S_DF(kMSALanesByte, UINT8_MAX, std::max)); - - run_msa_i5( - &tc[i], true, - [](MacroAssembler& assm, int32_t i5) { __ maxi_s_h(w2, w0, i5); }, - MAXI_MINI_S_DF(kMSALanesHalf, UINT16_MAX, std::max)); - - run_msa_i5( - &tc[i], true, - [](MacroAssembler& assm, int32_t i5) { __ maxi_s_w(w2, w0, i5); }, - MAXI_MINI_S_DF(kMSALanesWord, UINT32_MAX, std::max)); - - run_msa_i5( - &tc[i], true, - [](MacroAssembler& assm, int32_t i5) { __ maxi_s_d(w2, w0, i5); }, - MAXI_MINI_S_DF(kMSALanesDword, UINT64_MAX, std::max)); - - run_msa_i5( - &tc[i], true, - [](MacroAssembler& assm, int32_t i5) { __ mini_s_b(w2, w0, i5); }, - MAXI_MINI_S_DF(kMSALanesByte, UINT8_MAX, std::min)); - - run_msa_i5( - &tc[i], true, - [](MacroAssembler& assm, int32_t i5) { __ mini_s_h(w2, w0, i5); }, - MAXI_MINI_S_DF(kMSALanesHalf, UINT16_MAX, std::min)); - - run_msa_i5( - &tc[i], true, - [](MacroAssembler& assm, int32_t i5) { __ mini_s_w(w2, w0, i5); }, - MAXI_MINI_S_DF(kMSALanesWord, UINT32_MAX, std::min)); - - run_msa_i5( - &tc[i], true, - [](MacroAssembler& assm, int32_t i5) { __ mini_s_d(w2, w0, i5); }, - MAXI_MINI_S_DF(kMSALanesDword, UINT64_MAX, std::min)); - - run_msa_i5( - &tc[i], false, - [](MacroAssembler& assm, int32_t i5) { __ maxi_u_b(w2, w0, i5); }, - MAXI_MINI_U_DF(kMSALanesByte, UINT8_MAX, std::max)); - - run_msa_i5( - &tc[i], false, - [](MacroAssembler& assm, int32_t i5) { __ maxi_u_h(w2, w0, i5); }, - MAXI_MINI_U_DF(kMSALanesHalf, UINT16_MAX, std::max)); - - run_msa_i5( - &tc[i], false, - [](MacroAssembler& assm, int32_t i5) { __ maxi_u_w(w2, w0, i5); }, - MAXI_MINI_U_DF(kMSALanesWord, UINT32_MAX, std::max)); - - run_msa_i5( - &tc[i], false, - [](MacroAssembler& assm, int32_t i5) { __ maxi_u_d(w2, w0, i5); }, - MAXI_MINI_U_DF(kMSALanesDword, UINT64_MAX, std::max)); - - run_msa_i5( - &tc[i], false, - [](MacroAssembler& assm, int32_t i5) { __ mini_u_b(w2, w0, i5); }, - MAXI_MINI_U_DF(kMSALanesByte, UINT8_MAX, std::min)); - - 
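    // The _s helpers above sign-extend the 5-bit immediate via
    // MAXI_MINI_S_DF (0x1F reads as -1, 0x10 as -16, 0x0F stays +15),
    // while the _u helpers compare it as an unsigned value in [0, 31].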
run_msa_i5( - &tc[i], false, - [](MacroAssembler& assm, int32_t i5) { __ mini_u_h(w2, w0, i5); }, - MAXI_MINI_U_DF(kMSALanesHalf, UINT16_MAX, std::min)); - - run_msa_i5( - &tc[i], false, - [](MacroAssembler& assm, int32_t i5) { __ mini_u_w(w2, w0, i5); }, - MAXI_MINI_U_DF(kMSALanesWord, UINT32_MAX, std::min)); - - run_msa_i5( - &tc[i], false, - [](MacroAssembler& assm, int32_t i5) { __ mini_u_d(w2, w0, i5); }, - MAXI_MINI_U_DF(kMSALanesDword, UINT64_MAX, std::min)); - } -#undef MAXI_MINI_S_DF -#undef MAXI_MINI_U_DF -} - -TEST(MSA_ceqi_clti_clei) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - struct TestCaseMsaI5 tc[] = { - {0xFF69751BB9A7D9C3, 0xF7A594AEC8FF8A9C, 0x0000001F}, - {0xE669FFFFB9A7D9C3, 0xF7A594AEFFFF8A9C, 0x0000001F}, - {0xFFFFFFFFB9A7D9C3, 0xF7A594AEFFFFFFFF, 0x0000001F}, - {0x2B0B5362C4E812DF, 0x3A0D80D68B3F0BC8, 0x0000000B}, - {0x2B66000BC4E812DF, 0x3A0D000B8B3F8BC8, 0x0000000B}, - {0x0000000BC4E812DF, 0x3A0D80D60000000B, 0x0000000B}, - {0xF38062E13E38F8B0, 0x8041FFDEF2BFE636, 0x00000010}, - {0xF35880003E38F8B0, 0x4F41FFDEF2BF8000, 0x00000010}, - {0xF35862E180000000, 0x80000000F2BFE636, 0x00000010}, - {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x00000015}, - {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0x00000009}, - {0xF30062E13E38F800, 0x4F00FFDEF2BF0036, 0x00000000}}; - -#define CEQI_CLTI_CLEI_S_DF(lanes, mask, func) \ - [](uint64_t ws, uint32_t ui5) { \ - uint64_t res = 0; \ - int elem_size = kMSARegSize / lanes; \ - int64_t i5 = ArithmeticShiftRight(static_cast(ui5) << 59, 59); \ - for (int i = 0; i < lanes / 2; ++i) { \ - int shift = elem_size * i; \ - int64_t elem = \ - static_cast(((ws >> shift) & mask) << (64 - elem_size)) >> \ - (64 - elem_size); \ - res |= static_cast((func)&mask) << shift; \ - } \ - return res; \ - } - -#define CEQI_CLTI_CLEI_U_DF(lanes, mask, func) \ - [](uint64_t ws, uint64_t ui5) { \ - uint64_t res = 0; \ - int elem_size = kMSARegSize / lanes; \ - for (int i = 0; i < lanes / 2; ++i) { \ - int shift = elem_size * i; \ - uint64_t elem = (ws >> shift) & mask; \ - res |= ((func)&mask) << shift; \ - } \ - return res; \ - } - - for (size_t i = 0; i < sizeof(tc) / sizeof(TestCaseMsaI5); ++i) { - run_msa_i5(&tc[i], true, - [](MacroAssembler& assm, int32_t i5) { __ ceqi_b(w2, w0, i5); }, - CEQI_CLTI_CLEI_S_DF(kMSALanesByte, UINT8_MAX, - !Compare(elem, i5) ? -1u : 0u)); - - run_msa_i5(&tc[i], true, - [](MacroAssembler& assm, int32_t i5) { __ ceqi_h(w2, w0, i5); }, - CEQI_CLTI_CLEI_S_DF(kMSALanesHalf, UINT16_MAX, - !Compare(elem, i5) ? -1u : 0u)); - - run_msa_i5(&tc[i], true, - [](MacroAssembler& assm, int32_t i5) { __ ceqi_w(w2, w0, i5); }, - CEQI_CLTI_CLEI_S_DF(kMSALanesWord, UINT32_MAX, - !Compare(elem, i5) ? -1u : 0u)); - - run_msa_i5(&tc[i], true, - [](MacroAssembler& assm, int32_t i5) { __ ceqi_d(w2, w0, i5); }, - CEQI_CLTI_CLEI_S_DF(kMSALanesDword, UINT64_MAX, - !Compare(elem, i5) ? -1u : 0u)); - - run_msa_i5( - &tc[i], true, - [](MacroAssembler& assm, int32_t i5) { __ clti_s_b(w2, w0, i5); }, - CEQI_CLTI_CLEI_S_DF(kMSALanesByte, UINT8_MAX, - (Compare(elem, i5) == -1) ? -1u : 0u)); - - run_msa_i5( - &tc[i], true, - [](MacroAssembler& assm, int32_t i5) { __ clti_s_h(w2, w0, i5); }, - CEQI_CLTI_CLEI_S_DF(kMSALanesHalf, UINT16_MAX, - (Compare(elem, i5) == -1) ? -1u : 0u)); - - run_msa_i5( - &tc[i], true, - [](MacroAssembler& assm, int32_t i5) { __ clti_s_w(w2, w0, i5); }, - CEQI_CLTI_CLEI_S_DF(kMSALanesWord, UINT32_MAX, - (Compare(elem, i5) == -1) ? 
-1u : 0u)); - - run_msa_i5( - &tc[i], true, - [](MacroAssembler& assm, int32_t i5) { __ clti_s_d(w2, w0, i5); }, - CEQI_CLTI_CLEI_S_DF(kMSALanesDword, UINT64_MAX, - (Compare(elem, i5) == -1) ? -1ull : 0ull)); - - run_msa_i5( - &tc[i], true, - [](MacroAssembler& assm, int32_t i5) { __ clei_s_b(w2, w0, i5); }, - CEQI_CLTI_CLEI_S_DF(kMSALanesByte, UINT8_MAX, - (Compare(elem, i5) != 1) ? -1u : 0u)); - - run_msa_i5( - &tc[i], true, - [](MacroAssembler& assm, int32_t i5) { __ clei_s_h(w2, w0, i5); }, - CEQI_CLTI_CLEI_S_DF(kMSALanesHalf, UINT16_MAX, - (Compare(elem, i5) != 1) ? -1u : 0u)); - - run_msa_i5( - &tc[i], true, - [](MacroAssembler& assm, int32_t i5) { __ clei_s_w(w2, w0, i5); }, - CEQI_CLTI_CLEI_S_DF(kMSALanesWord, UINT32_MAX, - (Compare(elem, i5) != 1) ? -1u : 0u)); - - run_msa_i5( - &tc[i], true, - [](MacroAssembler& assm, int32_t i5) { __ clei_s_d(w2, w0, i5); }, - CEQI_CLTI_CLEI_S_DF(kMSALanesDword, UINT64_MAX, - (Compare(elem, i5) != 1) ? -1ull : 0ull)); - - run_msa_i5( - &tc[i], false, - [](MacroAssembler& assm, int32_t i5) { __ clti_u_b(w2, w0, i5); }, - CEQI_CLTI_CLEI_U_DF(kMSALanesByte, UINT8_MAX, - (Compare(elem, ui5) == -1) ? -1ull : 0ull)); - - run_msa_i5( - &tc[i], false, - [](MacroAssembler& assm, int32_t i5) { __ clti_u_h(w2, w0, i5); }, - CEQI_CLTI_CLEI_U_DF(kMSALanesHalf, UINT16_MAX, - (Compare(elem, ui5) == -1) ? -1ull : 0ull)); - - run_msa_i5( - &tc[i], false, - [](MacroAssembler& assm, int32_t i5) { __ clti_u_w(w2, w0, i5); }, - CEQI_CLTI_CLEI_U_DF(kMSALanesWord, UINT32_MAX, - (Compare(elem, ui5) == -1) ? -1ull : 0ull)); - - run_msa_i5( - &tc[i], false, - [](MacroAssembler& assm, int32_t i5) { __ clti_u_d(w2, w0, i5); }, - CEQI_CLTI_CLEI_U_DF(kMSALanesDword, UINT64_MAX, - (Compare(elem, ui5) == -1) ? -1ull : 0ull)); - - run_msa_i5( - &tc[i], false, - [](MacroAssembler& assm, int32_t i5) { __ clei_u_b(w2, w0, i5); }, - CEQI_CLTI_CLEI_U_DF(kMSALanesByte, UINT8_MAX, - (Compare(elem, ui5) != 1) ? -1ull : 0ull)); - - run_msa_i5( - &tc[i], false, - [](MacroAssembler& assm, int32_t i5) { __ clei_u_h(w2, w0, i5); }, - CEQI_CLTI_CLEI_U_DF(kMSALanesHalf, UINT16_MAX, - (Compare(elem, ui5) != 1) ? -1ull : 0ull)); - - run_msa_i5( - &tc[i], false, - [](MacroAssembler& assm, int32_t i5) { __ clei_u_w(w2, w0, i5); }, - CEQI_CLTI_CLEI_U_DF(kMSALanesWord, UINT32_MAX, - (Compare(elem, ui5) != 1) ? -1ull : 0ull)); - - run_msa_i5( - &tc[i], false, - [](MacroAssembler& assm, int32_t i5) { __ clei_u_d(w2, w0, i5); }, - CEQI_CLTI_CLEI_U_DF(kMSALanesDword, UINT64_MAX, - (Compare(elem, ui5) != 1) ? 
-1ull : 0ull)); - } -#undef CEQI_CLTI_CLEI_S_DF -#undef CEQI_CLTI_CLEI_U_DF -} - -struct TestCaseMsa2R { - uint64_t ws_lo; - uint64_t ws_hi; - uint64_t exp_res_lo; - uint64_t exp_res_hi; -}; - -template -void run_msa_2r(const struct TestCaseMsa2R* input, - Func Generate2RInstructionFunc) { - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - CpuFeatureScope fscope(&assm, MIPS_SIMD); - msa_reg_t res; - - load_elements_of_vector(&assm, reinterpret_cast(input), w0, - t0, t1); - Generate2RInstructionFunc(assm); - store_elements_of_vector(&assm, w2, a0); - - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); -#ifdef OBJECT_PRINT - code->Print(std::cout); -#endif - auto f = GeneratedCode::FromCode(*code); - - (f.Call(&res, 0, 0, 0, 0)); - - CHECK_EQ(input->exp_res_lo, res.d[0]); - CHECK_EQ(input->exp_res_hi, res.d[1]); -} - -TEST(MSA_pcnt) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - struct TestCaseMsa2R tc_b[] = {// ws_lo, ws_hi, exp_res_lo, exp_res_hi - {0x0000000000000000, 0x0000000000000000, 0, 0}, - {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, - 0x0808080808080808, 0x0808080808080808}, - {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, - 0x0204050405050504, 0x0704030503070304}, - {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, - 0x0404040303040207, 0x0403010504060403}, - {0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, - 0x0603030405030503, 0x0502080605070504}}; - - struct TestCaseMsa2R tc_h[] = {// ws_lo, ws_hi, exp_res_lo, exp_res_hi - {0x0000000000000000, 0x0000000000000000, 0, 0}, - {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, - 0x0010001000100010, 0x0010001000100010}, - {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, - 0x00060009000A0009, 0x000B0008000A0007}, - {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, - 0x0008000700070009, 0x00070006000A0007}, - {0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, - 0x0009000700080008, 0x0007000E000C0009}}; - - struct TestCaseMsa2R tc_w[] = {// ws_lo, ws_hi, exp_res_lo, exp_res_hi - {0x0000000000000000, 0x0000000000000000, 0, 0}, - {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, - 0x0000002000000020, 0x0000002000000020}, - {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, - 0x0000000F00000013, 0x0000001300000011}, - {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, - 0x0000000F00000010, 0x0000000D00000011}, - {0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, - 0x0000001000000010, 0x0000001500000015}}; - - struct TestCaseMsa2R tc_d[] = { - // ws_lo, ws_hi, exp_res_lo, exp_res_hi - {0x0000000000000000, 0x0000000000000000, 0, 0}, - {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0x40, 0x40}, - {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x22, 0x24}, - {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0x1F, 0x1E}, - {0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, 0x20, 0x2A}}; - - for (size_t i = 0; i < sizeof(tc_b) / sizeof(TestCaseMsa2R); ++i) { - run_msa_2r(&tc_b[i], [](MacroAssembler& assm) { __ pcnt_b(w2, w0); }); - run_msa_2r(&tc_h[i], [](MacroAssembler& assm) { __ pcnt_h(w2, w0); }); - run_msa_2r(&tc_w[i], [](MacroAssembler& assm) { __ pcnt_w(w2, w0); }); - run_msa_2r(&tc_d[i], [](MacroAssembler& assm) { __ pcnt_d(w2, w0); }); - } -} - -TEST(MSA_nlzc) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - struct TestCaseMsa2R tc_b[] = {// ws_lo, ws_hi, exp_res_lo, exp_res_hi - {0x0000000000000000, 0x0000000000000000, 
- 0x0808080808080808, 0x0808080808080808}, - {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0, 0}, - {0x1169350B07030100, 0x7F011402381F0A6C, - 0x0301020405060708, 0x0107030602030401}, - {0x010806003478121F, 0x03013016073F7B08, - 0x0704050802010303, 0x0607020305020104}, - {0x0168321100083803, 0x07113F03013F1676, - 0x0701020308040206, 0x0503020607020301}}; - - struct TestCaseMsa2R tc_h[] = {// ws_lo, ws_hi, exp_res_lo, exp_res_hi - {0x0000000000000000, 0x0000000000000000, - 0x0010001000100010, 0x0010001000100010}, - {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0, 0}, - {0x00010007000A003C, 0x37A5001E00010002, - 0x000F000D000C000A, 0x0002000B000F000E}, - {0x0026066200780EDF, 0x003D0003000F00C8, - 0x000A000500090004, 0x000A000E000C0008}, - {0x335807E100480030, 0x01410FDE12BF5636, - 0x000200050009000A, 0x0007000400030001}}; - - struct TestCaseMsa2R tc_w[] = {// ws_lo, ws_hi, exp_res_lo, exp_res_hi - {0x0000000000000000, 0x0000000000000000, - 0x0000002000000020, 0x0000002000000020}, - {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0, 0}, - {0x00000005000007C3, 0x000014AE00006A9C, - 0x0000001D00000015, 0x0000001300000011}, - {0x00009362000112DF, 0x000380D6003F8BC8, - 0x000000100000000F, 0x0000000E0000000A}, - {0x135862E17E38F8B0, 0x0061FFDE03BFE636, - 0x0000000300000001, 0x0000000900000006}}; - - struct TestCaseMsa2R tc_d[] = { - // ws_lo, ws_hi, exp_res_lo, exp_res_hi - {0x0000000000000000, 0x0000000000000000, 0x40, 0x40}, - {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0, 0}, - {0x000000000000014E, 0x00000000000176DA, 0x37, 0x2F}, - {0x00000062C4E812DF, 0x000065D68B3F8BC8, 0x19, 0x11}, - {0x00000000E338F8B0, 0x0754534ACAB32654, 0x20, 0x5}}; - - for (size_t i = 0; i < sizeof(tc_b) / sizeof(TestCaseMsa2R); ++i) { - run_msa_2r(&tc_b[i], [](MacroAssembler& assm) { __ nlzc_b(w2, w0); }); - run_msa_2r(&tc_h[i], [](MacroAssembler& assm) { __ nlzc_h(w2, w0); }); - run_msa_2r(&tc_w[i], [](MacroAssembler& assm) { __ nlzc_w(w2, w0); }); - run_msa_2r(&tc_d[i], [](MacroAssembler& assm) { __ nlzc_d(w2, w0); }); - } -} - -TEST(MSA_nloc) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - struct TestCaseMsa2R tc_b[] = {// ws_lo, ws_hi, exp_res_lo, exp_res_hi - {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, - 0x0808080808080808, 0x0808080808080808}, - {0x0000000000000000, 0x0000000000000000, 0, 0}, - {0xEE96CAF4F8FCFEFF, 0x80FEEBFDC7E0F593, - 0x0301020405060708, 0x0107030602030401}, - {0xFEF7F9FFCB87EDE0, 0xFCFECFE9F8C084F7, - 0x0704050802010303, 0x0607020305020104}, - {0xFE97CDEEFFF7C7FC, 0xF8EEC0FCFEC0E989, - 0x0701020308040206, 0x0503020607020301}}; - - struct TestCaseMsa2R tc_h[] = {// ws_lo, ws_hi, exp_res_lo, exp_res_hi - {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, - 0x0010001000100010, 0x0010001000100010}, - {0x0000000000000000, 0x0000000000000000, 0, 0}, - {0xFFFEFFF8FFF5FFC3, 0xC85AFFE1FFFEFFFD, - 0x000F000D000C000A, 0x0002000B000F000E}, - {0xFFD9F99DFF87F120, 0xFFC2FFFCFFF0FF37, - 0x000A000500090004, 0x000A000E000C0008}, - {0xCCA7F81EFFB7FFCF, 0xFEBEF021ED40A9C9, - 0x000200050009000A, 0x0007000400030001}}; - - struct TestCaseMsa2R tc_w[] = {// ws_lo, ws_hi, exp_res_lo, exp_res_hi - {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, - 0x0000002000000020, 0x0000002000000020}, - {0x0000000000000000, 0x0000000000000000, 0, 0}, - {0xFFFFFFFAFFFFF83C, 0xFFFFEB51FFFF9563, - 0x0000001D00000015, 0x0000001300000011}, - {0xFFFF6C9DFFFEED20, 0xFFFC7F29FFC07437, - 0x000000100000000F, 0x0000000E0000000A}, - {0xECA79D1E81C7074F, 0xFF9E0021FC4019C9, - 0x0000000300000001, 
0x0000000900000006}}; - - struct TestCaseMsa2R tc_d[] = { - // ws_lo, ws_hi, exp_res_lo, exp_res_hi - {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0x40, 0x40}, - {0x0000000000000000, 0x0000000000000000, 0, 0}, - {0xFFFFFFFFFFFFFEB1, 0xFFFFFFFFFFFE8925, 0x37, 0x2F}, - {0xFFFFFF9D3B17ED20, 0xFFFF9A2974C07437, 0x19, 0x11}, - {0xFFFFFFFF1CC7074F, 0xF8ABACB5354CD9AB, 0x20, 0x5}}; - - for (size_t i = 0; i < sizeof(tc_b) / sizeof(TestCaseMsa2R); ++i) { - run_msa_2r(&tc_b[i], [](MacroAssembler& assm) { __ nloc_b(w2, w0); }); - run_msa_2r(&tc_h[i], [](MacroAssembler& assm) { __ nloc_h(w2, w0); }); - run_msa_2r(&tc_w[i], [](MacroAssembler& assm) { __ nloc_w(w2, w0); }); - run_msa_2r(&tc_d[i], [](MacroAssembler& assm) { __ nloc_d(w2, w0); }); - } -} - -struct TestCaseMsa2RF_F_U { - float ws1; - float ws2; - float ws3; - float ws4; - uint32_t exp_res_1; - uint32_t exp_res_2; - uint32_t exp_res_3; - uint32_t exp_res_4; -}; - -struct TestCaseMsa2RF_D_U { - double ws1; - double ws2; - uint64_t exp_res_1; - uint64_t exp_res_2; -}; - -TEST(MSA_fclass) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - -#define BIT(n) (0x1 << n) -#define SNAN_BIT BIT(0) -#define QNAN_BIT BIT(1) -#define NEG_INFINITY_BIT BIT((2)) -#define NEG_NORMAL_BIT BIT(3) -#define NEG_SUBNORMAL_BIT BIT(4) -#define NEG_ZERO_BIT BIT(5) -#define POS_INFINITY_BIT BIT(6) -#define POS_NORMAL_BIT BIT(7) -#define POS_SUBNORMAL_BIT BIT(8) -#define POS_ZERO_BIT BIT(9) - - const float inf_float = std::numeric_limits::infinity(); - const double inf_double = std::numeric_limits::infinity(); - - const struct TestCaseMsa2RF_F_U tc_s[] = { - {1.f, -0.00001, 208e10f, -34.8e-30f, POS_NORMAL_BIT, NEG_NORMAL_BIT, - POS_NORMAL_BIT, NEG_NORMAL_BIT}, - {inf_float, -inf_float, 0, -0.f, POS_INFINITY_BIT, NEG_INFINITY_BIT, - POS_ZERO_BIT, NEG_ZERO_BIT}, - {3.036e-40f, -6.392e-43f, 1.41e-45f, -1.17e-38f, POS_SUBNORMAL_BIT, - NEG_SUBNORMAL_BIT, POS_SUBNORMAL_BIT, NEG_SUBNORMAL_BIT}}; - - const struct TestCaseMsa2RF_D_U tc_d[] = { - {1., -0.00000001, POS_NORMAL_BIT, NEG_NORMAL_BIT}, - {208e10, -34.8e-300, POS_NORMAL_BIT, NEG_NORMAL_BIT}, - {inf_double, -inf_double, POS_INFINITY_BIT, NEG_INFINITY_BIT}, - {0, -0., POS_ZERO_BIT, NEG_ZERO_BIT}, - {1.036e-308, -6.392e-309, POS_SUBNORMAL_BIT, NEG_SUBNORMAL_BIT}, - {1.41e-323, -3.17e208, POS_SUBNORMAL_BIT, NEG_NORMAL_BIT}}; - - for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_U); ++i) { - run_msa_2r(reinterpret_cast(&tc_s[i]), - [](MacroAssembler& assm) { __ fclass_w(w2, w0); }); - } - for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_U); ++i) { - run_msa_2r(reinterpret_cast(&tc_d[i]), - [](MacroAssembler& assm) { __ fclass_d(w2, w0); }); - } - -#undef BIT -#undef SNAN_BIT -#undef QNAN_BIT -#undef NEG_INFINITY_BIT -#undef NEG_NORMAL_BIT -#undef NEG_SUBNORMAL_BIT -#undef NEG_ZERO_BIT -#undef POS_INFINITY_BIT -#undef POS_NORMAL_BIT -#undef POS_SUBNORMAL_BIT -#undef POS_ZERO_BIT -} - -struct TestCaseMsa2RF_F_I { - float ws1; - float ws2; - float ws3; - float ws4; - int32_t exp_res_1; - int32_t exp_res_2; - int32_t exp_res_3; - int32_t exp_res_4; -}; - -struct TestCaseMsa2RF_D_I { - double ws1; - double ws2; - int64_t exp_res_1; - int64_t exp_res_2; -}; - -TEST(MSA_ftrunc_s) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - const float inf_float = std::numeric_limits::infinity(); - const float qNaN_float = std::numeric_limits::quiet_NaN(); - const double inf_double = 
std::numeric_limits::infinity(); - const double qNaN_double = std::numeric_limits::quiet_NaN(); - const int32_t max_int32 = std::numeric_limits::max(); - const int32_t min_int32 = std::numeric_limits::min(); - const int64_t max_int64 = std::numeric_limits::max(); - const int64_t min_int64 = std::numeric_limits::min(); - - const struct TestCaseMsa2RF_F_I tc_s[] = { - {inf_float, 2.345f, -324.9235f, 30004.51f, max_int32, 2, -324, 30004}, - {-inf_float, -0.983f, 0.0832f, static_cast(max_int32) * 3.f, - min_int32, 0, 0, max_int32}, - {-23.125f, qNaN_float, 2 * static_cast(min_int32), -0.f, -23, 0, - min_int32, 0}}; - - const struct TestCaseMsa2RF_D_I tc_d[] = { - {inf_double, 2.345, max_int64, 2}, - {-324.9235, 246569139.51, -324, 246569139}, - {-inf_double, -0.983, min_int64, 0}, - {0.0832, 6 * static_cast(max_int64), 0, max_int64}, - {-21453889872.94, qNaN_double, -21453889872, 0}, - {2 * static_cast(min_int64), -0., min_int64, 0}}; - - for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_I); ++i) { - run_msa_2r(reinterpret_cast(&tc_s[i]), - [](MacroAssembler& assm) { __ ftrunc_s_w(w2, w0); }); - } - for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_I); ++i) { - run_msa_2r(reinterpret_cast(&tc_d[i]), - [](MacroAssembler& assm) { __ ftrunc_s_d(w2, w0); }); - } -} - -TEST(MSA_ftrunc_u) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - const float inf_float = std::numeric_limits::infinity(); - const float qNaN_float = std::numeric_limits::quiet_NaN(); - const double inf_double = std::numeric_limits::infinity(); - const double qNaN_double = std::numeric_limits::quiet_NaN(); - const uint32_t max_uint32 = std::numeric_limits::max(); - const uint64_t max_uint64 = std::numeric_limits::max(); - - const struct TestCaseMsa2RF_F_U tc_s[] = { - {inf_float, 2.345f, -324.9235f, 30004.51f, max_uint32, 2, 0, 30004}, - {-inf_float, 0.983f, 0.0832f, static_cast(max_uint32) * 3., 0, 0, - 0, max_uint32}, - {23.125f, qNaN_float, -0.982, -0.f, 23, 0, 0, 0}}; - - const struct TestCaseMsa2RF_D_U tc_d[] = { - {inf_double, 2.345, max_uint64, 2}, - {-324.9235, 246569139.51, 0, 246569139}, - {-inf_double, -0.983, 0, 0}, - {0.0832, 6 * static_cast(max_uint64), 0, max_uint64}, - {21453889872.94, qNaN_double, 21453889872, 0}, - {0.9889, -0., 0, 0}}; - - for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_U); ++i) { - run_msa_2r(reinterpret_cast(&tc_s[i]), - [](MacroAssembler& assm) { __ ftrunc_u_w(w2, w0); }); - } - for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_U); ++i) { - run_msa_2r(reinterpret_cast(&tc_d[i]), - [](MacroAssembler& assm) { __ ftrunc_u_d(w2, w0); }); - } -} - -struct TestCaseMsa2RF_F_F { - float ws1; - float ws2; - float ws3; - float ws4; - float exp_res_1; - float exp_res_2; - float exp_res_3; - float exp_res_4; -}; - -struct TestCaseMsa2RF_D_D { - double ws1; - double ws2; - double exp_res_1; - double exp_res_2; -}; - -TEST(MSA_fsqrt) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - const float inf_float = std::numeric_limits::infinity(); - const double inf_double = std::numeric_limits::infinity(); - - const struct TestCaseMsa2RF_F_F tc_s[] = { - {81.f, 576.f, inf_float, -0.f, 9.f, 24.f, inf_float, -0.f}}; - - const struct TestCaseMsa2RF_D_D tc_d[] = {{81., inf_double, 9., inf_double}, - {331776., -0., 576, -0.}}; - - for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_F); ++i) { - 
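    // run_msa_2r loads ws_lo/ws_hi into an MSA register, executes the
    // generated two-register op, and compares the stored result against
    // exp_res_lo/exp_res_hi; e.g. the first row checks sqrt(81.f) == 9.f
    // and sqrt(576.f) == 24.f lane-wise.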
run_msa_2r(reinterpret_cast(&tc_s[i]), - [](MacroAssembler& assm) { __ fsqrt_w(w2, w0); }); - } - for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_D); ++i) { - run_msa_2r(reinterpret_cast(&tc_d[i]), - [](MacroAssembler& assm) { __ fsqrt_d(w2, w0); }); - } -} - -TEST(MSA_frsqrt) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - const float inf_float = std::numeric_limits::infinity(); - const double inf_double = std::numeric_limits::infinity(); - - const struct TestCaseMsa2RF_F_F tc_s[] = { - {81.f, 576.f, inf_float, -0.f, 1.f / 9.f, 1.f / 24.f, 0.f, -inf_float}, - {0.f, 1.f / 576.f, 1.f / 81.f, 1.f / 4.f, inf_float, 24.f, 9.f, 2.f}}; - - const struct TestCaseMsa2RF_D_D tc_d[] = { - {81., inf_double, 1. / 9., 0.}, - {331776., -0., 1. / 576., -inf_double}, - {0., 1. / 81, inf_double, 9.}}; - - for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_F); ++i) { - run_msa_2r(reinterpret_cast(&tc_s[i]), - [](MacroAssembler& assm) { __ frsqrt_w(w2, w0); }); - } - for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_D); ++i) { - run_msa_2r(reinterpret_cast(&tc_d[i]), - [](MacroAssembler& assm) { __ frsqrt_d(w2, w0); }); - } -} - -TEST(MSA_frcp) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - const float inf_float = std::numeric_limits::infinity(); - const double inf_double = std::numeric_limits::infinity(); - - const struct TestCaseMsa2RF_F_F tc_s[] = { - {12.f, 576.f, inf_float, -0.f, 1.f / 12.f, 1.f / 576.f, 0.f, -inf_float}, - {0.f, 1.f / 576.f, -inf_float, 1.f / 400.f, inf_float, 576.f, -0.f, - 400.f}}; - - const struct TestCaseMsa2RF_D_D tc_d[] = { - {81., inf_double, 1. / 81., 0.}, - {331777., -0., 1. / 331777., -inf_double}, - {0., 1. / 80, inf_double, 80.}, - {1. 
/ 40000., -inf_double, 40000., -0.}}; - - for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_F); ++i) { - run_msa_2r(reinterpret_cast(&tc_s[i]), - [](MacroAssembler& assm) { __ frcp_w(w2, w0); }); - } - for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_D); ++i) { - run_msa_2r(reinterpret_cast(&tc_d[i]), - [](MacroAssembler& assm) { __ frcp_d(w2, w0); }); - } -} - -void test_frint_s(size_t data_size, TestCaseMsa2RF_F_F tc_d[], - int rounding_mode) { - for (size_t i = 0; i < data_size / sizeof(TestCaseMsa2RF_F_F); ++i) { - run_msa_2r(reinterpret_cast(&tc_d[i]), - [&rounding_mode](MacroAssembler& assm) { - MSAControlRegister msareg = {kMSACSRRegister}; - __ li(t0, static_cast(rounding_mode)); - __ cfcmsa(t1, msareg); - __ ctcmsa(msareg, t0); - __ frint_w(w2, w0); - __ ctcmsa(msareg, t1); - }); - } -} - -void test_frint_d(size_t data_size, TestCaseMsa2RF_D_D tc_d[], - int rounding_mode) { - for (size_t i = 0; i < data_size / sizeof(TestCaseMsa2RF_D_D); ++i) { - run_msa_2r(reinterpret_cast(&tc_d[i]), - [&rounding_mode](MacroAssembler& assm) { - MSAControlRegister msareg = {kMSACSRRegister}; - __ li(t0, static_cast(rounding_mode)); - __ cfcmsa(t1, msareg); - __ ctcmsa(msareg, t0); - __ frint_d(w2, w0); - __ ctcmsa(msareg, t1); - }); - } -} - -TEST(MSA_frint) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - struct TestCaseMsa2RF_F_F tc_s1[] = { - {0.f, 4.51f, 1.49f, -12.51f, 0.f, 5.f, 1.f, -13.f}, - {-1.32f, -23.38f, 2.8f, -32.5f, -1.f, -23.f, 3.f, -32.f}}; - - struct TestCaseMsa2RF_D_D tc_d1[] = {{0., 4.51, 0., 5.}, - {1.49, -12.51, 1., -13.}, - {-1.32, -23.38, -1., -23.}, - {2.8, -32.6, 3., -33.}}; - - test_frint_s(sizeof(tc_s1), tc_s1, kRoundToNearest); - test_frint_d(sizeof(tc_d1), tc_d1, kRoundToNearest); - - struct TestCaseMsa2RF_F_F tc_s2[] = { - {0.f, 4.5f, 1.49f, -12.51f, 0.f, 4.f, 1.f, -12.f}, - {-1.f, -23.38f, 2.8f, -32.6f, -1.f, -23.f, 2.f, -32.f}}; - - struct TestCaseMsa2RF_D_D tc_d2[] = {{0., 4.5, 0., 4.}, - {1.49, -12.51, 1., -12.}, - {-1., -23.38, -1., -23.}, - {2.8, -32.6, 2., -32.}}; - - test_frint_s(sizeof(tc_s2), tc_s2, kRoundToZero); - test_frint_d(sizeof(tc_d2), tc_d2, kRoundToZero); - - struct TestCaseMsa2RF_F_F tc_s3[] = { - {0.f, 4.5f, 1.49f, -12.51f, 0.f, 5.f, 2.f, -12.f}, - {-1.f, -23.38f, 2.8f, -32.6f, -1.f, -23.f, 3.f, -32.f}}; - - struct TestCaseMsa2RF_D_D tc_d3[] = {{0., 4.5, 0., 5.}, - {1.49, -12.51, 2., -12.}, - {-1., -23.38, -1., -23.}, - {2.8, -32.6, 3., -32.}}; - - test_frint_s(sizeof(tc_s3), tc_s3, kRoundToPlusInf); - test_frint_d(sizeof(tc_d3), tc_d3, kRoundToPlusInf); - - struct TestCaseMsa2RF_F_F tc_s4[] = { - {0.f, 4.5f, 1.49f, -12.51f, 0.f, 4.f, 1.f, -13.f}, - {-1.f, -23.38f, 2.8f, -32.6f, -1.f, -24.f, 2.f, -33.f}}; - - struct TestCaseMsa2RF_D_D tc_d4[] = {{0., 4.5, 0., 4.}, - {1.49, -12.51, 1., -13.}, - {-1., -23.38, -1., -24.}, - {2.8, -32.6, 2., -33.}}; - - test_frint_s(sizeof(tc_s4), tc_s4, kRoundToMinusInf); - test_frint_d(sizeof(tc_d4), tc_d4, kRoundToMinusInf); -} - -TEST(MSA_flog2) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - const float inf_float = std::numeric_limits::infinity(); - const double inf_double = std::numeric_limits::infinity(); - - struct TestCaseMsa2RF_F_F tc_s[] = { - {std::ldexp(0.58f, -48), std::ldexp(0.5f, 110), std::ldexp(1.11f, -130), - inf_float, -49.f, 109.f, -130.f, inf_float}, - {0.f, -0.f, std::ldexp(0.89f, -12), std::ldexp(0.32f, 126), -inf_float, - 
-inf_float, -13.f, 124.f}}; - - struct TestCaseMsa2RF_D_D tc_d[] = { - {std::ldexp(0.58, -48), std::ldexp(0.5, 110), -49., 109.}, - {std::ldexp(1.11, -1050), inf_double, -1050., inf_double}, - {0., -0., -inf_double, -inf_double}, - {std::ldexp(0.32, 1021), std::ldexp(1.23, -123), 1019., -123.}}; - - for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_F_F); ++i) { - run_msa_2r(reinterpret_cast(&tc_s[i]), - [](MacroAssembler& assm) { __ flog2_w(w2, w0); }); - } - - for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_D_D); ++i) { - run_msa_2r(reinterpret_cast(&tc_d[i]), - [](MacroAssembler& assm) { __ flog2_d(w2, w0); }); - } -} - -void test_ftint_s_s(size_t data_size, TestCaseMsa2RF_F_I tc_d[], - int rounding_mode) { - for (size_t i = 0; i < data_size / sizeof(TestCaseMsa2RF_F_I); ++i) { - run_msa_2r(reinterpret_cast(&tc_d[i]), - [&rounding_mode](MacroAssembler& assm) { - MSAControlRegister msareg = {kMSACSRRegister}; - __ li(t0, static_cast(rounding_mode)); - __ cfcmsa(t1, msareg); - __ ctcmsa(msareg, t0); - __ ftint_s_w(w2, w0); - __ ctcmsa(msareg, t1); - }); - } -} - -void test_ftint_s_d(size_t data_size, TestCaseMsa2RF_D_I tc_d[], - int rounding_mode) { - for (size_t i = 0; i < data_size / sizeof(TestCaseMsa2RF_D_I); ++i) { - run_msa_2r(reinterpret_cast(&tc_d[i]), - [&rounding_mode](MacroAssembler& assm) { - MSAControlRegister msareg = {kMSACSRRegister}; - __ li(t0, static_cast(rounding_mode)); - __ cfcmsa(t1, msareg); - __ ctcmsa(msareg, t0); - __ ftint_s_d(w2, w0); - __ ctcmsa(msareg, t1); - }); - } -} - -TEST(MSA_ftint_s) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - const float inf_float = std::numeric_limits::infinity(); - const double inf_double = std::numeric_limits::infinity(); - const int32_t int32_max = std::numeric_limits::max(); - const int32_t int32_min = std::numeric_limits::min(); - const int64_t int64_max = std::numeric_limits::max(); - const int64_t int64_min = std::numeric_limits::min(); - - struct TestCaseMsa2RF_F_I tc_s1[] = { - {0.f, 4.51f, 1.49f, -12.51f, 0, 5, 1, -13}, - {-0.32f, -23.38f, 2.8f, -32.6f, 0, -23, 3, -33}, - {inf_float, -inf_float, 3.f * int32_min, 4.f * int32_max, int32_max, - int32_min, int32_min, int32_max}}; - - struct TestCaseMsa2RF_D_I tc_d1[] = { - {0., 4.51, 0, 5}, - {1.49, -12.51, 1, -13}, - {-0.32, -23.38, 0, -23}, - {2.8, -32.6, 3, -33}, - {inf_double, -inf_double, int64_max, int64_min}, - {33.23 * int64_min, 4000. * int64_max, int64_min, int64_max}}; - - test_ftint_s_s(sizeof(tc_s1), tc_s1, kRoundToNearest); - test_ftint_s_d(sizeof(tc_d1), tc_d1, kRoundToNearest); - - struct TestCaseMsa2RF_F_I tc_s2[] = { - {0.f, 4.5f, 1.49f, -12.51f, 0, 4, 1, -12}, - {-0.f, -23.38f, 2.8f, -32.6f, -0, -23, 2, -32}, - {inf_float, -inf_float, 3.f * int32_min, 4.f * int32_max, int32_max, - int32_min, int32_min, int32_max}}; - - struct TestCaseMsa2RF_D_I tc_d2[] = { - {0., 4.5, 0, 4}, - {1.49, -12.51, 1, -12}, - {-0., -23.38, -0, -23}, - {2.8, -32.6, 2, -32}, - {inf_double, -inf_double, int64_max, int64_min}, - {33.23 * int64_min, 4000. 
* int64_max, int64_min, int64_max}}; - - test_ftint_s_s(sizeof(tc_s2), tc_s2, kRoundToZero); - test_ftint_s_d(sizeof(tc_d2), tc_d2, kRoundToZero); - - struct TestCaseMsa2RF_F_I tc_s3[] = { - {0.f, 4.5f, 1.49f, -12.51f, 0, 5, 2, -12}, - {-0.f, -23.38f, 2.8f, -32.6f, -0, -23, 3, -32}, - {inf_float, -inf_float, 3.f * int32_min, 4.f * int32_max, int32_max, - int32_min, int32_min, int32_max}}; - - struct TestCaseMsa2RF_D_I tc_d3[] = { - {0., 4.5, 0, 5}, - {1.49, -12.51, 2, -12}, - {-0., -23.38, -0, -23}, - {2.8, -32.6, 3, -32}, - {inf_double, -inf_double, int64_max, int64_min}, - {33.23 * int64_min, 4000. * int64_max, int64_min, int64_max}}; - - test_ftint_s_s(sizeof(tc_s3), tc_s3, kRoundToPlusInf); - test_ftint_s_d(sizeof(tc_d3), tc_d3, kRoundToPlusInf); - - struct TestCaseMsa2RF_F_I tc_s4[] = { - {0.f, 4.5f, 1.49f, -12.51f, 0, 4, 1, -13}, - {-0.f, -23.38f, 2.8f, -32.6f, -0, -24, 2, -33}, - {inf_float, -inf_float, 3.f * int32_min, 4.f * int32_max, int32_max, - int32_min, int32_min, int32_max}}; - - struct TestCaseMsa2RF_D_I tc_d4[] = { - {0., 4.5, 0, 4}, - {1.49, -12.51, 1, -13}, - {-0., -23.38, -0, -24}, - {2.8, -32.6, 2, -33}, - {inf_double, -inf_double, int64_max, int64_min}, - {33.23 * int64_min, 4000. * int64_max, int64_min, int64_max}}; - - test_ftint_s_s(sizeof(tc_s4), tc_s4, kRoundToMinusInf); - test_ftint_s_d(sizeof(tc_d4), tc_d4, kRoundToMinusInf); -} - -void test_ftint_u_s(size_t data_size, TestCaseMsa2RF_F_U tc_d[], - int rounding_mode) { - for (size_t i = 0; i < data_size / sizeof(TestCaseMsa2RF_F_U); ++i) { - run_msa_2r(reinterpret_cast(&tc_d[i]), - [&rounding_mode](MacroAssembler& assm) { - MSAControlRegister msareg = {kMSACSRRegister}; - __ li(t0, static_cast(rounding_mode)); - __ cfcmsa(t1, msareg); - __ ctcmsa(msareg, t0); - __ ftint_u_w(w2, w0); - __ ctcmsa(msareg, t1); - }); - } -} - -void test_ftint_u_d(size_t data_size, TestCaseMsa2RF_D_U tc_d[], - int rounding_mode) { - for (size_t i = 0; i < data_size / sizeof(TestCaseMsa2RF_D_U); ++i) { - run_msa_2r(reinterpret_cast(&tc_d[i]), - [&rounding_mode](MacroAssembler& assm) { - MSAControlRegister msareg = {kMSACSRRegister}; - __ li(t0, static_cast(rounding_mode)); - __ cfcmsa(t1, msareg); - __ ctcmsa(msareg, t0); - __ ftint_u_d(w2, w0); - __ ctcmsa(msareg, t1); - }); - } -} - -TEST(MSA_ftint_u) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - const float inf_float = std::numeric_limits::infinity(); - const double inf_double = std::numeric_limits::infinity(); - const uint32_t uint32_max = std::numeric_limits::max(); - const uint64_t uint64_max = std::numeric_limits::max(); - - struct TestCaseMsa2RF_F_U tc_s1[] = { - {0.f, 4.51f, 1.49f, -12.51f, 0, 5, 1, 0}, - {-0.32f, 23.38f, 2.8f, 32.6f, 0, 23, 3, 33}, - {inf_float, -inf_float, 0, 4.f * uint32_max, uint32_max, 0, 0, - uint32_max}}; - - struct TestCaseMsa2RF_D_U tc_d1[] = { - {0., 4.51, 0, 5}, - {1.49, -12.51, 1, 0}, - {-0.32, 23.38, 0, 23}, - {2.8, 32.6, 3, 33}, - {inf_double, -inf_double, uint64_max, 0}, - {-0., 4000. 
* uint64_max, 0, uint64_max}}; - - test_ftint_u_s(sizeof(tc_s1), tc_s1, kRoundToNearest); - test_ftint_u_d(sizeof(tc_d1), tc_d1, kRoundToNearest); - - struct TestCaseMsa2RF_F_U tc_s2[] = { - {0.f, 4.5f, 1.49f, -12.51f, 0, 4, 1, 0}, - {-0.f, 23.38f, 2.8f, 32.6f, 0, 23, 2, 32}, - {inf_float, -inf_float, 0., 4.f * uint32_max, uint32_max, 0, 0, - uint32_max}}; - - struct TestCaseMsa2RF_D_U tc_d2[] = { - {0., 4.5, 0, 4}, - {1.49, -12.51, 1, 0}, - {-0., 23.38, 0, 23}, - {2.8, 32.6, 2, 32}, - {inf_double, -inf_double, uint64_max, 0}, - {-0.2345, 4000. * uint64_max, 0, uint64_max}}; - - test_ftint_u_s(sizeof(tc_s2), tc_s2, kRoundToZero); - test_ftint_u_d(sizeof(tc_d2), tc_d2, kRoundToZero); - - struct TestCaseMsa2RF_F_U tc_s3[] = { - {0.f, 4.5f, 1.49f, -12.51f, 0, 5, 2, 0}, - {-0.f, 23.38f, 2.8f, 32.6f, 0, 24, 3, 33}, - {inf_float, -inf_float, 0, 4.f * uint32_max, uint32_max, 0, 0, - uint32_max}}; - - struct TestCaseMsa2RF_D_U tc_d3[] = { - {0., 4.5, 0, 5}, - {1.49, -12.51, 2, 0}, - {-0., 23.38, -0, 24}, - {2.8, 32.6, 3, 33}, - {inf_double, -inf_double, uint64_max, 0}, - {-0.5252, 4000. * uint64_max, 0, uint64_max}}; - - test_ftint_u_s(sizeof(tc_s3), tc_s3, kRoundToPlusInf); - test_ftint_u_d(sizeof(tc_d3), tc_d3, kRoundToPlusInf); - - struct TestCaseMsa2RF_F_U tc_s4[] = { - {0.f, 4.5f, 1.49f, -12.51f, 0, 4, 1, 0}, - {-0.f, 23.38f, 2.8f, 32.6f, 0, 23, 2, 32}, - {inf_float, -inf_float, 0, 4.f * uint32_max, uint32_max, 0, 0, - uint32_max}}; - - struct TestCaseMsa2RF_D_U tc_d4[] = { - {0., 4.5, 0, 4}, - {1.49, -12.51, 1, 0}, - {-0., 23.38, -0, 23}, - {2.8, 32.6, 2, 32}, - {inf_double, -inf_double, uint64_max, 0}, - {-0.098797, 4000. * uint64_max, 0, uint64_max}}; - - test_ftint_u_s(sizeof(tc_s4), tc_s4, kRoundToMinusInf); - test_ftint_u_d(sizeof(tc_d4), tc_d4, kRoundToMinusInf); -} - -struct TestCaseMsa2RF_U_F { - uint32_t ws1; - uint32_t ws2; - uint32_t ws3; - uint32_t ws4; - float exp_res_1; - float exp_res_2; - float exp_res_3; - float exp_res_4; -}; - -struct TestCaseMsa2RF_U_D { - uint64_t ws1; - uint64_t ws2; - double exp_res_1; - double exp_res_2; -}; - -TEST(MSA_ffint_u) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - struct TestCaseMsa2RF_U_F tc_s[] = { - {0, 345, 234, 1000, 0.f, 345.f, 234.f, 1000.f}}; - - struct TestCaseMsa2RF_U_D tc_d[] = {{0, 345, 0., 345.}, - {234, 1000, 234., 1000.}}; - - for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_U_F); ++i) { - run_msa_2r(reinterpret_cast(&tc_s[i]), - [](MacroAssembler& assm) { __ ffint_u_w(w2, w0); }); - } - for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_U_D); ++i) { - run_msa_2r(reinterpret_cast(&tc_d[i]), - [](MacroAssembler& assm) { __ ffint_u_d(w2, w0); }); - } -} - -struct TestCaseMsa2RF_I_F { - int32_t ws1; - int32_t ws2; - int32_t ws3; - int32_t ws4; - float exp_res_1; - float exp_res_2; - float exp_res_3; - float exp_res_4; -}; - -struct TestCaseMsa2RF_I_D { - int64_t ws1; - int64_t ws2; - double exp_res_1; - double exp_res_2; -}; - -TEST(MSA_ffint_s) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - struct TestCaseMsa2RF_I_F tc_s[] = { - {0, 345, -234, 1000, 0.f, 345.f, -234.f, 1000.f}}; - - struct TestCaseMsa2RF_I_D tc_d[] = {{0, 345, 0., 345.}, - {-234, 1000, -234., 1000.}}; - - for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_I_F); ++i) { - run_msa_2r(reinterpret_cast(&tc_s[i]), - [](MacroAssembler& assm) { __ ffint_s_w(w2, w0); }); - } - for (size_t i = 0; 
i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_I_D); ++i) { - run_msa_2r(reinterpret_cast(&tc_d[i]), - [](MacroAssembler& assm) { __ ffint_s_d(w2, w0); }); - } -} - -struct TestCaseMsa2RF_U16_F { - uint16_t ws1; - uint16_t ws2; - uint16_t ws3; - uint16_t ws4; - uint16_t ws5; - uint16_t ws6; - uint16_t ws7; - uint16_t ws8; - float exp_res_1; - float exp_res_2; - float exp_res_3; - float exp_res_4; -}; - -struct TestCaseMsa2RF_F_D { - float ws1; - float ws2; - float ws3; - float ws4; - double exp_res_1; - double exp_res_2; -}; - -TEST(MSA_fexupl) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - const float inf_float = std::numeric_limits::infinity(); - const double inf_double = std::numeric_limits::infinity(); - - struct TestCaseMsa2RF_U16_F tc_s[] = { - {1, 2, 0x7C00, 0x0C00, 0, 0x7C00, 0xFC00, 0x8000, 0.f, inf_float, - -inf_float, -0.f}, - {0xFC00, 0xFFFF, 0x00FF, 0x8000, 0x81FE, 0x8000, 0x0345, 0xAAAA, - -3.0398368835e-5f, -0.f, 4.9889088e-5f, -5.2062988281e-2f}, - {3, 4, 0x5555, 6, 0x2AAA, 0x8700, 0x7777, 0x6A8B, 5.2062988281e-2f, - -1.06811523458e-4f, 3.0576e4f, 3.35e3f}}; - - struct TestCaseMsa2RF_F_D tc_d[] = { - {0.f, 123.456f, inf_float, -0.f, inf_double, -0.}, - {-inf_float, -3.f, 0.f, -inf_float, 0., -inf_double}, - {2.3f, 3., 1.37747639043129518071e-41f, -3.22084585277826e35f, - 1.37747639043129518071e-41, -3.22084585277826e35}}; - - for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_U16_F); ++i) { - run_msa_2r(reinterpret_cast(&tc_s[i]), - [](MacroAssembler& assm) { __ fexupl_w(w2, w0); }); - } - for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_F_D); ++i) { - run_msa_2r(reinterpret_cast(&tc_d[i]), - [](MacroAssembler& assm) { __ fexupl_d(w2, w0); }); - } -} - -TEST(MSA_fexupr) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - const float inf_float = std::numeric_limits::infinity(); - const double inf_double = std::numeric_limits::infinity(); - - struct TestCaseMsa2RF_U16_F tc_s[] = { - {0, 0x7C00, 0xFC00, 0x8000, 1, 2, 0x7C00, 0x0C00, 0.f, inf_float, - -inf_float, -0.f}, - {0x81FE, 0x8000, 0x0345, 0xAAAA, 0xFC00, 0xFFFF, 0x00FF, 0x8000, - -3.0398368835e-5f, -0.f, 4.9889088e-5f, -5.2062988281e-2f}, - {0x2AAA, 0x8700, 0x7777, 0x6A8B, 3, 4, 0x5555, 6, 5.2062988281e-2f, - -1.06811523458e-4f, 3.0576e4f, 3.35e3f}}; - - struct TestCaseMsa2RF_F_D tc_d[] = { - {inf_float, -0.f, 0.f, 123.456f, inf_double, -0.}, - {0.f, -inf_float, -inf_float, -3.f, 0., -inf_double}, - {1.37747639043129518071e-41f, -3.22084585277826e35f, 2.3f, 3., - 1.37747639043129518071e-41, -3.22084585277826e35}}; - - for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_U16_F); ++i) { - run_msa_2r(reinterpret_cast(&tc_s[i]), - [](MacroAssembler& assm) { __ fexupr_w(w2, w0); }); - } - for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_F_D); ++i) { - run_msa_2r(reinterpret_cast(&tc_d[i]), - [](MacroAssembler& assm) { __ fexupr_d(w2, w0); }); - } -} - -struct TestCaseMsa2RF_U32_D { - uint32_t ws1; - uint32_t ws2; - uint32_t ws3; - uint32_t ws4; - double exp_res_1; - double exp_res_2; -}; - -TEST(MSA_ffql) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - struct TestCaseMsa2RF_U16_F tc_s[] = {{0, 3, 0xFFFF, 0x8000, 0x8000, 0xE000, - 0x0FF0, 0, -1.f, -0.25f, - 0.12451171875f, 0.f}}; - - struct TestCaseMsa2RF_U32_D tc_d[] = { - {0, 45, 0x80000000, 0xE0000000, -1., -0.25}, - {0x28379, 
0xAAAA5555, 0x024903D3, 0, 17.853239085525274277e-3, 0.}};
-
-  for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_U16_F); ++i) {
-    run_msa_2r(reinterpret_cast(&tc_s[i]),
-               [](MacroAssembler& assm) { __ ffql_w(w2, w0); });
-  }
-  for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_U32_D); ++i) {
-    run_msa_2r(reinterpret_cast(&tc_d[i]),
-               [](MacroAssembler& assm) { __ ffql_d(w2, w0); });
-  }
-}
-
-TEST(MSA_ffqr) {
-  if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
-    return;
-
-  CcTest::InitializeVM();
-
-  struct TestCaseMsa2RF_U16_F tc_s[] = {{0x8000, 0xE000, 0x0FF0, 0, 0, 3,
-                                         0xFFFF, 0x8000, -1.f, -0.25f,
-                                         0.12451171875f, 0.f}};
-
-  struct TestCaseMsa2RF_U32_D tc_d[] = {
-      {0x80000000, 0xE0000000, 0, 45, -1., -0.25},
-      {0x024903D3, 0, 0x28379, 0xAAAA5555, 17.853239085525274277e-3, 0.}};
-
-  for (size_t i = 0; i < sizeof(tc_s) / sizeof(TestCaseMsa2RF_U16_F); ++i) {
-    run_msa_2r(reinterpret_cast(&tc_s[i]),
-               [](MacroAssembler& assm) { __ ffqr_w(w2, w0); });
-  }
-  for (size_t i = 0; i < sizeof(tc_d) / sizeof(TestCaseMsa2RF_U32_D); ++i) {
-    run_msa_2r(reinterpret_cast(&tc_d[i]),
-               [](MacroAssembler& assm) { __ ffqr_d(w2, w0); });
-  }
-}
-
-struct TestCaseMsaVector {
-  uint64_t wd_lo;
-  uint64_t wd_hi;
-  uint64_t ws_lo;
-  uint64_t ws_hi;
-  uint64_t wt_lo;
-  uint64_t wt_hi;
-};
-
-template <typename InstFunc, typename OperFunc>
-void run_msa_vector(struct TestCaseMsaVector* input,
-                    InstFunc GenerateVectorInstructionFunc,
-                    OperFunc GenerateOperationFunc) {
-  Isolate* isolate = CcTest::i_isolate();
-  HandleScope scope(isolate);
-
-  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-  CpuFeatureScope fscope(&assm, MIPS_SIMD);
-  msa_reg_t res;
-
-  load_elements_of_vector(&assm, &(input->ws_lo), w0, t0, t1);
-  load_elements_of_vector(&assm, &(input->wt_lo), w2, t0, t1);
-  load_elements_of_vector(&assm, &(input->wd_lo), w4, t0, t1);
-
-  GenerateVectorInstructionFunc(assm);
-
-  store_elements_of_vector(&assm, w4, a0);
-
-  __ jr(ra);
-  __ nop();
-
-  CodeDesc desc;
-  assm.GetCode(isolate, &desc);
-  Handle<Code> code =
-      Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-#ifdef OBJECT_PRINT
-  code->Print(std::cout);
-#endif
-  auto f = GeneratedCode::FromCode(*code);
-
-  (f.Call(&res, 0, 0, 0, 0));
-
-  CHECK_EQ(GenerateOperationFunc(input->wd_lo, input->ws_lo, input->wt_lo),
-           res.d[0]);
-  CHECK_EQ(GenerateOperationFunc(input->wd_hi, input->ws_hi, input->wt_hi),
-           res.d[1]);
-}
-
-TEST(MSA_vector) {
-  if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
-    return;
-
-  CcTest::InitializeVM();
-
-  struct TestCaseMsaVector tc[] = {
-      // wd_lo, wd_hi, ws_lo, ws_hi, wt_lo, wt_hi
-      {0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, 0xDCD39D91F9057627,
-       0x64BE4F6DBE9CAA51, 0x6B23DE1A687D9CB9, 0x49547AAD691DA4CA},
-      {0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, 0x401614523D830549,
-       0xD7C46D613F50EDDD, 0x52284CBC60A1562B, 0x1756ED510D8849CD},
-      {0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, 0xD6E2D2EBCB40D72F,
-       0x13A619AFCE67B079, 0x36CCE284343E40F9, 0xB4E8F44FD148BF7F}};
-
-  for (size_t i = 0; i < sizeof(tc) / sizeof(TestCaseMsaVector); ++i) {
-    run_msa_vector(
-        &tc[i], [](MacroAssembler& assm) { __ and_v(w4, w0, w2); },
-        [](uint64_t wd, uint64_t ws, uint64_t wt) { return ws & wt; });
-    run_msa_vector(
-        &tc[i], [](MacroAssembler& assm) { __ or_v(w4, w0, w2); },
-        [](uint64_t wd, uint64_t ws, uint64_t wt) { return ws | wt; });
-    run_msa_vector(
-        &tc[i], [](MacroAssembler& assm) { __ nor_v(w4, w0, w2); },
-        [](uint64_t wd, uint64_t ws, uint64_t wt) { return ~(ws | wt); });
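// A standalone sketch (not from this file) of the reference formulas used by
// the bmnz_v/bmz_v/bsel_v cases below: bmnz copies ws bits where the wt mask
// is 1 and keeps wd bits elsewhere, bmz does the opposite, so
// bmz(wd, ws, wt) == bmnz(wd, ws, ~wt) for any operands.
#include <cassert>
#include <cstdint>

uint64_t bmnz(uint64_t wd, uint64_t ws, uint64_t wt) {
  return (ws & wt) | (wd & ~wt);  // take ws where the mask bit is set
}

uint64_t bmz(uint64_t wd, uint64_t ws, uint64_t wt) {
  return (ws & ~wt) | (wd & wt);  // take ws where the mask bit is clear
}

int main() {
  const uint64_t wd = 0xF35862E13E38F8B0, ws = 0xDCD39D91F9057627,
                 wt = 0x6B23DE1A687D9CB9;  // operands from the test above
  assert(bmz(wd, ws, wt) == bmnz(wd, ws, ~wt));
  return 0;
}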
- run_msa_vector( - &tc[i], [](MacroAssembler& assm) { __ xor_v(w4, w0, w2); }, - [](uint64_t wd, uint64_t ws, uint64_t wt) { return ws ^ wt; }); - run_msa_vector(&tc[i], [](MacroAssembler& assm) { __ bmnz_v(w4, w0, w2); }, - [](uint64_t wd, uint64_t ws, uint64_t wt) { - return (ws & wt) | (wd & ~wt); - }); - run_msa_vector(&tc[i], [](MacroAssembler& assm) { __ bmz_v(w4, w0, w2); }, - [](uint64_t wd, uint64_t ws, uint64_t wt) { - return (ws & ~wt) | (wd & wt); - }); - run_msa_vector(&tc[i], [](MacroAssembler& assm) { __ bsel_v(w4, w0, w2); }, - [](uint64_t wd, uint64_t ws, uint64_t wt) { - return (ws & ~wd) | (wt & wd); - }); - } -} - -struct TestCaseMsaBit { - uint64_t wd_lo; - uint64_t wd_hi; - uint64_t ws_lo; - uint64_t ws_hi; - uint32_t m; -}; - -template -void run_msa_bit(struct TestCaseMsaBit* input, InstFunc GenerateInstructionFunc, - OperFunc GenerateOperationFunc) { - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - CpuFeatureScope fscope(&assm, MIPS_SIMD); - msa_reg_t res; - - load_elements_of_vector(&assm, &(input->ws_lo), w0, t0, t1); - load_elements_of_vector(&assm, &(input->wd_lo), w2, t0, t1); - - GenerateInstructionFunc(assm, input->m); - - store_elements_of_vector(&assm, w2, a0); - - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); -#ifdef OBJECT_PRINT - code->Print(std::cout); -#endif - auto f = GeneratedCode::FromCode(*code); - - (f.Call(&res, 0, 0, 0, 0)); - - CHECK_EQ(GenerateOperationFunc(input->wd_lo, input->ws_lo, input->m), - res.d[0]); - CHECK_EQ(GenerateOperationFunc(input->wd_hi, input->ws_hi, input->m), - res.d[1]); -} - -TEST(MSA_slli_srai_srli) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - struct TestCaseMsaBit tc[] = { - // wd_lo, wd_hi ws_lo, ws_hi, m - {0, 0, 0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, 3}, - {0, 0, 0x64BE4F6DBE9CAA51, 0x6B23DE1A687D9CB9, 5}, - {0, 0, 0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 9}, - {0, 0, 0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 13}, - {0, 0, 0x566BE7BA4365B70A, 0x01EBBC1937D76CB4, 21}, - {0, 0, 0x380E2DEB9D3F8AAE, 0x017E0DE0BCC6CA42, 30}, - {0, 0, 0xA46A3A9BCB43F4E5, 0x1C62C8473BDFCFFB, 45}, - {0, 0, 0xF6759D85F23B5A2B, 0x5C042AE42C6D12C1, 61}}; - -#define SLLI_SRLI_DF(lanes, mask, func) \ - [](uint64_t wd, uint64_t ws, uint32_t m) { \ - uint64_t res = 0; \ - int elem_size = kMSARegSize / lanes; \ - for (int i = 0; i < lanes / 2; ++i) { \ - int shift = elem_size * i; \ - uint64_t elem = (ws >> shift) & mask; \ - res |= ((func)&mask) << shift; \ - } \ - return res; \ - } - -#define SRAI_DF(lanes, mask, func) \ - [](uint64_t wd, uint64_t ws, uint32_t m) { \ - uint64_t res = 0; \ - int elem_size = kMSARegSize / lanes; \ - for (int i = 0; i < lanes / 2; ++i) { \ - int shift = elem_size * i; \ - int64_t elem = \ - static_cast(((ws >> shift) & mask) << (64 - elem_size)) >> \ - (64 - elem_size); \ - res |= static_cast((func)&mask) << shift; \ - } \ - return res; \ - } - - for (size_t i = 0; i < sizeof(tc) / sizeof(TestCaseMsaBit); ++i) { - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ slli_b(w2, w0, m % 8); }, - SLLI_SRLI_DF(kMSALanesByte, UINT8_MAX, (elem << (m % elem_size)))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ slli_h(w2, w0, m % 16); }, - SLLI_SRLI_DF(kMSALanesHalf, UINT16_MAX, (elem << (m % 
elem_size)))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ slli_w(w2, w0, m % 32); }, - SLLI_SRLI_DF(kMSALanesWord, UINT32_MAX, (elem << (m % elem_size)))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ slli_d(w2, w0, m % 64); }, - SLLI_SRLI_DF(kMSALanesDword, UINT64_MAX, (elem << (m % elem_size)))); - - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ srli_b(w2, w0, m % 8); }, - SLLI_SRLI_DF(kMSALanesByte, UINT8_MAX, (elem >> (m % elem_size)))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ srli_h(w2, w0, m % 16); }, - SLLI_SRLI_DF(kMSALanesHalf, UINT16_MAX, (elem >> (m % elem_size)))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ srli_w(w2, w0, m % 32); }, - SLLI_SRLI_DF(kMSALanesWord, UINT32_MAX, (elem >> (m % elem_size)))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ srli_d(w2, w0, m % 64); }, - SLLI_SRLI_DF(kMSALanesDword, UINT64_MAX, (elem >> (m % elem_size)))); - - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ srlri_b(w2, w0, m % 8); }, - SLLI_SRLI_DF( - kMSALanesByte, UINT8_MAX, - (elem >> (m % elem_size)) + ((elem >> (m % elem_size - 1)) & 0x1))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ srlri_h(w2, w0, m % 16); }, - SLLI_SRLI_DF( - kMSALanesHalf, UINT16_MAX, - (elem >> (m % elem_size)) + ((elem >> (m % elem_size - 1)) & 0x1))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ srlri_w(w2, w0, m % 32); }, - SLLI_SRLI_DF( - kMSALanesWord, UINT32_MAX, - (elem >> (m % elem_size)) + ((elem >> (m % elem_size - 1)) & 0x1))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ srlri_d(w2, w0, m % 64); }, - SLLI_SRLI_DF( - kMSALanesDword, UINT64_MAX, - (elem >> (m % elem_size)) + ((elem >> (m % elem_size - 1)) & 0x1))); - - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ srai_b(w2, w0, m % 8); }, - SRAI_DF(kMSALanesByte, UINT8_MAX, - ArithmeticShiftRight(elem, m % elem_size))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ srai_h(w2, w0, m % 16); }, - SRAI_DF(kMSALanesHalf, UINT16_MAX, - ArithmeticShiftRight(elem, m % elem_size))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ srai_w(w2, w0, m % 32); }, - SRAI_DF(kMSALanesWord, UINT32_MAX, - ArithmeticShiftRight(elem, m % elem_size))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ srai_d(w2, w0, m % 64); }, - SRAI_DF(kMSALanesDword, UINT64_MAX, - ArithmeticShiftRight(elem, m % elem_size))); - - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ srari_b(w2, w0, m % 8); }, - SRAI_DF(kMSALanesByte, UINT8_MAX, - ArithmeticShiftRight(elem, m % elem_size) + - ((elem >> (m % elem_size - 1)) & 0x1))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ srari_h(w2, w0, m % 16); }, - SRAI_DF(kMSALanesHalf, UINT16_MAX, - ArithmeticShiftRight(elem, m % elem_size) + - ((elem >> (m % elem_size - 1)) & 0x1))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ srari_w(w2, w0, m % 32); }, - SRAI_DF(kMSALanesWord, UINT32_MAX, - ArithmeticShiftRight(elem, m % elem_size) + - ((elem >> (m % elem_size - 1)) & 0x1))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ srari_d(w2, w0, m % 64); }, - SRAI_DF(kMSALanesDword, UINT64_MAX, - ArithmeticShiftRight(elem, m % elem_size) + - ((elem >> (m % elem_size - 1)) & 0x1))); - } -#undef SLLI_SRLI_DF -#undef SRAI_DF -} - 
-TEST(MSA_bclri_bseti_bnegi) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - struct TestCaseMsaBit tc[] = { - // wd_lo, wd_hi, ws_lo, ws_hi, m - {0, 0, 0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, 3}, - {0, 0, 0x64BE4F6DBE9CAA51, 0x6B23DE1A687D9CB9, 5}, - {0, 0, 0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 9}, - {0, 0, 0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 13}, - {0, 0, 0x566BE7BA4365B70A, 0x01EBBC1937D76CB4, 21}, - {0, 0, 0x380E2DEB9D3F8AAE, 0x017E0DE0BCC6CA42, 30}, - {0, 0, 0xA46A3A9BCB43F4E5, 0x1C62C8473BDFCFFB, 45}, - {0, 0, 0xF6759D85F23B5A2B, 0x5C042AE42C6D12C1, 61}}; - -#define BCLRI_BSETI_BNEGI_DF(lanes, mask, func) \ - [](uint64_t wd, uint64_t ws, uint32_t m) { \ - uint64_t res = 0; \ - int elem_size = kMSARegSize / lanes; \ - for (int i = 0; i < lanes / 2; ++i) { \ - int shift = elem_size * i; \ - uint64_t elem = (ws >> shift) & mask; \ - res |= ((func)&mask) << shift; \ - } \ - return res; \ - } - - for (size_t i = 0; i < sizeof(tc) / sizeof(TestCaseMsaBit); ++i) { - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ bclri_b(w2, w0, m % 8); }, - BCLRI_BSETI_BNEGI_DF(kMSALanesByte, UINT8_MAX, - (~(1ull << (m % elem_size)) & elem))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ bclri_h(w2, w0, m % 16); }, - BCLRI_BSETI_BNEGI_DF(kMSALanesHalf, UINT16_MAX, - (~(1ull << (m % elem_size)) & elem))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ bclri_w(w2, w0, m % 32); }, - BCLRI_BSETI_BNEGI_DF(kMSALanesWord, UINT32_MAX, - (~(1ull << (m % elem_size)) & elem))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ bclri_d(w2, w0, m % 64); }, - BCLRI_BSETI_BNEGI_DF(kMSALanesDword, UINT64_MAX, - (~(1ull << (m % elem_size)) & elem))); - - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ bseti_b(w2, w0, m % 8); }, - BCLRI_BSETI_BNEGI_DF(kMSALanesByte, UINT8_MAX, - ((1ull << (m % elem_size)) | elem))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ bseti_h(w2, w0, m % 16); }, - BCLRI_BSETI_BNEGI_DF(kMSALanesHalf, UINT16_MAX, - ((1ull << (m % elem_size)) | elem))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ bseti_w(w2, w0, m % 32); }, - BCLRI_BSETI_BNEGI_DF(kMSALanesWord, UINT32_MAX, - ((1ull << (m % elem_size)) | elem))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ bseti_d(w2, w0, m % 64); }, - BCLRI_BSETI_BNEGI_DF(kMSALanesDword, UINT64_MAX, - ((1ull << (m % elem_size)) | elem))); - - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ bnegi_b(w2, w0, m % 8); }, - BCLRI_BSETI_BNEGI_DF(kMSALanesByte, UINT8_MAX, - ((1ull << (m % elem_size)) ^ elem))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ bnegi_h(w2, w0, m % 16); }, - BCLRI_BSETI_BNEGI_DF(kMSALanesHalf, UINT16_MAX, - ((1ull << (m % elem_size)) ^ elem))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ bnegi_w(w2, w0, m % 32); }, - BCLRI_BSETI_BNEGI_DF(kMSALanesWord, UINT32_MAX, - ((1ull << (m % elem_size)) ^ elem))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ bnegi_d(w2, w0, m % 64); }, - BCLRI_BSETI_BNEGI_DF(kMSALanesDword, UINT64_MAX, - ((1ull << (m % elem_size)) ^ elem))); - } -#undef BCLRI_BSETI_BNEGI_DF -} - -TEST(MSA_binsli_binsri) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - struct TestCaseMsaBit tc[] = {// wd_lo, wd_hi, ws_lo, ws_hi, m - 
{0x53F4457553BBD5B4, 0x5FB8250EACC296B2, - 0xF35862E13E38F8B0, 0x4F41FFDEF2BFE636, 3}, - {0xF61BFDB0F312E6FC, 0xC9437568DD1EA925, - 0x64BE4F6DBE9CAA51, 0x6B23DE1A687D9CB9, 5}, - {0x53F4457553BBD5B4, 0x5FB8250EACC296B2, - 0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 9}, - {0xF61BFDB0F312E6FC, 0xC9437568DD1EA925, - 0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 13}, - {0x53F4457553BBD5B4, 0x5FB8250EACC296B2, - 0x566BE7BA4365B70A, 0x01EBBC1937D76CB4, 21}, - {0xF61BFDB0F312E6FC, 0xC9437568DD1EA925, - 0x380E2DEB9D3F8AAE, 0x017E0DE0BCC6CA42, 30}, - {0x53F4457553BBD5B4, 0x5FB8250EACC296B2, - 0xA46A3A9BCB43F4E5, 0x1C62C8473BDFCFFB, 45}, - {0xF61BFDB0F312E6FC, 0xC9437568DD1EA925, - 0xF6759D85F23B5A2B, 0x5C042AE42C6D12C1, 61}}; - -#define BINSLI_BINSRI_DF(lanes, mask, func) \ - [](uint64_t wd, uint64_t ws, uint32_t m) { \ - uint64_t res = 0; \ - int elem_size = kMSARegSize / lanes; \ - int bits = m % elem_size + 1; \ - for (int i = 0; i < lanes / 2; ++i) { \ - int shift = elem_size * i; \ - uint64_t ws_elem = (ws >> shift) & mask; \ - if (bits == elem_size) { \ - res |= (ws_elem & mask) << shift; \ - } else { \ - uint64_t r_mask = (1ull << bits) - 1; \ - uint64_t l_mask = r_mask << (elem_size - bits); \ - USE(l_mask); \ - uint64_t wd_elem = (wd >> shift) & mask; \ - res |= ((func)&mask) << shift; \ - } \ - } \ - return res; \ - } - - for (size_t i = 0; i < sizeof(tc) / sizeof(TestCaseMsaBit); ++i) { - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ binsli_b(w2, w0, m % 8); }, - BINSLI_BINSRI_DF(kMSALanesByte, UINT8_MAX, - ((ws_elem & l_mask) | (wd_elem & ~l_mask)))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ binsli_h(w2, w0, m % 16); }, - BINSLI_BINSRI_DF(kMSALanesHalf, UINT16_MAX, - ((ws_elem & l_mask) | (wd_elem & ~l_mask)))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ binsli_w(w2, w0, m % 32); }, - BINSLI_BINSRI_DF(kMSALanesWord, UINT32_MAX, - ((ws_elem & l_mask) | (wd_elem & ~l_mask)))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ binsli_d(w2, w0, m % 64); }, - BINSLI_BINSRI_DF(kMSALanesDword, UINT64_MAX, - ((ws_elem & l_mask) | (wd_elem & ~l_mask)))); - - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ binsri_b(w2, w0, m % 8); }, - BINSLI_BINSRI_DF(kMSALanesByte, UINT8_MAX, - ((ws_elem & r_mask) | (wd_elem & ~r_mask)))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ binsri_h(w2, w0, m % 16); }, - BINSLI_BINSRI_DF(kMSALanesHalf, UINT16_MAX, - ((ws_elem & r_mask) | (wd_elem & ~r_mask)))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ binsri_w(w2, w0, m % 32); }, - BINSLI_BINSRI_DF(kMSALanesWord, UINT32_MAX, - ((ws_elem & r_mask) | (wd_elem & ~r_mask)))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ binsri_d(w2, w0, m % 64); }, - BINSLI_BINSRI_DF(kMSALanesDword, UINT64_MAX, - ((ws_elem & r_mask) | (wd_elem & ~r_mask)))); - } -#undef BINSLI_BINSRI_DF -} - -TEST(MSA_sat_s_sat_u) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - struct TestCaseMsaBit tc[] = { - // wd_lo, wd_hi, ws_lo, ws_hi, m - {0, 0, 0xF35862E13E3808B0, 0x4F41FFDEF2BFE636, 3}, - {0, 0, 0x64BE4F6DBE9CAA51, 0x6B23DE1A687D9CB9, 5}, - {0, 0, 0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 9}, - {0, 0, 0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 13}, - {0, 0, 0x566BE7BA4365B70A, 0x01EBBC1937D76CB4, 21}, - {0, 0, 0x380E2DEB9D3F8AAE, 0x017E0DE0BCC6CA42, 30}, - {0, 0, 0xA46A3A9BCB43F4E5, 0x1C62C8473BDFCFFB, 
45}, - {0, 0, 0xF6759D85F23B5A2B, 0x5C042AE42C6D12C1, 61}}; - -#define SAT_DF(lanes, mask, func) \ - [](uint64_t wd, uint64_t ws, uint32_t m) { \ - uint64_t res = 0; \ - int elem_size = kMSARegSize / lanes; \ - m %= elem_size; \ - for (int i = 0; i < lanes / 2; ++i) { \ - int shift = elem_size * i; \ - uint64_t elem_u64 = (ws >> shift) & mask; \ - int64_t elem_i64 = static_cast(elem_u64 << (64 - elem_size)) >> \ - (64 - elem_size); \ - USE(elem_i64); \ - res |= ((func)&mask) << shift; \ - } \ - return res; \ - } - -#define M_MAX_INT(x) static_cast((1LL << ((x)-1)) - 1) -#define M_MIN_INT(x) static_cast(-(1LL << ((x)-1))) -#define M_MAX_UINT(x) static_cast(-1ULL >> (64 - (x))) - - for (size_t i = 0; i < sizeof(tc) / sizeof(TestCaseMsaBit); ++i) { - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ sat_u_b(w2, w0, m % 8); }, - SAT_DF(kMSALanesByte, UINT8_MAX, - (elem_u64 < M_MAX_UINT(m + 1) ? elem_u64 : M_MAX_UINT(m + 1)))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ sat_u_h(w2, w0, m % 16); }, - SAT_DF(kMSALanesHalf, UINT16_MAX, - (elem_u64 < M_MAX_UINT(m + 1) ? elem_u64 : M_MAX_UINT(m + 1)))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ sat_u_w(w2, w0, m % 32); }, - SAT_DF(kMSALanesWord, UINT32_MAX, - (elem_u64 < M_MAX_UINT(m + 1) ? elem_u64 : M_MAX_UINT(m + 1)))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ sat_u_d(w2, w0, m % 64); }, - SAT_DF(kMSALanesDword, UINT64_MAX, - (elem_u64 < M_MAX_UINT(m + 1) ? elem_u64 : M_MAX_UINT(m + 1)))); - - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ sat_s_b(w2, w0, m % 8); }, - SAT_DF( - kMSALanesByte, UINT8_MAX, - (elem_i64 < M_MIN_INT(m + 1) - ? M_MIN_INT(m + 1) - : elem_i64 > M_MAX_INT(m + 1) ? M_MAX_INT(m + 1) : elem_i64))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ sat_s_h(w2, w0, m % 16); }, - SAT_DF( - kMSALanesHalf, UINT16_MAX, - (elem_i64 < M_MIN_INT(m + 1) - ? M_MIN_INT(m + 1) - : elem_i64 > M_MAX_INT(m + 1) ? M_MAX_INT(m + 1) : elem_i64))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ sat_s_w(w2, w0, m % 32); }, - SAT_DF( - kMSALanesWord, UINT32_MAX, - (elem_i64 < M_MIN_INT(m + 1) - ? M_MIN_INT(m + 1) - : elem_i64 > M_MAX_INT(m + 1) ? M_MAX_INT(m + 1) : elem_i64))); - run_msa_bit( - &tc[i], - [](MacroAssembler& assm, uint32_t m) { __ sat_s_d(w2, w0, m % 64); }, - SAT_DF( - kMSALanesDword, UINT64_MAX, - (elem_i64 < M_MIN_INT(m + 1) - ? M_MIN_INT(m + 1) - : elem_i64 > M_MAX_INT(m + 1) ? 
M_MAX_INT(m + 1) : elem_i64)));
-  }
-
-#undef SAT_DF
-#undef M_MAX_INT
-#undef M_MIN_INT
-#undef M_MAX_UINT
-}
-
-template <typename InstFunc, typename OperFunc>
-void run_msa_i10(int32_t input, InstFunc GenerateVectorInstructionFunc,
-                 OperFunc GenerateOperationFunc) {
-  Isolate* isolate = CcTest::i_isolate();
-  HandleScope scope(isolate);
-
-  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-  CpuFeatureScope fscope(&assm, MIPS_SIMD);
-  msa_reg_t res;
-
-  GenerateVectorInstructionFunc(assm, input);
-
-  store_elements_of_vector(&assm, w0, a0);
-
-  __ jr(ra);
-  __ nop();
-
-  CodeDesc desc;
-  assm.GetCode(isolate, &desc);
-  Handle<Code> code =
-      Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-#ifdef OBJECT_PRINT
-  code->Print(std::cout);
-#endif
-  auto f = GeneratedCode::FromCode(*code);
-
-  (f.Call(&res, 0, 0, 0, 0));
-
-  CHECK_EQ(GenerateOperationFunc(input), res.d[0]);
-  CHECK_EQ(GenerateOperationFunc(input), res.d[1]);
-}
-
-TEST(MSA_ldi) {
-  if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
-    return;
-
-  CcTest::InitializeVM();
-
-  // signed 10bit integers: -512 .. 511
-  int32_t tc[] = {0, -1, 1, 256, -256, -178, 352, -512, 511};
-
-#define LDI_DF(lanes, mask)                                        \
-  [](int32_t s10) {                                                \
-    uint64_t res = 0;                                              \
-    int elem_size = kMSARegSize / lanes;                           \
-    int64_t s10_64 =                                               \
-        ArithmeticShiftRight(static_cast<int64_t>(s10) << 54, 54); \
-    for (int i = 0; i < lanes / 2; ++i) {                          \
-      int shift = elem_size * i;                                   \
-      res |= static_cast<uint64_t>(s10_64 & mask) << shift;        \
-    }                                                              \
-    return res;                                                    \
-  }
-
-  for (size_t i = 0; i < sizeof(tc) / sizeof(int32_t); ++i) {
-    run_msa_i10(tc[i],
-                [](MacroAssembler& assm, int32_t s10) { __ ldi_b(w0, s10); },
-                LDI_DF(kMSALanesByte, UINT8_MAX));
-    run_msa_i10(tc[i],
-                [](MacroAssembler& assm, int32_t s10) { __ ldi_h(w0, s10); },
-                LDI_DF(kMSALanesHalf, UINT16_MAX));
-    run_msa_i10(tc[i],
-                [](MacroAssembler& assm, int32_t s10) { __ ldi_w(w0, s10); },
-                LDI_DF(kMSALanesWord, UINT32_MAX));
-    run_msa_i10(tc[i],
-                [](MacroAssembler& assm, int32_t s10) { __ ldi_d(w0, s10); },
-                LDI_DF(kMSALanesDword, UINT64_MAX));
-  }
-#undef LDI_DF
-}
-
-template <typename T, typename InstFunc>
-void run_msa_mi10(InstFunc GenerateVectorInstructionFunc) {
-  Isolate* isolate = CcTest::i_isolate();
-  HandleScope scope(isolate);
-
-  MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
-  CpuFeatureScope fscope(&assm, MIPS_SIMD);
-  T in_test_vector[1024];
-  T out_test_vector[1024];
-
-  T* in_array_middle = in_test_vector + arraysize(in_test_vector) / 2;
-  T* out_array_middle = out_test_vector + arraysize(out_test_vector) / 2;
-
-  v8::base::RandomNumberGenerator rand_gen(FLAG_random_seed);
-  for (unsigned int i = 0; i < arraysize(in_test_vector); i++) {
-    in_test_vector[i] = static_cast<T>(rand_gen.NextInt());
-    out_test_vector[i] = 0;
-  }
-
-  GenerateVectorInstructionFunc(assm);
-
-  __ jr(ra);
-  __ nop();
-
-  CodeDesc desc;
-  assm.GetCode(isolate, &desc);
-  Handle<Code> code =
-      Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-#ifdef OBJECT_PRINT
-  code->Print(std::cout);
-#endif
-  auto f = GeneratedCode::FromCode(*code);
-
-  (f.Call(in_array_middle, out_array_middle, 0, 0, 0));
-
-  CHECK_EQ(memcmp(in_test_vector, out_test_vector, arraysize(in_test_vector)),
-           0);
-}
-
-TEST(MSA_load_store_vector) {
-  if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD))
-    return;
-
-  CcTest::InitializeVM();
-
-  run_msa_mi10([](MacroAssembler& assm) {
-    for (int i = -512; i < 512; i += 16) {
-      __ ld_b(w0, MemOperand(a0, i));
-      __ st_b(w0, MemOperand(a1, i));
-    }
-  });
-
- 
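// A standalone sketch (hypothetical helper, not from this file): MSA ld/st
// encode a signed 10-bit immediate that the hardware scales by the element
// size, so only element-aligned byte offsets in that scaled range encode
// directly; the assembler has to synthesize any other offset, such as some
// of those the ld_d loop below visits, with extra address arithmetic.
#include <cassert>
#include <cstdint>

bool IsDirectlyEncodableMsaOffset(int32_t byte_offset, int bytes_per_element) {
  if (byte_offset % bytes_per_element != 0) return false;
  const int32_t s10 = byte_offset / bytes_per_element;
  return s10 >= -512 && s10 <= 511;  // signed 10-bit field
}

int main() {
  assert(IsDirectlyEncodableMsaOffset(-512, 1));   // first ld_b offset above
  assert(!IsDirectlyEncodableMsaOffset(-510, 8));  // an ld_d offset below
  return 0;
}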
run_msa_mi10([](MacroAssembler& assm) { - for (int i = -512; i < 512; i += 8) { - __ ld_h(w0, MemOperand(a0, i)); - __ st_h(w0, MemOperand(a1, i)); - } - }); - run_msa_mi10([](MacroAssembler& assm) { - for (int i = -512; i < 512; i += 4) { - __ ld_w(w0, MemOperand(a0, i)); - __ st_w(w0, MemOperand(a1, i)); - } - }); - run_msa_mi10([](MacroAssembler& assm) { - for (int i = -512; i < 512; i += 2) { - __ ld_d(w0, MemOperand(a0, i)); - __ st_d(w0, MemOperand(a1, i)); - } - }); -} - -struct TestCaseMsa3R { - uint64_t ws_lo; - uint64_t ws_hi; - uint64_t wt_lo; - uint64_t wt_hi; - uint64_t wd_lo; - uint64_t wd_hi; -}; - -static const uint64_t Unpredictable = 0x312014017725ll; - -template -void run_msa_3r(struct TestCaseMsa3R* input, InstFunc GenerateI5InstructionFunc, - OperFunc GenerateOperationFunc) { - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - CpuFeatureScope fscope(&assm, MIPS_SIMD); - msa_reg_t res; - - load_elements_of_vector(&assm, &(input->wt_lo), w0, t0, t1); - load_elements_of_vector(&assm, &(input->ws_lo), w1, t0, t1); - load_elements_of_vector(&assm, &(input->wd_lo), w2, t0, t1); - - GenerateI5InstructionFunc(assm); - - store_elements_of_vector(&assm, w2, a0); - - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); -#ifdef OBJECT_PRINT - code->Print(std::cout); -#endif - auto f = GeneratedCode::FromCode(*code); - - (f.Call(&res, 0, 0, 0, 0)); - - GenerateOperationFunc(&input->ws_lo, &input->wt_lo, &input->wd_lo); - if (input->wd_lo != Unpredictable) { - CHECK_EQ(input->wd_lo, res.d[0]); - } - if (input->wd_hi != Unpredictable) { - CHECK_EQ(input->wd_hi, res.d[1]); - } -} - -TEST(MSA_3R_instructions) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - struct TestCaseMsa3R tc[] = { - {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x1169751BB9A7D9C3, - 0xF7A594AEC8EF8A9C, 0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C}, - {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0x2B665362C4E812DF, - 0x3A0D80D68B3F8BC8, 0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8}, - {0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C, 0x1169751BB9A7D9C3, - 0xF7A594AEC8EF8A9C, 0x1169751BB9A7D9C3, 0xF7A594AEC8EF8A9C}, - {0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8, 0x2B665362C4E812DF, - 0x3A0D80D68B3F8BC8, 0x2B665362C4E812DF, 0x3A0D80D68B3F8BC8}, - {0xFFAB807F807FFFCD, 0x7F23FF80FF567F80, 0xFFAB807F807FFFCD, - 0x7F23FF80FF567F80, 0xFFAB807F807FFFCD, 0x7F23FF80FF567F80}, - {0x80FFEFFF7F12807F, 0x807F80FF7FDEFF78, 0x80FFEFFF7F12807F, - 0x807F80FF7FDEFF78, 0x80FFEFFF7F12807F, 0x807F80FF7FDEFF78}, - {0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, - 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF}, - {0x0000000000000000, 0xFFFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF, - 0x0000000000000000, 0x0000000000000000, 0xFFFFFFFFFFFFFFFF}, - {0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000, - 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000, 0xFFFF0000FFFF0000}, - {0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00, - 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00, 0xFF00FF00FF00FF00}, - {0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0, - 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0, 0xF0F0F0F0F0F0F0F0}, - {0xFF0000FFFF0000FF, 0xFF0000FFFF0000FF, 0xFF0000FFFF0000FF, - 0xFF0000FFFF0000FF, 0xFF0000FFFF0000FF, 0xFF0000FFFF0000FF}, - {0xFFFF00000000FFFF, 0xFFFF00000000FFFF, 
0xFFFF00000000FFFF, - 0xFFFF00000000FFFF, 0xFFFF00000000FFFF, 0xFFFF00000000FFFF}}; - -#define SLL_DF(T, lanes, mask) \ - int size_in_bits = kMSARegSize / lanes; \ - for (int i = 0; i < 2; i++) { \ - uint64_t res = 0; \ - for (int j = 0; j < lanes / 2; ++j) { \ - uint64_t shift = size_in_bits * j; \ - T src_op = static_cast((ws[i] >> shift) & mask); \ - T shift_op = static_cast((wt[i] >> shift) & mask) % size_in_bits; \ - res |= (static_cast(src_op << shift_op) & mask) << shift; \ - } \ - wd[i] = res; \ - } - -#define SRA_DF(T, lanes, mask) \ - int size_in_bits = kMSARegSize / lanes; \ - for (int i = 0; i < 2; i++) { \ - uint64_t res = 0; \ - for (int j = 0; j < lanes / 2; ++j) { \ - uint64_t shift = size_in_bits * j; \ - T src_op = static_cast((ws[i] >> shift) & mask); \ - T shift_op = ((wt[i] >> shift) & mask) % size_in_bits; \ - res |= (static_cast(ArithmeticShiftRight(src_op, shift_op) & \ - mask)) \ - << shift; \ - } \ - wd[i] = res; \ - } - -#define SRL_DF(T, lanes, mask) \ - int size_in_bits = kMSARegSize / lanes; \ - for (int i = 0; i < 2; i++) { \ - uint64_t res = 0; \ - for (int j = 0; j < lanes / 2; ++j) { \ - uint64_t shift = size_in_bits * j; \ - T src_op = static_cast((ws[i] >> shift) & mask); \ - T shift_op = static_cast(((wt[i] >> shift) & mask) % size_in_bits); \ - res |= (static_cast(src_op >> shift_op) & mask) << shift; \ - } \ - wd[i] = res; \ - } - -#define BCRL_DF(T, lanes, mask) \ - int size_in_bits = kMSARegSize / lanes; \ - for (int i = 0; i < 2; i++) { \ - uint64_t res = 0; \ - for (int j = 0; j < lanes / 2; ++j) { \ - uint64_t shift = size_in_bits * j; \ - T src_op = static_cast((ws[i] >> shift) & mask); \ - T shift_op = static_cast(((wt[i] >> shift) & mask) % size_in_bits); \ - T r = (static_cast(~(1ull << shift_op)) & src_op) & mask; \ - res |= static_cast(r) << shift; \ - } \ - wd[i] = res; \ - } - -#define BSET_DF(T, lanes, mask) \ - int size_in_bits = kMSARegSize / lanes; \ - for (int i = 0; i < 2; i++) { \ - uint64_t res = 0; \ - for (int j = 0; j < lanes / 2; ++j) { \ - uint64_t shift = size_in_bits * j; \ - T src_op = static_cast((ws[i] >> shift) & mask); \ - T shift_op = static_cast(((wt[i] >> shift) & mask) % size_in_bits); \ - T r = (static_cast(1ull << shift_op) | src_op) & mask; \ - res |= static_cast(r) << shift; \ - } \ - wd[i] = res; \ - } - -#define BNEG_DF(T, lanes, mask) \ - int size_in_bits = kMSARegSize / lanes; \ - for (int i = 0; i < 2; i++) { \ - uint64_t res = 0; \ - for (int j = 0; j < lanes / 2; ++j) { \ - uint64_t shift = size_in_bits * j; \ - T src_op = static_cast((ws[i] >> shift) & mask); \ - T shift_op = static_cast(((wt[i] >> shift) & mask) % size_in_bits); \ - T r = (static_cast(1ull << shift_op) ^ src_op) & mask; \ - res |= static_cast(r) << shift; \ - } \ - wd[i] = res; \ - } - -#define BINSL_DF(T, lanes, mask) \ - int size_in_bits = kMSARegSize / lanes; \ - for (int i = 0; i < 2; i++) { \ - uint64_t res = 0; \ - for (int j = 0; j < lanes / 2; ++j) { \ - uint64_t shift = size_in_bits * j; \ - T ws_op = static_cast((ws[i] >> shift) & mask); \ - T wd_op = static_cast((wd[i] >> shift) & mask); \ - T shift_op = static_cast(((wt[i] >> shift) & mask) % size_in_bits); \ - int bits = shift_op + 1; \ - T r; \ - if (bits == size_in_bits) { \ - r = static_cast(ws_op); \ - } else { \ - uint64_t mask2 = ((1ull << bits) - 1) << (size_in_bits - bits); \ - r = static_cast((static_cast(mask2) & ws_op) | \ - (static_cast(~mask2) & wd_op)); \ - } \ - res |= static_cast(r) << shift; \ - } \ - wd[i] = res; \ - } - -#define BINSR_DF(T, lanes, mask) 
\ - int size_in_bits = kMSARegSize / lanes; \ - for (int i = 0; i < 2; i++) { \ - uint64_t res = 0; \ - for (int j = 0; j < lanes / 2; ++j) { \ - uint64_t shift = size_in_bits * j; \ - T ws_op = static_cast((ws[i] >> shift) & mask); \ - T wd_op = static_cast((wd[i] >> shift) & mask); \ - T shift_op = static_cast(((wt[i] >> shift) & mask) % size_in_bits); \ - int bits = shift_op + 1; \ - T r; \ - if (bits == size_in_bits) { \ - r = static_cast(ws_op); \ - } else { \ - uint64_t mask2 = (1ull << bits) - 1; \ - r = static_cast((static_cast(mask2) & ws_op) | \ - (static_cast(~mask2) & wd_op)); \ - } \ - res |= static_cast(r) << shift; \ - } \ - wd[i] = res; \ - } - -#define ADDV_DF(T, lanes, mask) \ - int size_in_bits = kMSARegSize / lanes; \ - for (int i = 0; i < 2; i++) { \ - uint64_t res = 0; \ - for (int j = 0; j < lanes / 2; ++j) { \ - uint64_t shift = size_in_bits * j; \ - T ws_op = static_cast((ws[i] >> shift) & mask); \ - T wt_op = static_cast((wt[i] >> shift) & mask); \ - res |= (static_cast(ws_op + wt_op) & mask) << shift; \ - } \ - wd[i] = res; \ - } - -#define SUBV_DF(T, lanes, mask) \ - int size_in_bits = kMSARegSize / lanes; \ - for (int i = 0; i < 2; i++) { \ - uint64_t res = 0; \ - for (int j = 0; j < lanes / 2; ++j) { \ - uint64_t shift = size_in_bits * j; \ - T ws_op = static_cast((ws[i] >> shift) & mask); \ - T wt_op = static_cast((wt[i] >> shift) & mask); \ - res |= (static_cast(ws_op - wt_op) & mask) << shift; \ - } \ - wd[i] = res; \ - } - -#define MAX_DF(T, lanes, mask) \ - int size_in_bits = kMSARegSize / lanes; \ - for (int i = 0; i < 2; i++) { \ - uint64_t res = 0; \ - for (int j = 0; j < lanes / 2; ++j) { \ - uint64_t shift = size_in_bits * j; \ - T ws_op = static_cast((ws[i] >> shift) & mask); \ - T wt_op = static_cast((wt[i] >> shift) & mask); \ - res |= (static_cast(std::max(ws_op, wt_op)) & mask) \ - << shift; \ - } \ - wd[i] = res; \ - } - -#define MIN_DF(T, lanes, mask) \ - int size_in_bits = kMSARegSize / lanes; \ - for (int i = 0; i < 2; i++) { \ - uint64_t res = 0; \ - for (int j = 0; j < lanes / 2; ++j) { \ - uint64_t shift = size_in_bits * j; \ - T ws_op = static_cast((ws[i] >> shift) & mask); \ - T wt_op = static_cast((wt[i] >> shift) & mask); \ - res |= (static_cast(std::min(ws_op, wt_op)) & mask) \ - << shift; \ - } \ - wd[i] = res; \ - } - -#define MAXA_DF(T, lanes, mask) \ - int size_in_bits = kMSARegSize / lanes; \ - for (int i = 0; i < 2; i++) { \ - uint64_t res = 0; \ - for (int j = 0; j < lanes / 2; ++j) { \ - uint64_t shift = size_in_bits * j; \ - T ws_op = static_cast((ws[i] >> shift) & mask); \ - T wt_op = static_cast((wt[i] >> shift) & mask); \ - res |= \ - (static_cast(Nabs(ws_op) < Nabs(wt_op) ? ws_op : wt_op) & \ - mask) \ - << shift; \ - } \ - wd[i] = res; \ - } - -#define MINA_DF(T, lanes, mask) \ - int size_in_bits = kMSARegSize / lanes; \ - for (int i = 0; i < 2; i++) { \ - uint64_t res = 0; \ - for (int j = 0; j < lanes / 2; ++j) { \ - uint64_t shift = size_in_bits * j; \ - T ws_op = static_cast((ws[i] >> shift) & mask); \ - T wt_op = static_cast((wt[i] >> shift) & mask); \ - res |= \ - (static_cast(Nabs(ws_op) > Nabs(wt_op) ? 
ws_op : wt_op) & \ - mask) \ - << shift; \ - } \ - wd[i] = res; \ - } - -#define CEQ_DF(T, lanes, mask) \ - int size_in_bits = kMSARegSize / lanes; \ - for (int i = 0; i < 2; i++) { \ - uint64_t res = 0; \ - for (int j = 0; j < lanes / 2; ++j) { \ - uint64_t shift = size_in_bits * j; \ - T ws_op = static_cast((ws[i] >> shift) & mask); \ - T wt_op = static_cast((wt[i] >> shift) & mask); \ - res |= (static_cast(!Compare(ws_op, wt_op) ? -1ull : 0ull) & \ - mask) \ - << shift; \ - } \ - wd[i] = res; \ - } - -#define CLT_DF(T, lanes, mask) \ - int size_in_bits = kMSARegSize / lanes; \ - for (int i = 0; i < 2; i++) { \ - uint64_t res = 0; \ - for (int j = 0; j < lanes / 2; ++j) { \ - uint64_t shift = size_in_bits * j; \ - T ws_op = static_cast((ws[i] >> shift) & mask); \ - T wt_op = static_cast((wt[i] >> shift) & mask); \ - res |= (static_cast((Compare(ws_op, wt_op) == -1) ? -1ull \ - : 0ull) & \ - mask) \ - << shift; \ - } \ - wd[i] = res; \ - } - -#define CLE_DF(T, lanes, mask) \ - int size_in_bits = kMSARegSize / lanes; \ - for (int i = 0; i < 2; i++) { \ - uint64_t res = 0; \ - for (int j = 0; j < lanes / 2; ++j) { \ - uint64_t shift = size_in_bits * j; \ - T ws_op = static_cast((ws[i] >> shift) & mask); \ - T wt_op = static_cast((wt[i] >> shift) & mask); \ - res |= (static_cast((Compare(ws_op, wt_op) != 1) ? -1ull \ - : 0ull) & \ - mask) \ - << shift; \ - } \ - wd[i] = res; \ - } - -#define ADD_A_DF(T, lanes, mask) \ - int size_in_bits = kMSARegSize / lanes; \ - for (int i = 0; i < 2; i++) { \ - uint64_t res = 0; \ - for (int j = 0; j < lanes / 2; ++j) { \ - uint64_t shift = size_in_bits * j; \ - T ws_op = static_cast((ws[i] >> shift) & mask); \ - T wt_op = static_cast((wt[i] >> shift) & mask); \ - res |= (static_cast(Abs(ws_op) + Abs(wt_op)) & mask) << shift; \ - } \ - wd[i] = res; \ - } - -#define ADDS_A_DF(T, lanes, mask) \ - int size_in_bits = kMSARegSize / lanes; \ - for (int i = 0; i < 2; i++) { \ - uint64_t res = 0; \ - for (int j = 0; j < lanes / 2; ++j) { \ - uint64_t shift = size_in_bits * j; \ - T ws_op = Nabs(static_cast((ws[i] >> shift) & mask)); \ - T wt_op = Nabs(static_cast((wt[i] >> shift) & mask)); \ - T r; \ - if (ws_op < -std::numeric_limits::max() - wt_op) { \ - r = std::numeric_limits::max(); \ - } else { \ - r = -(ws_op + wt_op); \ - } \ - res |= (static_cast(r) & mask) << shift; \ - } \ - wd[i] = res; \ - } - -#define ADDS_DF(T, lanes, mask) \ - int size_in_bits = kMSARegSize / lanes; \ - for (int i = 0; i < 2; i++) { \ - uint64_t res = 0; \ - for (int j = 0; j < lanes / 2; ++j) { \ - uint64_t shift = size_in_bits * j; \ - T ws_op = static_cast((ws[i] >> shift) & mask); \ - T wt_op = static_cast((wt[i] >> shift) & mask); \ - res |= (static_cast(SaturateAdd(ws_op, wt_op)) & mask) \ - << shift; \ - } \ - wd[i] = res; \ - } - -#define AVE_DF(T, lanes, mask) \ - int size_in_bits = kMSARegSize / lanes; \ - for (int i = 0; i < 2; i++) { \ - uint64_t res = 0; \ - for (int j = 0; j < lanes / 2; ++j) { \ - uint64_t shift = size_in_bits * j; \ - T ws_op = static_cast((ws[i] >> shift) & mask); \ - T wt_op = static_cast((wt[i] >> shift) & mask); \ - res |= (static_cast( \ - ((wt_op & ws_op) + ((ws_op ^ wt_op) >> 1)) & mask)) \ - << shift; \ - } \ - wd[i] = res; \ - } - -#define AVER_DF(T, lanes, mask) \ - int size_in_bits = kMSARegSize / lanes; \ - for (int i = 0; i < 2; i++) { \ - uint64_t res = 0; \ - for (int j = 0; j < lanes / 2; ++j) { \ - uint64_t shift = size_in_bits * j; \ - T ws_op = static_cast((ws[i] >> shift) & mask); \ - T wt_op = static_cast((wt[i] >> shift) & 
mask); \ - res |= (static_cast( \ - ((wt_op | ws_op) - ((ws_op ^ wt_op) >> 1)) & mask)) \ - << shift; \ - } \ - wd[i] = res; \ - } - -#define SUBS_DF(T, lanes, mask) \ - int size_in_bits = kMSARegSize / lanes; \ - for (int i = 0; i < 2; i++) { \ - uint64_t res = 0; \ - for (int j = 0; j < lanes / 2; ++j) { \ - uint64_t shift = size_in_bits * j; \ - T ws_op = static_cast((ws[i] >> shift) & mask); \ - T wt_op = static_cast((wt[i] >> shift) & mask); \ - res |= (static_cast(SaturateSub(ws_op, wt_op)) & mask) \ - << shift; \ - } \ - wd[i] = res; \ - } - -#define SUBSUS_U_DF(T, lanes, mask) \ - using uT = typename std::make_unsigned::type; \ - int size_in_bits = kMSARegSize / lanes; \ - for (int i = 0; i < 2; i++) { \ - uint64_t res = 0; \ - for (int j = 0; j < lanes / 2; ++j) { \ - uint64_t shift = size_in_bits * j; \ - uT ws_op = static_cast((ws[i] >> shift) & mask); \ - T wt_op = static_cast((wt[i] >> shift) & mask); \ - T r; \ - if (wt_op > 0) { \ - uT wtu = static_cast(wt_op); \ - if (wtu > ws_op) { \ - r = 0; \ - } else { \ - r = static_cast(ws_op - wtu); \ - } \ - } else { \ - if (ws_op > std::numeric_limits::max() + wt_op) { \ - r = static_cast(std::numeric_limits::max()); \ - } else { \ - r = static_cast(ws_op - wt_op); \ - } \ - } \ - res |= (static_cast(r) & mask) << shift; \ - } \ - wd[i] = res; \ - } - -#define SUBSUU_S_DF(T, lanes, mask) \ - using uT = typename std::make_unsigned::type; \ - int size_in_bits = kMSARegSize / lanes; \ - for (int i = 0; i < 2; i++) { \ - uint64_t res = 0; \ - for (int j = 0; j < lanes / 2; ++j) { \ - uint64_t shift = size_in_bits * j; \ - uT ws_op = static_cast((ws[i] >> shift) & mask); \ - uT wt_op = static_cast((wt[i] >> shift) & mask); \ - uT wdu; \ - T r; \ - if (ws_op > wt_op) { \ - wdu = ws_op - wt_op; \ - if (wdu > std::numeric_limits::max()) { \ - r = std::numeric_limits::max(); \ - } else { \ - r = static_cast(wdu); \ - } \ - } else { \ - wdu = wt_op - ws_op; \ - CHECK(-std::numeric_limits::max() == \ - std::numeric_limits::min() + 1); \ - if (wdu <= std::numeric_limits::max()) { \ - r = -static_cast(wdu); \ - } else { \ - r = std::numeric_limits::min(); \ - } \ - } \ - res |= (static_cast(r) & mask) << shift; \ - } \ - wd[i] = res; \ - } - -#define ASUB_S_DF(T, lanes, mask) \ - int size_in_bits = kMSARegSize / lanes; \ - for (int i = 0; i < 2; i++) { \ - uint64_t res = 0; \ - for (int j = 0; j < lanes / 2; ++j) { \ - uint64_t shift = size_in_bits * j; \ - T ws_op = static_cast((ws[i] >> shift) & mask); \ - T wt_op = static_cast((wt[i] >> shift) & mask); \ - res |= (static_cast(Abs(ws_op - wt_op)) & mask) << shift; \ - } \ - wd[i] = res; \ - } - -#define ASUB_U_DF(T, lanes, mask) \ - int size_in_bits = kMSARegSize / lanes; \ - for (int i = 0; i < 2; i++) { \ - uint64_t res = 0; \ - for (int j = 0; j < lanes / 2; ++j) { \ - uint64_t shift = size_in_bits * j; \ - T ws_op = static_cast((ws[i] >> shift) & mask); \ - T wt_op = static_cast((wt[i] >> shift) & mask); \ - res |= (static_cast(ws_op > wt_op ? 
ws_op - wt_op \ - : wt_op - ws_op) & \ - mask) \ - << shift; \ - } \ - wd[i] = res; \ - } - -#define MULV_DF(T, lanes, mask) \ - int size_in_bits = kMSARegSize / lanes; \ - for (int i = 0; i < 2; i++) { \ - uint64_t res = 0; \ - for (int j = 0; j < lanes / 2; ++j) { \ - uint64_t shift = size_in_bits * j; \ - T ws_op = static_cast((ws[i] >> shift) & mask); \ - T wt_op = static_cast((wt[i] >> shift) & mask); \ - res |= (static_cast(ws_op * wt_op) & mask) << shift; \ - } \ - wd[i] = res; \ - } - -#define MADDV_DF(T, lanes, mask) \ - int size_in_bits = kMSARegSize / lanes; \ - for (int i = 0; i < 2; i++) { \ - uint64_t res = 0; \ - for (int j = 0; j < lanes / 2; ++j) { \ - uint64_t shift = size_in_bits * j; \ - T ws_op = static_cast((ws[i] >> shift) & mask); \ - T wt_op = static_cast((wt[i] >> shift) & mask); \ - T wd_op = static_cast((wd[i] >> shift) & mask); \ - res |= (static_cast(wd_op + ws_op * wt_op) & mask) << shift; \ - } \ - wd[i] = res; \ - } - -#define MSUBV_DF(T, lanes, mask) \ - int size_in_bits = kMSARegSize / lanes; \ - for (int i = 0; i < 2; i++) { \ - uint64_t res = 0; \ - for (int j = 0; j < lanes / 2; ++j) { \ - uint64_t shift = size_in_bits * j; \ - T ws_op = static_cast((ws[i] >> shift) & mask); \ - T wt_op = static_cast((wt[i] >> shift) & mask); \ - T wd_op = static_cast((wd[i] >> shift) & mask); \ - res |= (static_cast(wd_op - ws_op * wt_op) & mask) << shift; \ - } \ - wd[i] = res; \ - } - -#define DIV_DF(T, lanes, mask) \ - int size_in_bits = kMSARegSize / lanes; \ - for (int i = 0; i < 2; i++) { \ - uint64_t res = 0; \ - for (int j = 0; j < lanes / 2; ++j) { \ - uint64_t shift = size_in_bits * j; \ - T ws_op = static_cast((ws[i] >> shift) & mask); \ - T wt_op = static_cast((wt[i] >> shift) & mask); \ - if (wt_op == 0) { \ - res = Unpredictable; \ - break; \ - } \ - res |= (static_cast(ws_op / wt_op) & mask) << shift; \ - } \ - wd[i] = res; \ - } - -#define MOD_DF(T, lanes, mask) \ - int size_in_bits = kMSARegSize / lanes; \ - for (int i = 0; i < 2; i++) { \ - uint64_t res = 0; \ - for (int j = 0; j < lanes / 2; ++j) { \ - uint64_t shift = size_in_bits * j; \ - T ws_op = static_cast((ws[i] >> shift) & mask); \ - T wt_op = static_cast((wt[i] >> shift) & mask); \ - if (wt_op == 0) { \ - res = Unpredictable; \ - break; \ - } \ - res |= (static_cast(wt_op != 0 ? ws_op % wt_op : 0) & mask) \ - << shift; \ - } \ - wd[i] = res; \ - } - -#define SRAR_DF(T, lanes, mask) \ - int size_in_bits = kMSARegSize / lanes; \ - for (int i = 0; i < 2; i++) { \ - uint64_t res = 0; \ - for (int j = 0; j < lanes / 2; ++j) { \ - uint64_t shift = size_in_bits * j; \ - T src_op = static_cast((ws[i] >> shift) & mask); \ - T shift_op = ((wt[i] >> shift) & mask) % size_in_bits; \ - uint32_t bit = shift_op == 0 ? 
0 : src_op >> (shift_op - 1) & 1; \ - res |= (static_cast(ArithmeticShiftRight(src_op, shift_op) + \ - bit) & \ - mask) \ - << shift; \ - } \ - wd[i] = res; \ - } - -#define PCKEV_DF(T, lanes, mask) \ - T* ws_p = reinterpret_cast(ws); \ - T* wt_p = reinterpret_cast(wt); \ - T* wd_p = reinterpret_cast(wd); \ - for (int i = 0; i < lanes / 2; ++i) { \ - wd_p[i] = wt_p[2 * i]; \ - wd_p[i + lanes / 2] = ws_p[2 * i]; \ - } - -#define PCKOD_DF(T, lanes, mask) \ - T* ws_p = reinterpret_cast(ws); \ - T* wt_p = reinterpret_cast(wt); \ - T* wd_p = reinterpret_cast(wd); \ - for (int i = 0; i < lanes / 2; ++i) { \ - wd_p[i] = wt_p[2 * i + 1]; \ - wd_p[i + lanes / 2] = ws_p[2 * i + 1]; \ - } - -#define ILVL_DF(T, lanes, mask) \ - T* ws_p = reinterpret_cast(ws); \ - T* wt_p = reinterpret_cast(wt); \ - T* wd_p = reinterpret_cast(wd); \ - for (int i = 0; i < lanes / 2; ++i) { \ - wd_p[2 * i] = wt_p[i + lanes / 2]; \ - wd_p[2 * i + 1] = ws_p[i + lanes / 2]; \ - } - -#define ILVR_DF(T, lanes, mask) \ - T* ws_p = reinterpret_cast(ws); \ - T* wt_p = reinterpret_cast(wt); \ - T* wd_p = reinterpret_cast(wd); \ - for (int i = 0; i < lanes / 2; ++i) { \ - wd_p[2 * i] = wt_p[i]; \ - wd_p[2 * i + 1] = ws_p[i]; \ - } - -#define ILVEV_DF(T, lanes, mask) \ - T* ws_p = reinterpret_cast(ws); \ - T* wt_p = reinterpret_cast(wt); \ - T* wd_p = reinterpret_cast(wd); \ - for (int i = 0; i < lanes / 2; ++i) { \ - wd_p[2 * i] = wt_p[2 * i]; \ - wd_p[2 * i + 1] = ws_p[2 * i]; \ - } - -#define ILVOD_DF(T, lanes, mask) \ - T* ws_p = reinterpret_cast(ws); \ - T* wt_p = reinterpret_cast(wt); \ - T* wd_p = reinterpret_cast(wd); \ - for (int i = 0; i < lanes / 2; ++i) { \ - wd_p[2 * i] = wt_p[2 * i + 1]; \ - wd_p[2 * i + 1] = ws_p[2 * i + 1]; \ - } - -#define VSHF_DF(T, lanes, mask) \ - T* ws_p = reinterpret_cast(ws); \ - T* wt_p = reinterpret_cast(wt); \ - T* wd_p = reinterpret_cast(wd); \ - const int mask_not_valid = 0xC0; \ - const int mask_6bits = 0x3F; \ - for (int i = 0; i < lanes; ++i) { \ - if ((wd_p[i] & mask_not_valid)) { \ - wd_p[i] = 0; \ - } else { \ - int k = (wd_p[i] & mask_6bits) % (lanes * 2); \ - wd_p[i] = k > lanes ? 
ws_p[k - lanes] : wt_p[k]; \ - } \ - } - -#define HADD_DF(T, T_small, lanes) \ - T_small* ws_p = reinterpret_cast(ws); \ - T_small* wt_p = reinterpret_cast(wt); \ - T* wd_p = reinterpret_cast(wd); \ - for (int i = 0; i < lanes; ++i) { \ - wd_p[i] = static_cast(ws_p[2 * i + 1]) + static_cast(wt_p[2 * i]); \ - } - -#define HSUB_DF(T, T_small, lanes) \ - T_small* ws_p = reinterpret_cast(ws); \ - T_small* wt_p = reinterpret_cast(wt); \ - T* wd_p = reinterpret_cast(wd); \ - for (int i = 0; i < lanes; ++i) { \ - wd_p[i] = static_cast(ws_p[2 * i + 1]) - static_cast(wt_p[2 * i]); \ - } - -#define TEST_CASE(V) \ - V(sll_b, SLL_DF, uint8_t, kMSALanesByte, UINT8_MAX) \ - V(sll_h, SLL_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \ - V(sll_w, SLL_DF, uint32_t, kMSALanesWord, UINT32_MAX) \ - V(sll_d, SLL_DF, uint64_t, kMSALanesDword, UINT64_MAX) \ - V(srl_b, SRL_DF, uint8_t, kMSALanesByte, UINT8_MAX) \ - V(srl_h, SRL_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \ - V(srl_w, SRL_DF, uint32_t, kMSALanesWord, UINT32_MAX) \ - V(srl_d, SRL_DF, uint64_t, kMSALanesDword, UINT64_MAX) \ - V(bclr_b, BCRL_DF, uint8_t, kMSALanesByte, UINT8_MAX) \ - V(bclr_h, BCRL_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \ - V(bclr_w, BCRL_DF, uint32_t, kMSALanesWord, UINT32_MAX) \ - V(bclr_d, BCRL_DF, uint64_t, kMSALanesDword, UINT64_MAX) \ - V(bset_b, BSET_DF, uint8_t, kMSALanesByte, UINT8_MAX) \ - V(bset_h, BSET_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \ - V(bset_w, BSET_DF, uint32_t, kMSALanesWord, UINT32_MAX) \ - V(bset_d, BSET_DF, uint64_t, kMSALanesDword, UINT64_MAX) \ - V(bneg_b, BNEG_DF, uint8_t, kMSALanesByte, UINT8_MAX) \ - V(bneg_h, BNEG_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \ - V(bneg_w, BNEG_DF, uint32_t, kMSALanesWord, UINT32_MAX) \ - V(bneg_d, BNEG_DF, uint64_t, kMSALanesDword, UINT64_MAX) \ - V(binsl_b, BINSL_DF, uint8_t, kMSALanesByte, UINT8_MAX) \ - V(binsl_h, BINSL_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \ - V(binsl_w, BINSL_DF, uint32_t, kMSALanesWord, UINT32_MAX) \ - V(binsl_d, BINSL_DF, uint64_t, kMSALanesDword, UINT64_MAX) \ - V(binsr_b, BINSR_DF, uint8_t, kMSALanesByte, UINT8_MAX) \ - V(binsr_h, BINSR_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \ - V(binsr_w, BINSR_DF, uint32_t, kMSALanesWord, UINT32_MAX) \ - V(binsr_d, BINSR_DF, uint64_t, kMSALanesDword, UINT64_MAX) \ - V(addv_b, ADDV_DF, int8_t, kMSALanesByte, UINT8_MAX) \ - V(addv_h, ADDV_DF, int16_t, kMSALanesHalf, UINT16_MAX) \ - V(addv_w, ADDV_DF, int32_t, kMSALanesWord, UINT32_MAX) \ - V(addv_d, ADDV_DF, int64_t, kMSALanesDword, UINT64_MAX) \ - V(subv_b, SUBV_DF, int8_t, kMSALanesByte, UINT8_MAX) \ - V(subv_h, SUBV_DF, int16_t, kMSALanesHalf, UINT16_MAX) \ - V(subv_w, SUBV_DF, int32_t, kMSALanesWord, UINT32_MAX) \ - V(subv_d, SUBV_DF, int64_t, kMSALanesDword, UINT64_MAX) \ - V(max_s_b, MAX_DF, int8_t, kMSALanesByte, UINT8_MAX) \ - V(max_s_h, MAX_DF, int16_t, kMSALanesHalf, UINT16_MAX) \ - V(max_s_w, MAX_DF, int32_t, kMSALanesWord, UINT32_MAX) \ - V(max_s_d, MAX_DF, int64_t, kMSALanesDword, UINT64_MAX) \ - V(max_u_b, MAX_DF, uint8_t, kMSALanesByte, UINT8_MAX) \ - V(max_u_h, MAX_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \ - V(max_u_w, MAX_DF, uint32_t, kMSALanesWord, UINT32_MAX) \ - V(max_u_d, MAX_DF, uint64_t, kMSALanesDword, UINT64_MAX) \ - V(min_s_b, MIN_DF, int8_t, kMSALanesByte, UINT8_MAX) \ - V(min_s_h, MIN_DF, int16_t, kMSALanesHalf, UINT16_MAX) \ - V(min_s_w, MIN_DF, int32_t, kMSALanesWord, UINT32_MAX) \ - V(min_s_d, MIN_DF, int64_t, kMSALanesDword, UINT64_MAX) \ - V(min_u_b, MIN_DF, uint8_t, kMSALanesByte, UINT8_MAX) \ - V(min_u_h, MIN_DF, uint16_t, 
kMSALanesHalf, UINT16_MAX) \ - V(min_u_w, MIN_DF, uint32_t, kMSALanesWord, UINT32_MAX) \ - V(min_u_d, MIN_DF, uint64_t, kMSALanesDword, UINT64_MAX) \ - V(max_a_b, MAXA_DF, int8_t, kMSALanesByte, UINT8_MAX) \ - V(max_a_h, MAXA_DF, int16_t, kMSALanesHalf, UINT16_MAX) \ - V(max_a_w, MAXA_DF, int32_t, kMSALanesWord, UINT32_MAX) \ - V(max_a_d, MAXA_DF, int64_t, kMSALanesDword, UINT64_MAX) \ - V(min_a_b, MINA_DF, int8_t, kMSALanesByte, UINT8_MAX) \ - V(min_a_h, MINA_DF, int16_t, kMSALanesHalf, UINT16_MAX) \ - V(min_a_w, MINA_DF, int32_t, kMSALanesWord, UINT32_MAX) \ - V(min_a_d, MINA_DF, int64_t, kMSALanesDword, UINT64_MAX) \ - V(ceq_b, CEQ_DF, uint8_t, kMSALanesByte, UINT8_MAX) \ - V(ceq_h, CEQ_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \ - V(ceq_w, CEQ_DF, uint32_t, kMSALanesWord, UINT32_MAX) \ - V(ceq_d, CEQ_DF, uint64_t, kMSALanesDword, UINT64_MAX) \ - V(clt_s_b, CLT_DF, int8_t, kMSALanesByte, UINT8_MAX) \ - V(clt_s_h, CLT_DF, int16_t, kMSALanesHalf, UINT16_MAX) \ - V(clt_s_w, CLT_DF, int32_t, kMSALanesWord, UINT32_MAX) \ - V(clt_s_d, CLT_DF, int64_t, kMSALanesDword, UINT64_MAX) \ - V(clt_u_b, CLT_DF, uint8_t, kMSALanesByte, UINT8_MAX) \ - V(clt_u_h, CLT_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \ - V(clt_u_w, CLT_DF, uint32_t, kMSALanesWord, UINT32_MAX) \ - V(clt_u_d, CLT_DF, uint64_t, kMSALanesDword, UINT64_MAX) \ - V(cle_s_b, CLE_DF, int8_t, kMSALanesByte, UINT8_MAX) \ - V(cle_s_h, CLE_DF, int16_t, kMSALanesHalf, UINT16_MAX) \ - V(cle_s_w, CLE_DF, int32_t, kMSALanesWord, UINT32_MAX) \ - V(cle_s_d, CLE_DF, int64_t, kMSALanesDword, UINT64_MAX) \ - V(cle_u_b, CLE_DF, uint8_t, kMSALanesByte, UINT8_MAX) \ - V(cle_u_h, CLE_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \ - V(cle_u_w, CLE_DF, uint32_t, kMSALanesWord, UINT32_MAX) \ - V(cle_u_d, CLE_DF, uint64_t, kMSALanesDword, UINT64_MAX) \ - V(add_a_b, ADD_A_DF, int8_t, kMSALanesByte, UINT8_MAX) \ - V(add_a_h, ADD_A_DF, int16_t, kMSALanesHalf, UINT16_MAX) \ - V(add_a_w, ADD_A_DF, int32_t, kMSALanesWord, UINT32_MAX) \ - V(add_a_d, ADD_A_DF, int64_t, kMSALanesDword, UINT64_MAX) \ - V(adds_a_b, ADDS_A_DF, int8_t, kMSALanesByte, UINT8_MAX) \ - V(adds_a_h, ADDS_A_DF, int16_t, kMSALanesHalf, UINT16_MAX) \ - V(adds_a_w, ADDS_A_DF, int32_t, kMSALanesWord, UINT32_MAX) \ - V(adds_a_d, ADDS_A_DF, int64_t, kMSALanesDword, UINT64_MAX) \ - V(adds_s_b, ADDS_DF, int8_t, kMSALanesByte, UINT8_MAX) \ - V(adds_s_h, ADDS_DF, int16_t, kMSALanesHalf, UINT16_MAX) \ - V(adds_s_w, ADDS_DF, int32_t, kMSALanesWord, UINT32_MAX) \ - V(adds_s_d, ADDS_DF, int64_t, kMSALanesDword, UINT64_MAX) \ - V(adds_u_b, ADDS_DF, uint8_t, kMSALanesByte, UINT8_MAX) \ - V(adds_u_h, ADDS_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \ - V(adds_u_w, ADDS_DF, uint32_t, kMSALanesWord, UINT32_MAX) \ - V(adds_u_d, ADDS_DF, uint64_t, kMSALanesDword, UINT64_MAX) \ - V(ave_s_b, AVE_DF, int8_t, kMSALanesByte, UINT8_MAX) \ - V(ave_s_h, AVE_DF, int16_t, kMSALanesHalf, UINT16_MAX) \ - V(ave_s_w, AVE_DF, int32_t, kMSALanesWord, UINT32_MAX) \ - V(ave_s_d, AVE_DF, int64_t, kMSALanesDword, UINT64_MAX) \ - V(ave_u_b, AVE_DF, uint8_t, kMSALanesByte, UINT8_MAX) \ - V(ave_u_h, AVE_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \ - V(ave_u_w, AVE_DF, uint32_t, kMSALanesWord, UINT32_MAX) \ - V(ave_u_d, AVE_DF, uint64_t, kMSALanesDword, UINT64_MAX) \ - V(aver_s_b, AVER_DF, int8_t, kMSALanesByte, UINT8_MAX) \ - V(aver_s_h, AVER_DF, int16_t, kMSALanesHalf, UINT16_MAX) \ - V(aver_s_w, AVER_DF, int32_t, kMSALanesWord, UINT32_MAX) \ - V(aver_s_d, AVER_DF, int64_t, kMSALanesDword, UINT64_MAX) \ - V(aver_u_b, AVER_DF, uint8_t, kMSALanesByte, 
UINT8_MAX) \ - V(aver_u_h, AVER_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \ - V(aver_u_w, AVER_DF, uint32_t, kMSALanesWord, UINT32_MAX) \ - V(aver_u_d, AVER_DF, uint64_t, kMSALanesDword, UINT64_MAX) \ - V(subs_s_b, SUBS_DF, int8_t, kMSALanesByte, UINT8_MAX) \ - V(subs_s_h, SUBS_DF, int16_t, kMSALanesHalf, UINT16_MAX) \ - V(subs_s_w, SUBS_DF, int32_t, kMSALanesWord, UINT32_MAX) \ - V(subs_s_d, SUBS_DF, int64_t, kMSALanesDword, UINT64_MAX) \ - V(subs_u_b, SUBS_DF, uint8_t, kMSALanesByte, UINT8_MAX) \ - V(subs_u_h, SUBS_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \ - V(subs_u_w, SUBS_DF, uint32_t, kMSALanesWord, UINT32_MAX) \ - V(subs_u_d, SUBS_DF, uint64_t, kMSALanesDword, UINT64_MAX) \ - V(subsus_u_b, SUBSUS_U_DF, int8_t, kMSALanesByte, UINT8_MAX) \ - V(subsus_u_h, SUBSUS_U_DF, int16_t, kMSALanesHalf, UINT16_MAX) \ - V(subsus_u_w, SUBSUS_U_DF, int32_t, kMSALanesWord, UINT32_MAX) \ - V(subsus_u_d, SUBSUS_U_DF, int64_t, kMSALanesDword, UINT64_MAX) \ - V(subsuu_s_b, SUBSUU_S_DF, int8_t, kMSALanesByte, UINT8_MAX) \ - V(subsuu_s_h, SUBSUU_S_DF, int16_t, kMSALanesHalf, UINT16_MAX) \ - V(subsuu_s_w, SUBSUU_S_DF, int32_t, kMSALanesWord, UINT32_MAX) \ - V(subsuu_s_d, SUBSUU_S_DF, int64_t, kMSALanesDword, UINT64_MAX) \ - V(asub_s_b, ASUB_S_DF, int8_t, kMSALanesByte, UINT8_MAX) \ - V(asub_s_h, ASUB_S_DF, int16_t, kMSALanesHalf, UINT16_MAX) \ - V(asub_s_w, ASUB_S_DF, int32_t, kMSALanesWord, UINT32_MAX) \ - V(asub_s_d, ASUB_S_DF, int64_t, kMSALanesDword, UINT64_MAX) \ - V(asub_u_b, ASUB_U_DF, uint8_t, kMSALanesByte, UINT8_MAX) \ - V(asub_u_h, ASUB_U_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \ - V(asub_u_w, ASUB_U_DF, uint32_t, kMSALanesWord, UINT32_MAX) \ - V(asub_u_d, ASUB_U_DF, uint64_t, kMSALanesDword, UINT64_MAX) \ - V(mulv_b, MULV_DF, int8_t, kMSALanesByte, UINT8_MAX) \ - V(mulv_h, MULV_DF, int16_t, kMSALanesHalf, UINT16_MAX) \ - V(mulv_w, MULV_DF, int32_t, kMSALanesWord, UINT32_MAX) \ - V(mulv_d, MULV_DF, int64_t, kMSALanesDword, UINT64_MAX) \ - V(maddv_b, MADDV_DF, int8_t, kMSALanesByte, UINT8_MAX) \ - V(maddv_h, MADDV_DF, int16_t, kMSALanesHalf, UINT16_MAX) \ - V(maddv_w, MADDV_DF, int32_t, kMSALanesWord, UINT32_MAX) \ - V(maddv_d, MADDV_DF, int64_t, kMSALanesDword, UINT64_MAX) \ - V(msubv_b, MSUBV_DF, int8_t, kMSALanesByte, UINT8_MAX) \ - V(msubv_h, MSUBV_DF, int16_t, kMSALanesHalf, UINT16_MAX) \ - V(msubv_w, MSUBV_DF, int32_t, kMSALanesWord, UINT32_MAX) \ - V(msubv_d, MSUBV_DF, int64_t, kMSALanesDword, UINT64_MAX) \ - V(div_s_b, DIV_DF, int8_t, kMSALanesByte, UINT8_MAX) \ - V(div_s_h, DIV_DF, int16_t, kMSALanesHalf, UINT16_MAX) \ - V(div_s_w, DIV_DF, int32_t, kMSALanesWord, UINT32_MAX) \ - V(div_s_d, DIV_DF, int64_t, kMSALanesDword, UINT64_MAX) \ - V(div_u_b, DIV_DF, uint8_t, kMSALanesByte, UINT8_MAX) \ - V(div_u_h, DIV_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \ - V(div_u_w, DIV_DF, uint32_t, kMSALanesWord, UINT32_MAX) \ - V(div_u_d, DIV_DF, uint64_t, kMSALanesDword, UINT64_MAX) \ - V(mod_s_b, MOD_DF, int8_t, kMSALanesByte, UINT8_MAX) \ - V(mod_s_h, MOD_DF, int16_t, kMSALanesHalf, UINT16_MAX) \ - V(mod_s_w, MOD_DF, int32_t, kMSALanesWord, UINT32_MAX) \ - V(mod_s_d, MOD_DF, int64_t, kMSALanesDword, UINT64_MAX) \ - V(mod_u_b, MOD_DF, uint8_t, kMSALanesByte, UINT8_MAX) \ - V(mod_u_h, MOD_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \ - V(mod_u_w, MOD_DF, uint32_t, kMSALanesWord, UINT32_MAX) \ - V(mod_u_d, MOD_DF, uint64_t, kMSALanesDword, UINT64_MAX) \ - V(srlr_b, SRAR_DF, uint8_t, kMSALanesByte, UINT8_MAX) \ - V(srlr_h, SRAR_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \ - V(srlr_w, SRAR_DF, uint32_t, 
kMSALanesWord, UINT32_MAX) \ - V(srlr_d, SRAR_DF, uint64_t, kMSALanesDword, UINT64_MAX) \ - V(pckev_b, PCKEV_DF, uint8_t, kMSALanesByte, UINT8_MAX) \ - V(pckev_h, PCKEV_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \ - V(pckev_w, PCKEV_DF, uint32_t, kMSALanesWord, UINT32_MAX) \ - V(pckev_d, PCKEV_DF, uint64_t, kMSALanesDword, UINT64_MAX) \ - V(pckod_b, PCKOD_DF, uint8_t, kMSALanesByte, UINT8_MAX) \ - V(pckod_h, PCKOD_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \ - V(pckod_w, PCKOD_DF, uint32_t, kMSALanesWord, UINT32_MAX) \ - V(pckod_d, PCKOD_DF, uint64_t, kMSALanesDword, UINT64_MAX) \ - V(ilvl_b, ILVL_DF, uint8_t, kMSALanesByte, UINT8_MAX) \ - V(ilvl_h, ILVL_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \ - V(ilvl_w, ILVL_DF, uint32_t, kMSALanesWord, UINT32_MAX) \ - V(ilvl_d, ILVL_DF, uint64_t, kMSALanesDword, UINT64_MAX) \ - V(ilvr_b, ILVR_DF, uint8_t, kMSALanesByte, UINT8_MAX) \ - V(ilvr_h, ILVR_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \ - V(ilvr_w, ILVR_DF, uint32_t, kMSALanesWord, UINT32_MAX) \ - V(ilvr_d, ILVR_DF, uint64_t, kMSALanesDword, UINT64_MAX) \ - V(ilvev_b, ILVEV_DF, uint8_t, kMSALanesByte, UINT8_MAX) \ - V(ilvev_h, ILVEV_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \ - V(ilvev_w, ILVEV_DF, uint32_t, kMSALanesWord, UINT32_MAX) \ - V(ilvev_d, ILVEV_DF, uint64_t, kMSALanesDword, UINT64_MAX) \ - V(ilvod_b, ILVOD_DF, uint8_t, kMSALanesByte, UINT8_MAX) \ - V(ilvod_h, ILVOD_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \ - V(ilvod_w, ILVOD_DF, uint32_t, kMSALanesWord, UINT32_MAX) \ - V(ilvod_d, ILVOD_DF, uint64_t, kMSALanesDword, UINT64_MAX) \ - V(vshf_b, VSHF_DF, uint8_t, kMSALanesByte, UINT8_MAX) \ - V(vshf_h, VSHF_DF, uint16_t, kMSALanesHalf, UINT16_MAX) \ - V(vshf_w, VSHF_DF, uint32_t, kMSALanesWord, UINT32_MAX) \ - V(vshf_d, VSHF_DF, uint64_t, kMSALanesDword, UINT64_MAX) \ - V(hadd_s_h, HADD_DF, int16_t, int8_t, kMSALanesHalf) \ - V(hadd_s_w, HADD_DF, int32_t, int16_t, kMSALanesWord) \ - V(hadd_s_d, HADD_DF, int64_t, int32_t, kMSALanesDword) \ - V(hadd_u_h, HADD_DF, uint16_t, uint8_t, kMSALanesHalf) \ - V(hadd_u_w, HADD_DF, uint32_t, uint16_t, kMSALanesWord) \ - V(hadd_u_d, HADD_DF, uint64_t, uint32_t, kMSALanesDword) \ - V(hsub_s_h, HSUB_DF, int16_t, int8_t, kMSALanesHalf) \ - V(hsub_s_w, HSUB_DF, int32_t, int16_t, kMSALanesWord) \ - V(hsub_s_d, HSUB_DF, int64_t, int32_t, kMSALanesDword) \ - V(hsub_u_h, HSUB_DF, uint16_t, uint8_t, kMSALanesHalf) \ - V(hsub_u_w, HSUB_DF, uint32_t, uint16_t, kMSALanesWord) \ - V(hsub_u_d, HSUB_DF, uint64_t, uint32_t, kMSALanesDword) - -#define RUN_TEST(instr, verify, type, lanes, mask) \ - run_msa_3r(&tc[i], [](MacroAssembler& assm) { __ instr(w2, w1, w0); }, \ - [](uint64_t* ws, uint64_t* wt, uint64_t* wd) { \ - verify(type, lanes, mask); \ - }); - - for (size_t i = 0; i < arraysize(tc); ++i) { - TEST_CASE(RUN_TEST) - } - -#define RUN_TEST2(instr, verify, type, lanes, mask) \ - for (unsigned i = 0; i < arraysize(tc); i++) { \ - for (unsigned j = 0; j < 3; j++) { \ - for (unsigned k = 0; k < lanes; k++) { \ - type* element = reinterpret_cast<type*>(&tc[i]); \ - element[k + j * lanes] &= std::numeric_limits<type>::max(); \ - } \ - } \ - } \ - run_msa_3r(&tc[i], [](MacroAssembler& assm) { __ instr(w2, w1, w0); }, \ - [](uint64_t* ws, uint64_t* wt, uint64_t* wd) { \ - verify(type, lanes, mask); \ - }); - -#define TEST_CASE2(V) \ - V(sra_b, SRA_DF, int8_t, kMSALanesByte, UINT8_MAX) \ - V(sra_h, SRA_DF, int16_t, kMSALanesHalf, UINT16_MAX) \ - V(sra_w, SRA_DF, int32_t, kMSALanesWord, UINT32_MAX) \ - V(sra_d, SRA_DF, int64_t, kMSALanesDword, UINT64_MAX) \ - V(srar_b, SRAR_DF, int8_t, 
kMSALanesByte, UINT8_MAX) \ - V(srar_h, SRAR_DF, int16_t, kMSALanesHalf, UINT16_MAX) \ - V(srar_w, SRAR_DF, int32_t, kMSALanesWord, UINT32_MAX) \ - V(srar_d, SRAR_DF, int64_t, kMSALanesDword, UINT64_MAX) - - for (size_t i = 0; i < arraysize(tc); ++i) { - TEST_CASE2(RUN_TEST2) - } - -#undef TEST_CASE -#undef TEST_CASE2 -#undef RUN_TEST -#undef RUN_TEST2 -#undef SLL_DF -#undef SRL_DF -#undef SRA_DF -#undef BCRL_DF -#undef BSET_DF -#undef BNEG_DF -#undef BINSL_DF -#undef BINSR_DF -#undef ADDV_DF -#undef SUBV_DF -#undef MAX_DF -#undef MIN_DF -#undef MAXA_DF -#undef MINA_DF -#undef CEQ_DF -#undef CLT_DF -#undef CLE_DF -#undef ADD_A_DF -#undef ADDS_A_DF -#undef ADDS_DF -#undef AVE_DF -#undef AVER_DF -#undef SUBS_DF -#undef SUBSUS_U_DF -#undef SUBSUU_S_DF -#undef ASUB_S_DF -#undef ASUB_U_DF -#undef MULV_DF -#undef MADDV_DF -#undef MSUBV_DF -#undef DIV_DF -#undef MOD_DF -#undef SRAR_DF -#undef PCKEV_DF -#undef PCKOD_DF -#undef ILVL_DF -#undef ILVR_DF -#undef ILVEV_DF -#undef ILVOD_DF -#undef VSHF_DF -#undef HADD_DF -#undef HSUB_DF -} - -struct TestCaseMsa3RF { - uint64_t ws_lo; - uint64_t ws_hi; - uint64_t wt_lo; - uint64_t wt_hi; - uint64_t wd_lo; - uint64_t wd_hi; -}; - -struct ExpectedResult_MSA3RF { - uint64_t exp_res_lo; - uint64_t exp_res_hi; -}; - -template <typename Func> -void run_msa_3rf(const struct TestCaseMsa3RF* input, - const struct ExpectedResult_MSA3RF* output, - Func Generate2RInstructionFunc) { - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - CpuFeatureScope fscope(&assm, MIPS_SIMD); - msa_reg_t res; - - load_elements_of_vector( - &assm, reinterpret_cast<const uint64_t*>(&input->ws_lo), w0, t0, t1); - load_elements_of_vector( - &assm, reinterpret_cast<const uint64_t*>(&input->wt_lo), w1, t0, t1); - load_elements_of_vector( - &assm, reinterpret_cast<const uint64_t*>(&input->wd_lo), w2, t0, t1); - Generate2RInstructionFunc(assm); - store_elements_of_vector(&assm, w2, a0); - - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle<Code> code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); -#ifdef OBJECT_PRINT - code->Print(std::cout); -#endif - auto f = GeneratedCode::FromCode(*code); - - (f.Call(&res, 0, 0, 0, 0)); - - CHECK_EQ(output->exp_res_lo, res.d[0]); - CHECK_EQ(output->exp_res_hi, res.d[1]); -} - -struct TestCaseMsa3RF_F { - float ws_1, ws_2, ws_3, ws_4; - float wt_1, wt_2, wt_3, wt_4; - float wd_1, wd_2, wd_3, wd_4; -}; -struct ExpRes_32I { - int32_t exp_res_1; - int32_t exp_res_2; - int32_t exp_res_3; - int32_t exp_res_4; -}; - -struct TestCaseMsa3RF_D { - double ws_lo, ws_hi; - double wt_lo, wt_hi; - double wd_lo, wd_hi; -}; -struct ExpRes_64I { - int64_t exp_res_lo; - int64_t exp_res_hi; -}; - -TEST(MSA_floating_point_quiet_compare) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - const float qnan_f = std::numeric_limits<float>::quiet_NaN(); - const double qnan_d = std::numeric_limits<double>::quiet_NaN(); - const float inf_f = std::numeric_limits<float>::infinity(); - const double inf_d = std::numeric_limits<double>::infinity(); - const int32_t ones = -1; - - const struct TestCaseMsa3RF_F tc_w[]{ - {qnan_f, -qnan_f, inf_f, 2.14e9f, // ws - qnan_f, 0.f, qnan_f, -2.14e9f, // wt - 0, 0, 0, 0}, // wd - {inf_f, -inf_f, -3.4e38f, 1.5e-45f, -inf_f, -inf_f, -inf_f, inf_f, 0, 0, - 0, 0}, - {0.f, 19.871e24f, -1.5e-45f, -1.5e-45f, -19.871e24f, 19.871e24f, 1.5e-45f, - -1.5e-45f, 0, 0, 0, 0}}; - - const struct TestCaseMsa3RF_D tc_d[]{ - // 
ws_lo, ws_hi, wt_lo, wt_hi, wd_lo, wd_hi - {qnan_d, -qnan_d, qnan_f, 0., 0, 0}, - {inf_d, 9.22e18, qnan_d, -9.22e18, 0, 0}, - {inf_d, inf_d, -inf_d, inf_d, 0, 0}, - {-2.3e-308, 5e-324, -inf_d, inf_d, 0, 0}, - {0., 24.1e87, -1.6e308, 24.1e87, 0, 0}, - {-5e-324, -5e-324, 5e-324, -5e-324, 0, 0}}; - - const struct ExpectedResult_MSA3RF exp_res_fcaf = {0, 0}; - const struct ExpRes_32I exp_res_fcun_w[] = { - {ones, ones, ones, 0}, {0, 0, 0, 0}, {0, 0, 0, 0}}; - const struct ExpRes_64I exp_res_fcun_d[] = {{ones, ones}, {ones, 0}, {0, 0}, - {0, 0}, {0, 0}, {0, 0}}; - const struct ExpRes_32I exp_res_fceq_w[] = { - {0, 0, 0, 0}, {0, ones, 0, 0}, {0, ones, 0, ones}}; - const struct ExpRes_64I exp_res_fceq_d[] = {{0, 0}, {0, 0}, {0, ones}, - {0, 0}, {0, ones}, {0, ones}}; - const struct ExpRes_32I exp_res_fcueq_w[] = { - {ones, ones, ones, 0}, {0, ones, 0, 0}, {0, ones, 0, ones}}; - const struct ExpRes_64I exp_res_fcueq_d[] = { - {ones, ones}, {ones, 0}, {0, ones}, {0, 0}, {0, ones}, {0, ones}}; - const struct ExpRes_32I exp_res_fclt_w[] = { - {0, 0, 0, 0}, {0, 0, 0, ones}, {0, 0, ones, 0}}; - const struct ExpRes_64I exp_res_fclt_d[] = {{0, 0}, {0, 0}, {0, 0}, - {0, ones}, {0, 0}, {ones, 0}}; - const struct ExpRes_32I exp_res_fcult_w[] = { - {ones, ones, ones, 0}, {0, 0, 0, ones}, {0, 0, ones, 0}}; - const struct ExpRes_64I exp_res_fcult_d[] = { - {ones, ones}, {ones, 0}, {0, 0}, {0, ones}, {0, 0}, {ones, 0}}; - const struct ExpRes_32I exp_res_fcle_w[] = { - {0, 0, 0, 0}, {0, ones, 0, ones}, {0, ones, ones, ones}}; - const struct ExpRes_64I exp_res_fcle_d[] = { - {0, 0}, {0, 0}, {0, ones}, {0, ones}, {0, ones}, {ones, ones}}; - const struct ExpRes_32I exp_res_fcule_w[] = { - {ones, ones, ones, 0}, {0, ones, 0, ones}, {0, ones, ones, ones}}; - const struct ExpRes_64I exp_res_fcule_d[] = { - {ones, ones}, {ones, 0}, {0, ones}, {0, ones}, {0, ones}, {ones, ones}}; - const struct ExpRes_32I exp_res_fcor_w[] = { - {0, 0, 0, ones}, {ones, ones, ones, ones}, {ones, ones, ones, ones}}; - const struct ExpRes_64I exp_res_fcor_d[] = {{0, 0}, {0, ones}, - {ones, ones}, {ones, ones}, - {ones, ones}, {ones, ones}}; - const struct ExpRes_32I exp_res_fcune_w[] = { - {ones, ones, ones, ones}, {ones, 0, ones, ones}, {ones, 0, ones, 0}}; - const struct ExpRes_64I exp_res_fcune_d[] = {{ones, ones}, {ones, ones}, - {ones, 0}, {ones, ones}, - {ones, 0}, {ones, 0}}; - const struct ExpRes_32I exp_res_fcne_w[] = { - {0, 0, 0, ones}, {ones, 0, ones, ones}, {ones, 0, ones, 0}}; - const struct ExpRes_64I exp_res_fcne_d[] = { - {0, 0}, {0, ones}, {ones, 0}, {ones, ones}, {ones, 0}, {ones, 0}}; - -#define TEST_FP_QUIET_COMPARE_W(instruction, src, exp_res) \ - run_msa_3rf(reinterpret_cast<const struct TestCaseMsa3RF*>(src), \ - reinterpret_cast<const struct ExpectedResult_MSA3RF*>(exp_res), \ - [](MacroAssembler& assm) { __ instruction(w2, w0, w1); }); -#define TEST_FP_QUIET_COMPARE_D(instruction, src, exp_res) \ - run_msa_3rf(reinterpret_cast<const struct TestCaseMsa3RF*>(src), \ - reinterpret_cast<const struct ExpectedResult_MSA3RF*>(exp_res), \ - [](MacroAssembler& assm) { __ instruction(w2, w0, w1); }); - - for (uint64_t i = 0; i < arraysize(tc_w); i++) { - TEST_FP_QUIET_COMPARE_W(fcaf_w, &tc_w[i], &exp_res_fcaf) - TEST_FP_QUIET_COMPARE_W(fcun_w, &tc_w[i], &exp_res_fcun_w[i]) - TEST_FP_QUIET_COMPARE_W(fceq_w, &tc_w[i], &exp_res_fceq_w[i]) - TEST_FP_QUIET_COMPARE_W(fcueq_w, &tc_w[i], &exp_res_fcueq_w[i]) - TEST_FP_QUIET_COMPARE_W(fclt_w, &tc_w[i], &exp_res_fclt_w[i]) - TEST_FP_QUIET_COMPARE_W(fcult_w, &tc_w[i], &exp_res_fcult_w[i]) - TEST_FP_QUIET_COMPARE_W(fcle_w, &tc_w[i], &exp_res_fcle_w[i]) - TEST_FP_QUIET_COMPARE_W(fcule_w, &tc_w[i], &exp_res_fcule_w[i]) 
- TEST_FP_QUIET_COMPARE_W(fcor_w, &tc_w[i], &exp_res_fcor_w[i]) - TEST_FP_QUIET_COMPARE_W(fcune_w, &tc_w[i], &exp_res_fcune_w[i]) - TEST_FP_QUIET_COMPARE_W(fcne_w, &tc_w[i], &exp_res_fcne_w[i]) - } - for (uint64_t i = 0; i < arraysize(tc_d); i++) { - TEST_FP_QUIET_COMPARE_D(fcaf_d, &tc_d[i], &exp_res_fcaf) - TEST_FP_QUIET_COMPARE_D(fcun_d, &tc_d[i], &exp_res_fcun_d[i]) - TEST_FP_QUIET_COMPARE_D(fceq_d, &tc_d[i], &exp_res_fceq_d[i]) - TEST_FP_QUIET_COMPARE_D(fcueq_d, &tc_d[i], &exp_res_fcueq_d[i]) - TEST_FP_QUIET_COMPARE_D(fclt_d, &tc_d[i], &exp_res_fclt_d[i]) - TEST_FP_QUIET_COMPARE_D(fcult_d, &tc_d[i], &exp_res_fcult_d[i]) - TEST_FP_QUIET_COMPARE_D(fcle_d, &tc_d[i], &exp_res_fcle_d[i]) - TEST_FP_QUIET_COMPARE_D(fcule_d, &tc_d[i], &exp_res_fcule_d[i]) - TEST_FP_QUIET_COMPARE_D(fcor_d, &tc_d[i], &exp_res_fcor_d[i]) - TEST_FP_QUIET_COMPARE_D(fcune_d, &tc_d[i], &exp_res_fcune_d[i]) - TEST_FP_QUIET_COMPARE_D(fcne_d, &tc_d[i], &exp_res_fcne_d[i]) - } -#undef TEST_FP_QUIET_COMPARE_W -#undef TEST_FP_QUIET_COMPARE_D -} - -template <typename T> -inline const T* fadd_function(const T* src1, const T* src2, const T* src3, - T* dst) { - for (uint64_t i = 0; i < kMSALanesByte / sizeof(T); i++) { - dst[i] = src1[i] + src2[i]; - } - return dst; -} -template <typename T> -inline const T* fsub_function(const T* src1, const T* src2, const T* src3, - T* dst) { - for (uint64_t i = 0; i < kMSALanesByte / sizeof(T); i++) { - dst[i] = src1[i] - src2[i]; - } - return dst; -} -template <typename T> -inline const T* fmul_function(const T* src1, const T* src2, const T* src3, - T* dst) { - for (uint64_t i = 0; i < kMSALanesByte / sizeof(T); i++) { - dst[i] = src1[i] * src2[i]; - } - return dst; -} -template <typename T> -inline const T* fdiv_function(const T* src1, const T* src2, const T* src3, - T* dst) { - for (uint64_t i = 0; i < kMSALanesByte / sizeof(T); i++) { - dst[i] = src1[i] / src2[i]; - } - return dst; -} -template <typename T> -inline const T* fmadd_function(const T* src1, const T* src2, const T* src3, - T* dst) { - for (uint64_t i = 0; i < kMSALanesByte / sizeof(T); i++) { - dst[i] = std::fma(src1[i], src2[i], src3[i]); - } - return dst; -} -template <typename T> -inline const T* fmsub_function(const T* src1, const T* src2, const T* src3, - T* dst) { - for (uint64_t i = 0; i < kMSALanesByte / sizeof(T); i++) { - dst[i] = std::fma(src1[i], -src2[i], src3[i]); - } - return dst; -} - -TEST(MSA_floating_point_arithmetic) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - const float inf_f = std::numeric_limits<float>::infinity(); - const double inf_d = std::numeric_limits<double>::infinity(); - - const struct TestCaseMsa3RF_F tc_w[] = { - {0.3, -2.14e13f, inf_f, 0.f, // ws - -inf_f, std::sqrt(8.e-26f), -23.e34, -2.14e9f, // wt - -1e30f, 4.6e12f, 0, 2.14e9f}, // wd - {3.4e38f, -1.2e-38f, 1e19f, -1e19f, 3.4e38f, 1.2e-38f, -1e19f, -1e-19f, - 3.4e38f, 1.2e-38f * 3, 3.4e38f, -4e19f}, - {-3e-31f, 3e10f, 1e25f, 123.f, 1e-14f, 1e-34f, 4e25f, 321.f, 3e-17f, - 2e-24f, 2.f, -123456.f}}; - - const struct TestCaseMsa3RF_D tc_d[] = { - // ws_lo, ws_hi, wt_lo, wt_hi, wd_lo, wd_hi - {0.3, -2.14e103, -inf_d, std::sqrt(8.e-206), -1e30, 4.6e102}, - {inf_d, 0., -23.e304, -2.104e9, 0, 2.104e9}, - {3.4e307, -1.2e-307, 3.4e307, 1.2e-307, 3.4e307, 1.2e-307 * 3}, - {1e154, -1e154, -1e154, -1e-154, 2.9e38, -4e19}, - {-3e-301, 3e100, 1e-104, 1e-304, 3e-107, 2e-204}, - {1e205, 123., 4e205, 321., 2., -123456.}}; - - struct ExpectedResult_MSA3RF dst_container; - -#define FP_ARITHMETIC_DF_W(instr, function, src1, src2, src3) \ - run_msa_3rf( \ - 
reinterpret_cast<const struct TestCaseMsa3RF*>(src1), \ - reinterpret_cast<const struct ExpectedResult_MSA3RF*>(function( \ - src1, src2, src3, reinterpret_cast<float*>(&dst_container))), \ - [](MacroAssembler& assm) { __ instr(w2, w0, w1); }); - -#define FP_ARITHMETIC_DF_D(instr, function, src1, src2, src3) \ - run_msa_3rf( \ - reinterpret_cast<const struct TestCaseMsa3RF*>(src1), \ - reinterpret_cast<const struct ExpectedResult_MSA3RF*>(function( \ - src1, src2, src3, reinterpret_cast<double*>(&dst_container))), \ - [](MacroAssembler& assm) { __ instr(w2, w0, w1); }); - - for (uint64_t i = 0; i < arraysize(tc_w); i++) { - FP_ARITHMETIC_DF_W(fadd_w, fadd_function, &tc_w[i].ws_1, &tc_w[i].wt_1, - &tc_w[i].wd_1) - FP_ARITHMETIC_DF_W(fsub_w, fsub_function, &tc_w[i].ws_1, &tc_w[i].wt_1, - &tc_w[i].wd_1) - FP_ARITHMETIC_DF_W(fmul_w, fmul_function, &tc_w[i].ws_1, &tc_w[i].wt_1, - &tc_w[i].wd_1) - FP_ARITHMETIC_DF_W(fdiv_w, fdiv_function, &tc_w[i].ws_1, &tc_w[i].wt_1, - &tc_w[i].wd_1) - FP_ARITHMETIC_DF_W(fmadd_w, fmadd_function, &tc_w[i].ws_1, &tc_w[i].wt_1, - &tc_w[i].wd_1) - FP_ARITHMETIC_DF_W(fmsub_w, fmsub_function, &tc_w[i].ws_1, &tc_w[i].wt_1, - &tc_w[i].wd_1) - } - for (uint64_t i = 0; i < arraysize(tc_d); i++) { - FP_ARITHMETIC_DF_D(fadd_d, fadd_function, &tc_d[i].ws_lo, &tc_d[i].wt_lo, - &tc_d[i].wd_lo) - FP_ARITHMETIC_DF_D(fsub_d, fsub_function, &tc_d[i].ws_lo, &tc_d[i].wt_lo, - &tc_d[i].wd_lo) - FP_ARITHMETIC_DF_D(fmul_d, fmul_function, &tc_d[i].ws_lo, &tc_d[i].wt_lo, - &tc_d[i].wd_lo) - FP_ARITHMETIC_DF_D(fdiv_d, fdiv_function, &tc_d[i].ws_lo, &tc_d[i].wt_lo, - &tc_d[i].wd_lo) - FP_ARITHMETIC_DF_D(fmadd_d, fmadd_function, &tc_d[i].ws_lo, &tc_d[i].wt_lo, - &tc_d[i].wd_lo) - FP_ARITHMETIC_DF_D(fmsub_d, fmsub_function, &tc_d[i].ws_lo, &tc_d[i].wt_lo, - &tc_d[i].wd_lo) - } -#undef FP_ARITHMETIC_DF_W -#undef FP_ARITHMETIC_DF_D -} - -struct ExpRes_F { - float exp_res_1; - float exp_res_2; - float exp_res_3; - float exp_res_4; -}; - -struct ExpRes_D { - double exp_res_1; - double exp_res_2; -}; - -TEST(MSA_fmin_fmin_a_fmax_fmax_a) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - const float inf_f = std::numeric_limits<float>::infinity(); - const double inf_d = std::numeric_limits<double>::infinity(); - - const struct TestCaseMsa3RF_F tc_w[] = { - {0.3f, -2.14e13f, inf_f, -0.f, // ws - -inf_f, -std::sqrt(8.e26f), -23.e34f, -2.14e9f, // wt - 0, 0, 0, 0}, // wd - {3.4e38f, 1.2e-41f, 1e19f, 1e19f, // ws - 3.4e38f, -1.1e-41f, -1e-42f, -1e29f, // wt - 0, 0, 0, 0}}; // wd - - const struct TestCaseMsa3RF_D tc_d[] = { - // ws_lo, ws_hi, wt_lo, wt_hi, wd_lo, wd_hi - {0.3, -2.14e103, -inf_d, -std::sqrt(8e206), 0, 0}, - {inf_d, -0., -23e304, -2.14e90, 0, 0}, - {3.4e307, 1.2e-320, 3.4e307, -1.1e-320, 0, 0}, - {1e154, 1e154, -1e-321, -1e174, 0, 0}}; - - const struct ExpRes_F exp_res_fmax_w[] = {{0.3f, -2.14e13f, inf_f, -0.f}, - {3.4e38f, 1.2e-41f, 1e19f, 1e19f}}; - const struct ExpRes_F exp_res_fmax_a_w[] = { - {-inf_f, -std::sqrt(8e26f), inf_f, -2.14e9f}, - {3.4e38f, 1.2e-41f, 1e19f, -1e29f}}; - const struct ExpRes_F exp_res_fmin_w[] = { - {-inf_f, -std::sqrt(8.e26f), -23e34f, -2.14e9f}, - {3.4e38f, -1.1e-41f, -1e-42f, -1e29f}}; - const struct ExpRes_F exp_res_fmin_a_w[] = { - {0.3, -2.14e13f, -23.e34f, -0.f}, {3.4e38f, -1.1e-41f, -1e-42f, 1e19f}}; - - const struct ExpRes_D exp_res_fmax_d[] = { - {0.3, -2.14e103}, {inf_d, -0.}, {3.4e307, 1.2e-320}, {1e154, 1e154}}; - const struct ExpRes_D exp_res_fmax_a_d[] = {{-inf_d, -std::sqrt(8e206)}, - {inf_d, -2.14e90}, - {3.4e307, 1.2e-320}, - {1e154, -1e174}}; - const struct ExpRes_D exp_res_fmin_d[] = {{-inf_d, -std::sqrt(8e206)}, - {-23e304, 
-2.14e90}, - {3.4e307, -1.1e-320}, - {-1e-321, -1e174}}; - const struct ExpRes_D exp_res_fmin_a_d[] = { - {0.3, -2.14e103}, {-23e304, -0.}, {3.4e307, -1.1e-320}, {-1e-321, 1e154}}; - -#define TEST_FP_MIN_MAX_W(instruction, src, exp_res) \ - run_msa_3rf(reinterpret_cast<const struct TestCaseMsa3RF*>(src), \ - reinterpret_cast<const struct ExpectedResult_MSA3RF*>(exp_res), \ - [](MacroAssembler& assm) { __ instruction(w2, w0, w1); }); - -#define TEST_FP_MIN_MAX_D(instruction, src, exp_res) \ - run_msa_3rf(reinterpret_cast<const struct TestCaseMsa3RF*>(src), \ - reinterpret_cast<const struct ExpectedResult_MSA3RF*>(exp_res), \ - [](MacroAssembler& assm) { __ instruction(w2, w0, w1); }); - - for (uint64_t i = 0; i < arraysize(tc_w); i++) { - TEST_FP_MIN_MAX_W(fmax_w, &tc_w[i], &exp_res_fmax_w[i]) - TEST_FP_MIN_MAX_W(fmax_a_w, &tc_w[i], &exp_res_fmax_a_w[i]) - TEST_FP_MIN_MAX_W(fmin_w, &tc_w[i], &exp_res_fmin_w[i]) - TEST_FP_MIN_MAX_W(fmin_a_w, &tc_w[i], &exp_res_fmin_a_w[i]) - } - - for (uint64_t i = 0; i < arraysize(tc_d); i++) { - TEST_FP_MIN_MAX_D(fmax_d, &tc_d[i], &exp_res_fmax_d[i]) - TEST_FP_MIN_MAX_D(fmax_a_d, &tc_d[i], &exp_res_fmax_a_d[i]) - TEST_FP_MIN_MAX_D(fmin_d, &tc_d[i], &exp_res_fmin_d[i]) - TEST_FP_MIN_MAX_D(fmin_a_d, &tc_d[i], &exp_res_fmin_a_d[i]) - } -#undef TEST_FP_MIN_MAX_W -#undef TEST_FP_MIN_MAX_D -} - -struct TestCaseMsa3RF_16I { - int16_t ws_1, ws_2, ws_3, ws_4, ws_5, ws_6, ws_7, ws_8; - int16_t wt_1, wt_2, wt_3, wt_4, wt_5, wt_6, wt_7, wt_8; - int16_t wd_1, wd_2, wd_3, wd_4, wd_5, wd_6, wd_7, wd_8; -}; -struct ExpRes_16I { - int16_t exp_res_1; - int16_t exp_res_2; - int16_t exp_res_3; - int16_t exp_res_4; - int16_t exp_res_5; - int16_t exp_res_6; - int16_t exp_res_7; - int16_t exp_res_8; -}; - -struct TestCaseMsa3RF_32I { - int32_t ws_1, ws_2, ws_3, ws_4; - int32_t wt_1, wt_2, wt_3, wt_4; - int32_t wd_1, wd_2, wd_3, wd_4; -}; - -TEST(MSA_fixed_point_arithmetic) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - const struct TestCaseMsa3RF tc_h[]{ - {0x800080007FFF7FFF, 0xE1ED8000FAD3863A, 0x80007FFF00AF7FFF, - 0x800015A77FFFA0EB, 0x7FFF800080007FFF, 0x80007FFF1F207364}, - {0x800080007FFF006A, 0x002AFFC4329AD87B, 0x80007FFF7FFF00F3, - 0xFFECFFB4D0D7F429, 0x80007FFF80007C33, 0x54AC6BBCE53B8C91}}; - - const struct TestCaseMsa3RF tc_w[]{ - {0x8000000080000000, 0x7FFFFFFF7FFFFFFF, 0x800000007FFFFFFF, - 0x00001FF37FFFFFFF, 0x7FFFFFFF80000000, 0x800000007FFFFFFF}, - {0xE1ED035580000000, 0xFAD3863AED462C0B, 0x8000000015A70AEC, - 0x7FFFFFFFA0EBD354, 0x800000007FFFFFFF, 0xD0D7F4291F207364}, - {0x8000000080000000, 0x7FFFFFFF0000DA1F, 0x800000007FFFFFFF, - 0x7FFFFFFF00F39C3B, 0x800000007FFFFFFF, 0x800000007C33F2FD}, - {0x0000AC33FFFF329A, 0x54AC6BBCE53BD87B, 0xFFFFE2B4D0D7F429, - 0x0355ED462C0B1FF3, 0xB5DEB625939DD3F9, 0xE642ADFA69519596}}; - - const struct ExpectedResult_MSA3RF exp_res_mul_q_h[] = { - {0x7FFF800100AE7FFE, 0x1E13EA59FAD35A74}, - {0x7FFF80017FFE0000, 0xFFFF0000ED5B03A7}}; - const struct ExpectedResult_MSA3RF exp_res_madd_q_h[] = { - {0x7FFF800080AE7FFF, 0x9E136A5819F37FFF}, - {0x00000000FFFE7C33, 0x54AB6BBCD2969038}}; - const struct ExpectedResult_MSA3RF exp_res_msub_q_h[] = { - {0xFFFFFFFF80000000, 0x80007FFF244C18EF}, - {0x80007FFF80007C32, 0x54AC6BBBF7DF88E9}}; - const struct ExpectedResult_MSA3RF exp_res_mulr_q_h[] = { - {0x7FFF800100AF7FFE, 0x1E13EA59FAD35A75}, - {0x7FFF80017FFE0001, 0x00000000ED5B03A8}}; - const struct ExpectedResult_MSA3RF exp_res_maddr_q_h[] = { - {0x7FFF800080AF7FFF, 0x9E136A5819F37FFF}, - {0x00000000FFFE7C34, 0x54AC6BBCD2969039}}; - const struct ExpectedResult_MSA3RF exp_res_msubr_q_h[] = { - 
{0xFFFFFFFF80000001, 0x80007FFF244D18EF}, - {0x80007FFF80007C32, 0x54AC6BBCF7E088E9}}; - - const struct ExpectedResult_MSA3RF exp_res_mul_q_w[] = { - {0x7FFFFFFF80000001, 0x00001FF27FFFFFFE}, - {0x1E12FCABEA58F514, 0xFAD3863A0DE8DEE1}, - {0x7FFFFFFF80000001, 0x7FFFFFFE0000019F}, - {0xFFFFFFFF00004BAB, 0x0234E1FBF6CA3EE0}}; - const struct ExpectedResult_MSA3RF exp_res_madd_q_w[] = { - {0x7FFFFFFF80000000, 0x80001FF27FFFFFFF}, - {0x9E12FCAB6A58F513, 0xCBAB7A632D095245}, - {0x0000000000000000, 0xFFFFFFFE7C33F49C}, - {0xB5DEB624939E1FA4, 0xE8778FF5601BD476}}; - const struct ExpectedResult_MSA3RF exp_res_msub_q_w[] = { - {0xFFFFFFFFFFFFFFFF, 0x8000000000000000}, - {0x800000007FFFFFFF, 0xD6046DEE11379482}, - {0x800000007FFFFFFF, 0x800000007C33F15D}, - {0xB5DEB625939D884D, 0xE40DCBFE728756B5}}; - const struct ExpectedResult_MSA3RF exp_res_mulr_q_w[] = { - {0x7FFFFFFF80000001, 0x00001FF37FFFFFFE}, - {0x1E12FCABEA58F514, 0xFAD3863A0DE8DEE2}, - {0x7FFFFFFF80000001, 0x7FFFFFFE0000019F}, - {0x0000000000004BAC, 0x0234E1FCF6CA3EE1}}; - const struct ExpectedResult_MSA3RF exp_res_maddr_q_w[] = { - {0x7FFFFFFF80000000, 0x80001FF37FFFFFFF}, - {0x9E12FCAB6A58F513, 0xCBAB7A632D095246}, - {0x0000000000000000, 0xFFFFFFFE7C33F49C}, - {0xB5DEB625939E1FA5, 0xE8778FF6601BD477}}; - const struct ExpectedResult_MSA3RF exp_res_msubr_q_w[] = { - {0xFFFFFFFFFFFFFFFF, 0x8000000000000001}, - {0x800000007FFFFFFF, 0xD6046DEF11379482}, - {0x800000007FFFFFFF, 0x800000007C33F15E}, - {0xB5DEB625939D884D, 0xE40DCBFE728756B5}}; - -#define TEST_FIXED_POINT_DF_H(instruction, src, exp_res) \ - run_msa_3rf((src), (exp_res), \ - [](MacroAssembler& assm) { __ instruction(w2, w0, w1); }); - -#define TEST_FIXED_POINT_DF_W(instruction, src, exp_res) \ - run_msa_3rf((src), (exp_res), \ - [](MacroAssembler& assm) { __ instruction(w2, w0, w1); }); - - for (uint64_t i = 0; i < arraysize(tc_h); i++) { - TEST_FIXED_POINT_DF_H(mul_q_h, &tc_h[i], &exp_res_mul_q_h[i]) - TEST_FIXED_POINT_DF_H(madd_q_h, &tc_h[i], &exp_res_madd_q_h[i]) - TEST_FIXED_POINT_DF_H(msub_q_h, &tc_h[i], &exp_res_msub_q_h[i]) - TEST_FIXED_POINT_DF_H(mulr_q_h, &tc_h[i], &exp_res_mulr_q_h[i]) - TEST_FIXED_POINT_DF_H(maddr_q_h, &tc_h[i], &exp_res_maddr_q_h[i]) - TEST_FIXED_POINT_DF_H(msubr_q_h, &tc_h[i], &exp_res_msubr_q_h[i]) - } - - for (uint64_t i = 0; i < arraysize(tc_w); i++) { - TEST_FIXED_POINT_DF_W(mul_q_w, &tc_w[i], &exp_res_mul_q_w[i]) - TEST_FIXED_POINT_DF_W(madd_q_w, &tc_w[i], &exp_res_madd_q_w[i]) - TEST_FIXED_POINT_DF_W(msub_q_w, &tc_w[i], &exp_res_msub_q_w[i]) - TEST_FIXED_POINT_DF_W(mulr_q_w, &tc_w[i], &exp_res_mulr_q_w[i]) - TEST_FIXED_POINT_DF_W(maddr_q_w, &tc_w[i], &exp_res_maddr_q_w[i]) - TEST_FIXED_POINT_DF_W(msubr_q_w, &tc_w[i], &exp_res_msubr_q_w[i]) - } -#undef TEST_FIXED_POINT_DF_H -#undef TEST_FIXED_POINT_DF_W -} - -TEST(MSA_fexdo) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - const float inf_float = std::numeric_limits<float>::infinity(); - const float nan_float = std::numeric_limits<float>::quiet_NaN(); - const double inf_double = std::numeric_limits<double>::infinity(); - - const struct TestCaseMsa3RF_F tc_w[] = { - // ws_1, ws_2, ws_3, ws_4, wt_1, wt_2, wt_3, wt_4, wd_1, wd_2, wd_3, wd_4 - {inf_float, nan_float, 66505.f, 65504.f, 6.2e-5f, 5e-5f, -32.42f, - -inf_float, 0, 0, 0, 0}, - {-0.f, 0.f, 123.567f, -765.321f, -6e-8f, 5.9e-8f, 1e-7f, -1e-20f, 0, 0, 0, - 0}, - {1e-36f, 1e20f, -1e20f, 2e-20f, 6e-8f, -2.9e-8f, -66505.f, -65504.f, 0, 0, - 0, 0}}; - - const struct TestCaseMsa3RF_D tc_d[] = { - // 
ws_lo, ws_hi, wt_lo, wt_hi, wd_lo, wd_hi - {inf_double, -1234., 4e38, 3.4e38, 0, 0}, - {1.2e-38, 1.1e-39, -38.92f, -inf_double, 0, 0}, - {-0., 0., 123.567e31, -765.321e33, 0, 0}, - {-1.5e-45, 1.3e-45, 1e-42, -1e-200, 0, 0}, - {1e-202, 1e158, -1e159, 1e14, 0, 0}, - {1.5e-42, 1.3e-46, -123.567e31, 765.321e33, 0, 0}}; - - const struct ExpRes_16I exp_res_fexdo_w[] = { - {static_cast<int16_t>(0x0410), static_cast<int16_t>(0x0347), - static_cast<int16_t>(0xD00D), static_cast<int16_t>(0xFC00), - static_cast<int16_t>(0x7C00), static_cast<int16_t>(0x7DFF), - static_cast<int16_t>(0x7C00), static_cast<int16_t>(0x7BFF)}, - {static_cast<int16_t>(0x8001), static_cast<int16_t>(0x0001), - static_cast<int16_t>(0x0002), static_cast<int16_t>(0x8000), - static_cast<int16_t>(0x8000), static_cast<int16_t>(0x0000), - static_cast<int16_t>(0x57B9), static_cast<int16_t>(0xE1FB)}, - {static_cast<int16_t>(0x0001), static_cast<int16_t>(0x8000), - static_cast<int16_t>(0xFC00), static_cast<int16_t>(0xFBFF), - static_cast<int16_t>(0x0000), static_cast<int16_t>(0x7C00), - static_cast<int16_t>(0xFC00), static_cast<int16_t>(0x0000)}}; - - const struct ExpRes_32I exp_res_fexdo_d[] = { - {base::bit_cast<int32_t>(0x7F800000), base::bit_cast<int32_t>(0x7F7FC99E), - base::bit_cast<int32_t>(0x7F800000), - base::bit_cast<int32_t>(0xC49A4000)}, - {base::bit_cast<int32_t>(0xC21BAE14), base::bit_cast<int32_t>(0xFF800000), - base::bit_cast<int32_t>(0x0082AB1E), - base::bit_cast<int32_t>(0x000BFA5A)}, - {base::bit_cast<int32_t>(0x7673B164), base::bit_cast<int32_t>(0xFB13653D), - base::bit_cast<int32_t>(0x80000000), - base::bit_cast<int32_t>(0x00000000)}, - {base::bit_cast<int32_t>(0x000002CA), base::bit_cast<int32_t>(0x80000000), - base::bit_cast<int32_t>(0x80000001), - base::bit_cast<int32_t>(0x00000001)}, - {base::bit_cast<int32_t>(0xFF800000), base::bit_cast<int32_t>(0x56B5E621), - base::bit_cast<int32_t>(0x00000000), - base::bit_cast<int32_t>(0x7F800000)}, - {base::bit_cast<int32_t>(0xF673B164), base::bit_cast<int32_t>(0x7B13653D), - base::bit_cast<int32_t>(0x0000042E), - base::bit_cast<int32_t>(0x00000000)}}; - -#define TEST_FEXDO_H(instruction, src, exp_res) \ - run_msa_3rf(reinterpret_cast<const struct TestCaseMsa3RF*>(src), \ - reinterpret_cast<const struct ExpectedResult_MSA3RF*>(exp_res), \ - [](MacroAssembler& assm) { __ instruction(w2, w0, w1); }); - -#define TEST_FEXDO_W(instruction, src, exp_res) \ - run_msa_3rf(reinterpret_cast<const struct TestCaseMsa3RF*>(src), \ - reinterpret_cast<const struct ExpectedResult_MSA3RF*>(exp_res), \ - [](MacroAssembler& assm) { __ instruction(w2, w0, w1); }); - - for (uint64_t i = 0; i < arraysize(tc_w); i++) { - TEST_FEXDO_H(fexdo_h, &tc_w[i], &exp_res_fexdo_w[i]) - } - - for (uint64_t i = 0; i < arraysize(tc_d); i++) { - TEST_FEXDO_W(fexdo_w, &tc_d[i], &exp_res_fexdo_d[i]) - } - -#undef TEST_FEXDO_H -#undef TEST_FEXDO_W -} - -TEST(MSA_ftq) { - if (!IsMipsArchVariant(kMips32r6) || !CpuFeatures::IsSupported(MIPS_SIMD)) - return; - - CcTest::InitializeVM(); - - const float nan_float = std::numeric_limits<float>::quiet_NaN(); - const float inf_float = std::numeric_limits<float>::infinity(); - const double nan_double = std::numeric_limits<double>::quiet_NaN(); - const double inf_double = std::numeric_limits<double>::infinity(); - - const struct TestCaseMsa3RF_F tc_w[] = { - {1.f, -0.999f, 1.5f, -31e-6, 1e-7, -0.598, 0.0023, -0.f, 0, 0, 0, 0}, - {100.f, -102.f, -1.1f, 1.3f, 0.f, -1.f, 0.9999f, -0.000322, 0, 0, 0, 0}, - {nan_float, inf_float, -inf_float, -nan_float, -1e-40, 3e-44, 8.3e36, - -0.00003, 0, 0, 0, 0}}; - - const struct TestCaseMsa3RF_D tc_d[] = { - {1., -0.999, 1.5, -31e-6, 0, 0}, - {1e-7, -0.598, 0.0023, -0.f, 0, 0}, - {100.f, -102.f, -1.1f, 1.3f, 0, 0}, - {0.f, -1.f, 0.9999f, -0.000322, 0, 0}, - {nan_double, inf_double, -inf_double, -nan_double, 0, 0}, - {-3e306, 2e-307, 9e307, 2e-307, 0, 0}}; - - const struct ExpRes_16I exp_res_ftq_w[] = { - {static_cast<int16_t>(0x0000), static_cast<int16_t>(0xB375), - static_cast<int16_t>(0x004B), static_cast<int16_t>(0x0000), - static_cast<int16_t>(0x7FFF), static_cast<int16_t>(0x8021), - static_cast<int16_t>(0x7FFF), static_cast<int16_t>(0xFFFF)}, - {static_cast<int16_t>(0x0000), static_cast<int16_t>(0x8000), - 
static_cast<int16_t>(0x7FFD), static_cast<int16_t>(0xFFF5), - static_cast<int16_t>(0x7FFF), static_cast<int16_t>(0x8000), - static_cast<int16_t>(0x8000), static_cast<int16_t>(0x7FFF)}, - {static_cast<int16_t>(0x0000), static_cast<int16_t>(0x0000), - static_cast<int16_t>(0x7FFF), static_cast<int16_t>(0xFFFF), - static_cast<int16_t>(0x0000), static_cast<int16_t>(0x7FFF), - static_cast<int16_t>(0x8000), static_cast<int16_t>(0x0000)}}; - - const struct ExpRes_32I exp_res_ftq_d[] = { - {base::bit_cast<int32_t>(0x7FFFFFFF), base::bit_cast<int32_t>(0xFFFEFBF4), - base::bit_cast<int32_t>(0x7FFFFFFF), - base::bit_cast<int32_t>(0x8020C49C)}, - {base::bit_cast<int32_t>(0x004B5DCC), base::bit_cast<int32_t>(0x00000000), - base::bit_cast<int32_t>(0x000000D7), - base::bit_cast<int32_t>(0xB374BC6A)}, - {base::bit_cast<int32_t>(0x80000000), base::bit_cast<int32_t>(0x7FFFFFFF), - base::bit_cast<int32_t>(0x7FFFFFFF), - base::bit_cast<int32_t>(0x80000000)}, - {base::bit_cast<int32_t>(0x7FFCB900), base::bit_cast<int32_t>(0xFFF572DE), - base::bit_cast<int32_t>(0x00000000), - base::bit_cast<int32_t>(0x80000000)}, - {base::bit_cast<int32_t>(0x80000000), base::bit_cast<int32_t>(0x00000000), - base::bit_cast<int32_t>(0x00000000), - base::bit_cast<int32_t>(0x7FFFFFFF)}, - {base::bit_cast<int32_t>(0x7FFFFFFF), base::bit_cast<int32_t>(0x00000000), - base::bit_cast<int32_t>(0x80000000), - base::bit_cast<int32_t>(0x00000000)}}; - -#define TEST_FTQ_H(instruction, src, exp_res) \ - run_msa_3rf(reinterpret_cast<const struct TestCaseMsa3RF*>(src), \ - reinterpret_cast<const struct ExpectedResult_MSA3RF*>(exp_res), \ - [](MacroAssembler& assm) { __ instruction(w2, w0, w1); }); - -#define TEST_FTQ_W(instruction, src, exp_res) \ - run_msa_3rf(reinterpret_cast<const struct TestCaseMsa3RF*>(src), \ - reinterpret_cast<const struct ExpectedResult_MSA3RF*>(exp_res), \ - [](MacroAssembler& assm) { __ instruction(w2, w0, w1); }); - - for (uint64_t i = 0; i < arraysize(tc_w); i++) { - TEST_FTQ_H(ftq_h, &tc_w[i], &exp_res_ftq_w[i]) - } - - for (uint64_t i = 0; i < arraysize(tc_d); i++) { - TEST_FTQ_W(ftq_w, &tc_d[i], &exp_res_ftq_d[i]) - } - -#undef TEST_FTQ_H -#undef TEST_FTQ_W -} - -#undef __ - -} // namespace internal -} // namespace v8 diff --git a/test/cctest/test-disasm-mips.cc b/test/cctest/test-disasm-mips.cc deleted file mode 100644 index 5bb3ed46f8..0000000000 --- a/test/cctest/test-disasm-mips.cc +++ /dev/null @@ -1,1814 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-// - -#include <stdlib.h> - -#include "src/diagnostics/disasm.h" -#include "src/execution/frames-inl.h" -#include "test/cctest/cctest.h" - -namespace v8 { -namespace internal { - -bool prev_instr_compact_branch = false; - -bool DisassembleAndCompare(byte* pc, const char* compare_string) { - disasm::NameConverter converter; - disasm::Disassembler disasm(converter); - base::EmbeddedVector<char, 128> disasm_buffer; - - if (prev_instr_compact_branch) { - disasm.InstructionDecode(disasm_buffer, pc); - pc += 4; - } - - disasm.InstructionDecode(disasm_buffer, pc); - - if (strcmp(compare_string, disasm_buffer.begin()) != 0) { - fprintf(stderr, - "expected: \n" - "%s\n" - "disassembled: \n" - "%s\n\n", - compare_string, disasm_buffer.begin()); - return false; - } - return true; -} - - -// Set up V8 to a state where we can at least run the assembler and -// disassembler. Declare the variables and allocate the data structures used -// in the rest of the macros. -#define SET_UP() \ - CcTest::InitializeVM(); \ - Isolate* isolate = CcTest::i_isolate(); \ - HandleScope scope(isolate); \ - byte* buffer = reinterpret_cast<byte*>(malloc(4 * 1024)); \ - Assembler assm(AssemblerOptions{}, \ - ExternalAssemblerBuffer(buffer, 4 * 1024)); \ - bool failure = false; - -// This macro assembles one instruction using the preallocated assembler and -// disassembles the generated instruction, comparing the output to the expected -// value. If the comparison fails an error message is printed, but the test -// continues to run until the end. -#define COMPARE(asm_, compare_string) \ - { \ - int pc_offset = assm.pc_offset(); \ - byte *progcounter = &buffer[pc_offset]; \ - assm.asm_; \ - if (!DisassembleAndCompare(progcounter, compare_string)) failure = true; \ - } - - -// Verify that all invocations of the COMPARE macro passed successfully. -// Exit with a failure if at least one of the tests failed. 
-#define VERIFY_RUN() \ - if (failure) { \ - FATAL("MIPS Disassembler tests failed.\n"); \ - } - -#define COMPARE_PC_REL_COMPACT(asm_, compare_string, offset) \ - { \ - int pc_offset = assm.pc_offset(); \ - byte *progcounter = &buffer[pc_offset]; \ - char str_with_address[100]; \ - prev_instr_compact_branch = assm.IsPrevInstrCompactBranch(); \ - if (prev_instr_compact_branch) { \ - snprintf(str_with_address, sizeof(str_with_address), "%s -> %p", \ - compare_string, \ - static_cast<void*>(progcounter + 8 + (offset * 4))); \ - } else { \ - snprintf(str_with_address, sizeof(str_with_address), "%s -> %p", \ - compare_string, \ - static_cast<void*>(progcounter + 4 + (offset * 4))); \ - } \ - assm.asm_; \ - if (!DisassembleAndCompare(progcounter, str_with_address)) failure = true; \ - } - -#define COMPARE_PC_REL(asm_, compare_string, offset) \ - { \ - int pc_offset = assm.pc_offset(); \ - byte *progcounter = &buffer[pc_offset]; \ - char str_with_address[100]; \ - snprintf(str_with_address, sizeof(str_with_address), "%s -> %p", \ - compare_string, static_cast<void*>(progcounter + (offset * 4))); \ - assm.asm_; \ - if (!DisassembleAndCompare(progcounter, str_with_address)) failure = true; \ - } - -#define COMPARE_MSA_BRANCH(asm_, compare_string, offset) \ - { \ - int pc_offset = assm.pc_offset(); \ - byte* progcounter = &buffer[pc_offset]; \ - char str_with_address[100]; \ - snprintf(str_with_address, sizeof(str_with_address), "%s -> %p", \ - compare_string, \ - static_cast<void*>(progcounter + 4 + (offset * 4))); \ - assm.asm_; \ - if (!DisassembleAndCompare(progcounter, str_with_address)) failure = true; \ - } - -#define COMPARE_PC_JUMP(asm_, compare_string, target) \ - { \ - int pc_offset = assm.pc_offset(); \ - byte* progcounter = &buffer[pc_offset]; \ - char str_with_address[100]; \ - int instr_index = (target >> 2) & kImm26Mask; \ - snprintf( \ - str_with_address, sizeof(str_with_address), "%s %p -> %p", \ - compare_string, reinterpret_cast<void*>(target), \ - reinterpret_cast<void*>(((uint32_t)(progcounter + 4) & ~0xFFFFFFF) | \ - (instr_index << 2))); \ - assm.asm_; \ - if (!DisassembleAndCompare(progcounter, str_with_address)) failure = true; \ - } - -#define GET_PC_REGION(pc_region) \ - { \ - int pc_offset = assm.pc_offset(); \ - byte* progcounter = &buffer[pc_offset]; \ - pc_region = reinterpret_cast<int32_t>(progcounter + 4) & ~0xFFFFFFF; \ - } - -TEST(Type0) { - SET_UP(); - - COMPARE(addu(a0, a1, a2), - "00a62021 addu a0, a1, a2"); - COMPARE(addu(t2, t3, t4), - "016c5021 addu t2, t3, t4"); - COMPARE(addu(v0, v1, s0), - "00701021 addu v0, v1, s0"); - - COMPARE(subu(a0, a1, a2), - "00a62023 subu a0, a1, a2"); - COMPARE(subu(t2, t3, t4), - "016c5023 subu t2, t3, t4"); - COMPARE(subu(v0, v1, s0), - "00701023 subu v0, v1, s0"); - - if (!IsMipsArchVariant(kMips32r6)) { - COMPARE(mult(a0, a1), - "00850018 mult a0, a1"); - COMPARE(mult(t2, t3), - "014b0018 mult t2, t3"); - COMPARE(mult(v0, v1), - "00430018 mult v0, v1"); - - COMPARE(multu(a0, a1), - "00850019 multu a0, a1"); - COMPARE(multu(t2, t3), - "014b0019 multu t2, t3"); - COMPARE(multu(v0, v1), - "00430019 multu v0, v1"); - - COMPARE(div(a0, a1), - "0085001a div a0, a1"); - COMPARE(div(t2, t3), - "014b001a div t2, t3"); - COMPARE(div(v0, v1), - "0043001a div v0, v1"); - - COMPARE(divu(a0, a1), - "0085001b divu a0, a1"); - COMPARE(divu(t2, t3), - "014b001b divu t2, t3"); - COMPARE(divu(v0, v1), - "0043001b divu v0, v1"); - - if (!IsMipsArchVariant(kLoongson)) { - COMPARE(mul(a0, a1, a2), - "70a62002 mul a0, a1, a2"); - COMPARE(mul(t2, t3, t4), - "716c5002 mul t2, t3, t4"); - 
COMPARE(mul(v0, v1, s0), - "70701002 mul v0, v1, s0"); - } - } else { // MIPS32r6. - COMPARE(mul(a0, a1, a2), - "00a62098 mul a0, a1, a2"); - COMPARE(muh(a0, a1, a2), - "00a620d8 muh a0, a1, a2"); - COMPARE(mul(t1, t2, t3), - "014b4898 mul t1, t2, t3"); - COMPARE(muh(t1, t2, t3), - "014b48d8 muh t1, t2, t3"); - COMPARE(mul(v0, v1, a0), - "00641098 mul v0, v1, a0"); - COMPARE(muh(v0, v1, a0), - "006410d8 muh v0, v1, a0"); - - COMPARE(mulu(a0, a1, a2), - "00a62099 mulu a0, a1, a2"); - COMPARE(muhu(a0, a1, a2), - "00a620d9 muhu a0, a1, a2"); - COMPARE(mulu(t1, t2, t3), - "014b4899 mulu t1, t2, t3"); - COMPARE(muhu(t1, t2, t3), - "014b48d9 muhu t1, t2, t3"); - COMPARE(mulu(v0, v1, a0), - "00641099 mulu v0, v1, a0"); - COMPARE(muhu(v0, v1, a0), - "006410d9 muhu v0, v1, a0"); - - COMPARE(div(a0, a1, a2), - "00a6209a div a0, a1, a2"); - COMPARE(mod(a0, a1, a2), - "00a620da mod a0, a1, a2"); - COMPARE(div(t1, t2, t3), - "014b489a div t1, t2, t3"); - COMPARE(mod(t1, t2, t3), - "014b48da mod t1, t2, t3"); - COMPARE(div(v0, v1, a0), - "0064109a div v0, v1, a0"); - COMPARE(mod(v0, v1, a0), - "006410da mod v0, v1, a0"); - - COMPARE(divu(a0, a1, a2), - "00a6209b divu a0, a1, a2"); - COMPARE(modu(a0, a1, a2), - "00a620db modu a0, a1, a2"); - COMPARE(divu(t1, t2, t3), - "014b489b divu t1, t2, t3"); - COMPARE(modu(t1, t2, t3), - "014b48db modu t1, t2, t3"); - COMPARE(divu(v0, v1, a0), - "0064109b divu v0, v1, a0"); - COMPARE(modu(v0, v1, a0), - "006410db modu v0, v1, a0"); - - COMPARE_PC_REL_COMPACT(bovc(a0, a0, static_cast<int16_t>(0)), - "20840000 bovc a0, a0, 0", 0); - COMPARE_PC_REL_COMPACT(bovc(a1, a0, static_cast<int16_t>(0)), - "20a40000 bovc a1, a0, 0", 0); - COMPARE_PC_REL_COMPACT(bovc(a1, a0, 32767), - "20a47fff bovc a1, a0, 32767", 32767); - COMPARE_PC_REL_COMPACT(bovc(a1, a0, -32768), - "20a48000 bovc a1, a0, -32768", -32768); - - COMPARE_PC_REL_COMPACT(bnvc(a0, a0, static_cast<int16_t>(0)), - "60840000 bnvc a0, a0, 0", 0); - COMPARE_PC_REL_COMPACT(bnvc(a1, a0, static_cast<int16_t>(0)), - "60a40000 bnvc a1, a0, 0", 0); - COMPARE_PC_REL_COMPACT(bnvc(a1, a0, 32767), - "60a47fff bnvc a1, a0, 32767", 32767); - COMPARE_PC_REL_COMPACT(bnvc(a1, a0, -32768), - "60a48000 bnvc a1, a0, -32768", -32768); - - COMPARE_PC_REL_COMPACT(beqzc(a0, -1048576), - "d8900000 beqzc a0, -1048576", -1048576); - COMPARE_PC_REL_COMPACT(beqzc(a0, -1), "d89fffff beqzc a0, -1", -1); - COMPARE_PC_REL_COMPACT(beqzc(a0, 0), "d8800000 beqzc a0, 0", 0); - COMPARE_PC_REL_COMPACT(beqzc(a0, 1), "d8800001 beqzc a0, 1", 1); - COMPARE_PC_REL_COMPACT(beqzc(a0, 1048575), - "d88fffff beqzc a0, 1048575", 1048575); - - COMPARE_PC_REL_COMPACT(bnezc(a0, 0), "f8800000 bnezc a0, 0", 0); - COMPARE_PC_REL_COMPACT(bnezc(a0, 1048575), // int21 maximal value. - "f88fffff bnezc a0, 1048575", 1048575); - COMPARE_PC_REL_COMPACT(bnezc(a0, -1048576), // int21 minimal value. 
- "f8900000 bnezc a0, -1048576", -1048576); - - COMPARE_PC_REL_COMPACT(bc(-33554432), "ca000000 bc -33554432", - -33554432); - COMPARE_PC_REL_COMPACT(bc(-1), "cbffffff bc -1", -1); - COMPARE_PC_REL_COMPACT(bc(0), "c8000000 bc 0", 0); - COMPARE_PC_REL_COMPACT(bc(1), "c8000001 bc 1", 1); - COMPARE_PC_REL_COMPACT(bc(33554431), "c9ffffff bc 33554431", - 33554431); - - COMPARE_PC_REL_COMPACT(balc(-33554432), "ea000000 balc -33554432", - -33554432); - COMPARE_PC_REL_COMPACT(balc(-1), "ebffffff balc -1", -1); - COMPARE_PC_REL_COMPACT(balc(0), "e8000000 balc 0", 0); - COMPARE_PC_REL_COMPACT(balc(1), "e8000001 balc 1", 1); - COMPARE_PC_REL_COMPACT(balc(33554431), "e9ffffff balc 33554431", - 33554431); - - COMPARE_PC_REL_COMPACT(bgeuc(a0, a1, -32768), - "18858000 bgeuc a0, a1, -32768", -32768); - COMPARE_PC_REL_COMPACT(bgeuc(a0, a1, -1), - "1885ffff bgeuc a0, a1, -1", -1); - COMPARE_PC_REL_COMPACT(bgeuc(a0, a1, 1), "18850001 bgeuc a0, a1, 1", - 1); - COMPARE_PC_REL_COMPACT(bgeuc(a0, a1, 32767), - "18857fff bgeuc a0, a1, 32767", 32767); - - COMPARE_PC_REL_COMPACT(bgezalc(a0, -32768), - "18848000 bgezalc a0, -32768", -32768); - COMPARE_PC_REL_COMPACT(bgezalc(a0, -1), "1884ffff bgezalc a0, -1", - -1); - COMPARE_PC_REL_COMPACT(bgezalc(a0, 1), "18840001 bgezalc a0, 1", 1); - COMPARE_PC_REL_COMPACT(bgezalc(a0, 32767), - "18847fff bgezalc a0, 32767", 32767); - - COMPARE_PC_REL_COMPACT(blezalc(a0, -32768), - "18048000 blezalc a0, -32768", -32768); - COMPARE_PC_REL_COMPACT(blezalc(a0, -1), "1804ffff blezalc a0, -1", - -1); - COMPARE_PC_REL_COMPACT(blezalc(a0, 1), "18040001 blezalc a0, 1", 1); - COMPARE_PC_REL_COMPACT(blezalc(a0, 32767), - "18047fff blezalc a0, 32767", 32767); - - COMPARE_PC_REL_COMPACT(bltuc(a0, a1, -32768), - "1c858000 bltuc a0, a1, -32768", -32768); - COMPARE_PC_REL_COMPACT(bltuc(a0, a1, -1), - "1c85ffff bltuc a0, a1, -1", -1); - COMPARE_PC_REL_COMPACT(bltuc(a0, a1, 1), "1c850001 bltuc a0, a1, 1", - 1); - COMPARE_PC_REL_COMPACT(bltuc(a0, a1, 32767), - "1c857fff bltuc a0, a1, 32767", 32767); - - COMPARE_PC_REL_COMPACT(bltzalc(a0, -32768), - "1c848000 bltzalc a0, -32768", -32768); - COMPARE_PC_REL_COMPACT(bltzalc(a0, -1), "1c84ffff bltzalc a0, -1", - -1); - COMPARE_PC_REL_COMPACT(bltzalc(a0, 1), "1c840001 bltzalc a0, 1", 1); - COMPARE_PC_REL_COMPACT(bltzalc(a0, 32767), - "1c847fff bltzalc a0, 32767", 32767); - - COMPARE_PC_REL_COMPACT(bgtzalc(a0, -32768), - "1c048000 bgtzalc a0, -32768", -32768); - COMPARE_PC_REL_COMPACT(bgtzalc(a0, -1), "1c04ffff bgtzalc a0, -1", - -1); - COMPARE_PC_REL_COMPACT(bgtzalc(a0, 1), "1c040001 bgtzalc a0, 1", 1); - COMPARE_PC_REL_COMPACT(bgtzalc(a0, 32767), - "1c047fff bgtzalc a0, 32767", 32767); - - COMPARE_PC_REL_COMPACT(bgezc(a0, -32768), - "58848000 bgezc a0, -32768", -32768); - COMPARE_PC_REL_COMPACT(bgezc(a0, -1), "5884ffff bgezc a0, -1", -1); - COMPARE_PC_REL_COMPACT(bgezc(a0, 1), "58840001 bgezc a0, 1", 1); - COMPARE_PC_REL_COMPACT(bgezc(a0, 32767), - "58847fff bgezc a0, 32767", 32767); - - COMPARE_PC_REL_COMPACT(bgec(a0, a1, -32768), - "58858000 bgec a0, a1, -32768", -32768); - COMPARE_PC_REL_COMPACT(bgec(a0, a1, -1), - "5885ffff bgec a0, a1, -1", -1); - COMPARE_PC_REL_COMPACT(bgec(a0, a1, 1), "58850001 bgec a0, a1, 1", - 1); - COMPARE_PC_REL_COMPACT(bgec(a0, a1, 32767), - "58857fff bgec a0, a1, 32767", 32767); - - COMPARE_PC_REL_COMPACT(blezc(a0, -32768), - "58048000 blezc a0, -32768", -32768); - COMPARE_PC_REL_COMPACT(blezc(a0, -1), "5804ffff blezc a0, -1", -1); - COMPARE_PC_REL_COMPACT(blezc(a0, 1), "58040001 blezc a0, 1", 1); - 
COMPARE_PC_REL_COMPACT(blezc(a0, 32767), - "58047fff blezc a0, 32767", 32767); - - COMPARE_PC_REL_COMPACT(bltzc(a0, -32768), - "5c848000 bltzc a0, -32768", -32768); - COMPARE_PC_REL_COMPACT(bltzc(a0, -1), "5c84ffff bltzc a0, -1", -1); - COMPARE_PC_REL_COMPACT(bltzc(a0, 1), "5c840001 bltzc a0, 1", 1); - COMPARE_PC_REL_COMPACT(bltzc(a0, 32767), - "5c847fff bltzc a0, 32767", 32767); - - COMPARE_PC_REL_COMPACT(bltc(a0, a1, -32768), - "5c858000 bltc a0, a1, -32768", -32768); - COMPARE_PC_REL_COMPACT(bltc(a0, a1, -1), - "5c85ffff bltc a0, a1, -1", -1); - COMPARE_PC_REL_COMPACT(bltc(a0, a1, 1), "5c850001 bltc a0, a1, 1", - 1); - COMPARE_PC_REL_COMPACT(bltc(a0, a1, 32767), - "5c857fff bltc a0, a1, 32767", 32767); - - COMPARE_PC_REL_COMPACT(bgtzc(a0, -32768), - "5c048000 bgtzc a0, -32768", -32768); - COMPARE_PC_REL_COMPACT(bgtzc(a0, -1), "5c04ffff bgtzc a0, -1", -1); - COMPARE_PC_REL_COMPACT(bgtzc(a0, 1), "5c040001 bgtzc a0, 1", 1); - COMPARE_PC_REL_COMPACT(bgtzc(a0, 32767), - "5c047fff bgtzc a0, 32767", 32767); - - COMPARE_PC_REL_COMPACT(bc1eqz(-32768, f1), - "45218000 bc1eqz f1, -32768", -32768); - COMPARE_PC_REL_COMPACT(bc1eqz(-1, f1), "4521ffff bc1eqz f1, -1", - -1); - COMPARE_PC_REL_COMPACT(bc1eqz(1, f1), "45210001 bc1eqz f1, 1", 1); - COMPARE_PC_REL_COMPACT(bc1eqz(32767, f1), - "45217fff bc1eqz f1, 32767", 32767); - - COMPARE_PC_REL_COMPACT(bc1nez(-32768, f1), - "45a18000 bc1nez f1, -32768", -32768); - COMPARE_PC_REL_COMPACT(bc1nez(-1, f1), "45a1ffff bc1nez f1, -1", - -1); - COMPARE_PC_REL_COMPACT(bc1nez(1, f1), "45a10001 bc1nez f1, 1", 1); - COMPARE_PC_REL_COMPACT(bc1nez(32767, f1), - "45a17fff bc1nez f1, 32767", 32767); - - COMPARE_PC_REL_COMPACT(bovc(a1, a0, -1), "20a4ffff bovc a1, a0, -1", - -1); - COMPARE_PC_REL_COMPACT(bovc(a0, a0, 1), "20840001 bovc a0, a0, 1", - 1); - - COMPARE_PC_REL_COMPACT(beqc(a0, a1, -32768), - "20858000 beqc a0, a1, -32768", -32768); - COMPARE_PC_REL_COMPACT(beqc(a0, a1, -1), - "2085ffff beqc a0, a1, -1", -1); - COMPARE_PC_REL_COMPACT(beqc(a0, a1, 1), "20850001 beqc a0, a1, 1", - 1); - COMPARE_PC_REL_COMPACT(beqc(a0, a1, 32767), - "20857fff beqc a0, a1, 32767", 32767); - - COMPARE_PC_REL_COMPACT(bnec(a0, a1, -32768), - "60858000 bnec a0, a1, -32768", -32768); - COMPARE_PC_REL_COMPACT(bnec(a0, a1, -1), "6085ffff bnec a0, a1, -1", - -1); - COMPARE_PC_REL_COMPACT(bnec(a0, a1, 1), "60850001 bnec a0, a1, 1", - 1); - COMPARE_PC_REL_COMPACT(bnec(a0, a1, 32767), - "60857fff bnec a0, a1, 32767", 32767); - } - - COMPARE_PC_REL_COMPACT(bne(a0, a1, -32768), - "14858000 bne a0, a1, -32768", -32768); - COMPARE_PC_REL_COMPACT(bne(a0, a1, -1), "1485ffff bne a0, a1, -1", - -1); - COMPARE_PC_REL_COMPACT(bne(a0, a1, 1), "14850001 bne a0, a1, 1", 1); - COMPARE_PC_REL_COMPACT(bne(a0, a1, 32767), - "14857fff bne a0, a1, 32767", 32767); - - COMPARE_PC_REL_COMPACT(beq(a0, a1, -32768), - "10858000 beq a0, a1, -32768", -32768); - COMPARE_PC_REL_COMPACT(beq(a0, a1, -1), "1085ffff beq a0, a1, -1", - -1); - COMPARE_PC_REL_COMPACT(beq(a0, a1, 1), "10850001 beq a0, a1, 1", 1); - COMPARE_PC_REL_COMPACT(beq(a0, a1, 32767), - "10857fff beq a0, a1, 32767", 32767); - - COMPARE_PC_REL_COMPACT(bltz(a0, -32768), "04808000 bltz a0, -32768", - -32768); - COMPARE_PC_REL_COMPACT(bltz(a0, -1), "0480ffff bltz a0, -1", -1); - COMPARE_PC_REL_COMPACT(bltz(a0, 1), "04800001 bltz a0, 1", 1); - COMPARE_PC_REL_COMPACT(bltz(a0, 32767), "04807fff bltz a0, 32767", - 32767); - - COMPARE_PC_REL_COMPACT(bgez(a0, -32768), "04818000 bgez a0, -32768", - -32768); - COMPARE_PC_REL_COMPACT(bgez(a0, -1), "0481ffff bgez a0, 
-1", -1); - COMPARE_PC_REL_COMPACT(bgez(a0, 1), "04810001 bgez a0, 1", 1); - COMPARE_PC_REL_COMPACT(bgez(a0, 32767), "04817fff bgez a0, 32767", - 32767); - - COMPARE_PC_REL_COMPACT(blez(a0, -32768), "18808000 blez a0, -32768", - -32768); - COMPARE_PC_REL_COMPACT(blez(a0, -1), "1880ffff blez a0, -1", -1); - COMPARE_PC_REL_COMPACT(blez(a0, 1), "18800001 blez a0, 1", 1); - COMPARE_PC_REL_COMPACT(blez(a0, 32767), "18807fff blez a0, 32767", - 32767); - - COMPARE_PC_REL_COMPACT(bgtz(a0, -32768), "1c808000 bgtz a0, -32768", - -32768); - COMPARE_PC_REL_COMPACT(bgtz(a0, -1), "1c80ffff bgtz a0, -1", -1); - COMPARE_PC_REL_COMPACT(bgtz(a0, 1), "1c800001 bgtz a0, 1", 1); - COMPARE_PC_REL_COMPACT(bgtz(a0, 32767), "1c807fff bgtz a0, 32767", - 32767); - - int32_t pc_region; - GET_PC_REGION(pc_region); - - int32_t target = pc_region | 0x4; - COMPARE_PC_JUMP(j(target), "08000001 j ", target); - target = pc_region | 0xFFFFFFC; - COMPARE_PC_JUMP(j(target), "0bffffff j ", target); - - target = pc_region | 0x4; - COMPARE_PC_JUMP(jal(target), "0c000001 jal ", target); - target = pc_region | 0xFFFFFFC; - COMPARE_PC_JUMP(jal(target), "0fffffff jal ", target); - - COMPARE(addiu(a0, a1, 0x0), - "24a40000 addiu a0, a1, 0"); - COMPARE(addiu(s0, s1, 32767), - "26307fff addiu s0, s1, 32767"); - COMPARE(addiu(t2, t3, -32768), - "256a8000 addiu t2, t3, -32768"); - COMPARE(addiu(v0, v1, -1), - "2462ffff addiu v0, v1, -1"); - - COMPARE(and_(a0, a1, a2), - "00a62024 and a0, a1, a2"); - COMPARE(and_(s0, s1, s2), - "02328024 and s0, s1, s2"); - COMPARE(and_(t2, t3, t4), - "016c5024 and t2, t3, t4"); - COMPARE(and_(v0, v1, a2), - "00661024 and v0, v1, a2"); - - COMPARE(or_(a0, a1, a2), - "00a62025 or a0, a1, a2"); - COMPARE(or_(s0, s1, s2), - "02328025 or s0, s1, s2"); - COMPARE(or_(t2, t3, t4), - "016c5025 or t2, t3, t4"); - COMPARE(or_(v0, v1, a2), - "00661025 or v0, v1, a2"); - - COMPARE(xor_(a0, a1, a2), - "00a62026 xor a0, a1, a2"); - COMPARE(xor_(s0, s1, s2), - "02328026 xor s0, s1, s2"); - COMPARE(xor_(t2, t3, t4), - "016c5026 xor t2, t3, t4"); - COMPARE(xor_(v0, v1, a2), - "00661026 xor v0, v1, a2"); - - COMPARE(nor(a0, a1, a2), - "00a62027 nor a0, a1, a2"); - COMPARE(nor(s0, s1, s2), - "02328027 nor s0, s1, s2"); - COMPARE(nor(t2, t3, t4), - "016c5027 nor t2, t3, t4"); - COMPARE(nor(v0, v1, a2), - "00661027 nor v0, v1, a2"); - - COMPARE(andi(a0, a1, 0x1), - "30a40001 andi a0, a1, 0x1"); - COMPARE(andi(v0, v1, 0xffff), - "3062ffff andi v0, v1, 0xffff"); - - COMPARE(ori(a0, a1, 0x1), - "34a40001 ori a0, a1, 0x1"); - COMPARE(ori(v0, v1, 0xffff), - "3462ffff ori v0, v1, 0xffff"); - - COMPARE(xori(a0, a1, 0x1), - "38a40001 xori a0, a1, 0x1"); - COMPARE(xori(v0, v1, 0xffff), - "3862ffff xori v0, v1, 0xffff"); - - COMPARE(lui(a0, 0x1), - "3c040001 lui a0, 0x1"); - COMPARE(lui(v0, 0xffff), - "3c02ffff lui v0, 0xffff"); - - if (IsMipsArchVariant(kMips32r6)) { - COMPARE(aui(a0, a1, 0x1), "3ca40001 aui a0, a1, 0x1"); - COMPARE(aui(v0, v1, 0xffff), "3c62ffff aui v0, v1, 0xffff"); - } - - COMPARE(sll(a0, a1, 0), - "00052000 sll a0, a1, 0"); - COMPARE(sll(s0, s1, 8), - "00118200 sll s0, s1, 8"); - COMPARE(sll(t2, t3, 24), - "000b5600 sll t2, t3, 24"); - COMPARE(sll(v0, v1, 31), - "000317c0 sll v0, v1, 31"); - - COMPARE(sllv(a0, a1, a2), - "00c52004 sllv a0, a1, a2"); - COMPARE(sllv(s0, s1, s2), - "02518004 sllv s0, s1, s2"); - COMPARE(sllv(t2, t3, t4), - "018b5004 sllv t2, t3, t4"); - COMPARE(sllv(v0, v1, fp), - "03c31004 sllv v0, v1, fp"); - - COMPARE(srl(a0, a1, 0), - "00052002 srl a0, a1, 0"); - COMPARE(srl(s0, s1, 8), - 
"00118202 srl s0, s1, 8"); - COMPARE(srl(t2, t3, 24), - "000b5602 srl t2, t3, 24"); - COMPARE(srl(v0, v1, 31), - "000317c2 srl v0, v1, 31"); - - COMPARE(srlv(a0, a1, a2), - "00c52006 srlv a0, a1, a2"); - COMPARE(srlv(s0, s1, s2), - "02518006 srlv s0, s1, s2"); - COMPARE(srlv(t2, t3, t4), - "018b5006 srlv t2, t3, t4"); - COMPARE(srlv(v0, v1, fp), - "03c31006 srlv v0, v1, fp"); - - COMPARE(sra(a0, a1, 0), - "00052003 sra a0, a1, 0"); - COMPARE(sra(s0, s1, 8), - "00118203 sra s0, s1, 8"); - COMPARE(sra(t2, t3, 24), - "000b5603 sra t2, t3, 24"); - COMPARE(sra(v0, v1, 31), - "000317c3 sra v0, v1, 31"); - - COMPARE(srav(a0, a1, a2), - "00c52007 srav a0, a1, a2"); - COMPARE(srav(s0, s1, s2), - "02518007 srav s0, s1, s2"); - COMPARE(srav(t2, t3, t4), - "018b5007 srav t2, t3, t4"); - COMPARE(srav(v0, v1, fp), - "03c31007 srav v0, v1, fp"); - - if (IsMipsArchVariant(kMips32r2)) { - COMPARE(rotr(a0, a1, 0), - "00252002 rotr a0, a1, 0"); - COMPARE(rotr(s0, s1, 8), - "00318202 rotr s0, s1, 8"); - COMPARE(rotr(t2, t3, 24), - "002b5602 rotr t2, t3, 24"); - COMPARE(rotr(v0, v1, 31), - "002317c2 rotr v0, v1, 31"); - - COMPARE(rotrv(a0, a1, a2), - "00c52046 rotrv a0, a1, a2"); - COMPARE(rotrv(s0, s1, s2), - "02518046 rotrv s0, s1, s2"); - COMPARE(rotrv(t2, t3, t4), - "018b5046 rotrv t2, t3, t4"); - COMPARE(rotrv(v0, v1, fp), - "03c31046 rotrv v0, v1, fp"); - } - - COMPARE(break_(0), - "0000000d break, code: 0x00000 (0)"); - COMPARE(break_(261120), - "00ff000d break, code: 0x3fc00 (261120)"); - COMPARE(break_(1047552), - "03ff000d break, code: 0xffc00 (1047552)"); - - COMPARE(tge(a0, a1, 0), - "00850030 tge a0, a1, code: 0x000"); - COMPARE(tge(s0, s1, 1023), - "0211fff0 tge s0, s1, code: 0x3ff"); - COMPARE(tgeu(a0, a1, 0), - "00850031 tgeu a0, a1, code: 0x000"); - COMPARE(tgeu(s0, s1, 1023), - "0211fff1 tgeu s0, s1, code: 0x3ff"); - COMPARE(tlt(a0, a1, 0), - "00850032 tlt a0, a1, code: 0x000"); - COMPARE(tlt(s0, s1, 1023), - "0211fff2 tlt s0, s1, code: 0x3ff"); - COMPARE(tltu(a0, a1, 0), - "00850033 tltu a0, a1, code: 0x000"); - COMPARE(tltu(s0, s1, 1023), - "0211fff3 tltu s0, s1, code: 0x3ff"); - COMPARE(teq(a0, a1, 0), - "00850034 teq a0, a1, code: 0x000"); - COMPARE(teq(s0, s1, 1023), - "0211fff4 teq s0, s1, code: 0x3ff"); - COMPARE(tne(a0, a1, 0), - "00850036 tne a0, a1, code: 0x000"); - COMPARE(tne(s0, s1, 1023), - "0211fff6 tne s0, s1, code: 0x3ff"); - - COMPARE(mfhi(a0), - "00002010 mfhi a0"); - COMPARE(mfhi(s2), - "00009010 mfhi s2"); - COMPARE(mfhi(t4), - "00006010 mfhi t4"); - COMPARE(mfhi(v1), - "00001810 mfhi v1"); - COMPARE(mflo(a0), - "00002012 mflo a0"); - COMPARE(mflo(s2), - "00009012 mflo s2"); - COMPARE(mflo(t4), - "00006012 mflo t4"); - COMPARE(mflo(v1), - "00001812 mflo v1"); - - COMPARE(slt(a0, a1, a2), - "00a6202a slt a0, a1, a2"); - COMPARE(slt(s0, s1, s2), - "0232802a slt s0, s1, s2"); - COMPARE(slt(t2, t3, t4), - "016c502a slt t2, t3, t4"); - COMPARE(slt(v0, v1, a2), - "0066102a slt v0, v1, a2"); - COMPARE(sltu(a0, a1, a2), - "00a6202b sltu a0, a1, a2"); - COMPARE(sltu(s0, s1, s2), - "0232802b sltu s0, s1, s2"); - COMPARE(sltu(t2, t3, t4), - "016c502b sltu t2, t3, t4"); - COMPARE(sltu(v0, v1, a2), - "0066102b sltu v0, v1, a2"); - - COMPARE(slti(a0, a1, 0), - "28a40000 slti a0, a1, 0"); - COMPARE(slti(s0, s1, 32767), - "2a307fff slti s0, s1, 32767"); - COMPARE(slti(t2, t3, -32768), - "296a8000 slti t2, t3, -32768"); - COMPARE(slti(v0, v1, -1), - "2862ffff slti v0, v1, -1"); - COMPARE(sltiu(a0, a1, 0), - "2ca40000 sltiu a0, a1, 0"); - COMPARE(sltiu(s0, s1, 32767), - "2e307fff sltiu s0, 
s1, 32767"); - COMPARE(sltiu(t2, t3, -32768), - "2d6a8000 sltiu t2, t3, -32768"); - COMPARE(sltiu(v0, v1, -1), - "2c62ffff sltiu v0, v1, -1"); - - if (!IsMipsArchVariant(kLoongson)) { - COMPARE(movz(a0, a1, a2), - "00a6200a movz a0, a1, a2"); - COMPARE(movz(s0, s1, s2), - "0232800a movz s0, s1, s2"); - COMPARE(movz(t2, t3, t4), - "016c500a movz t2, t3, t4"); - COMPARE(movz(v0, v1, a2), - "0066100a movz v0, v1, a2"); - COMPARE(movn(a0, a1, a2), - "00a6200b movn a0, a1, a2"); - COMPARE(movn(s0, s1, s2), - "0232800b movn s0, s1, s2"); - COMPARE(movn(t2, t3, t4), - "016c500b movn t2, t3, t4"); - COMPARE(movn(v0, v1, a2), - "0066100b movn v0, v1, a2"); - - COMPARE(movt(a0, a1, 1), - "00a52001 movt a0, a1, 1"); - COMPARE(movt(s0, s1, 2), - "02298001 movt s0, s1, 2"); - COMPARE(movt(t2, t3, 3), - "016d5001 movt t2, t3, 3"); - COMPARE(movt(v0, v1, 7), - "007d1001 movt v0, v1, 7"); - COMPARE(movf(a0, a1, 0), - "00a02001 movf a0, a1, 0"); - COMPARE(movf(s0, s1, 4), - "02308001 movf s0, s1, 4"); - COMPARE(movf(t2, t3, 5), - "01745001 movf t2, t3, 5"); - COMPARE(movf(v0, v1, 6), - "00781001 movf v0, v1, 6"); - - if (IsMipsArchVariant(kMips32r6)) { - COMPARE(clz(a0, a1), - "00a02050 clz a0, a1"); - COMPARE(clz(s6, s7), - "02e0b050 clz s6, s7"); - COMPARE(clz(v0, v1), - "00601050 clz v0, v1"); - } else { - COMPARE(clz(a0, a1), - "70a42020 clz a0, a1"); - COMPARE(clz(s6, s7), - "72f6b020 clz s6, s7"); - COMPARE(clz(v0, v1), - "70621020 clz v0, v1"); - } - } - - if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { - COMPARE(seb(a0, a1), "7c052420 seb a0, a1"); - COMPARE(seb(s6, s7), "7c17b420 seb s6, s7"); - COMPARE(seb(v0, v1), "7c031420 seb v0, v1"); - - COMPARE(seh(a0, a1), "7c052620 seh a0, a1"); - COMPARE(seh(s6, s7), "7c17b620 seh s6, s7"); - COMPARE(seh(v0, v1), "7c031620 seh v0, v1"); - - COMPARE(wsbh(a0, a1), "7c0520a0 wsbh a0, a1"); - COMPARE(wsbh(s6, s7), "7c17b0a0 wsbh s6, s7"); - COMPARE(wsbh(v0, v1), "7c0310a0 wsbh v0, v1"); - } - - if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { - COMPARE(ins_(a0, a1, 31, 1), - "7ca4ffc4 ins a0, a1, 31, 1"); - COMPARE(ins_(s6, s7, 30, 2), - "7ef6ff84 ins s6, s7, 30, 2"); - COMPARE(ins_(v0, v1, 0, 32), - "7c62f804 ins v0, v1, 0, 32"); - COMPARE(ext_(a0, a1, 31, 1), - "7ca407c0 ext a0, a1, 31, 1"); - COMPARE(ext_(s6, s7, 30, 2), - "7ef60f80 ext s6, s7, 30, 2"); - COMPARE(ext_(v0, v1, 0, 32), - "7c62f800 ext v0, v1, 0, 32"); - } - COMPARE(add_s(f4, f6, f8), "46083100 add.s f4, f6, f8"); - COMPARE(add_d(f12, f14, f16), "46307300 add.d f12, f14, f16"); - - if (IsMipsArchVariant(kMips32r6)) { - COMPARE(bitswap(a0, a1), "7c052020 bitswap a0, a1"); - COMPARE(bitswap(t8, s0), "7c10c020 bitswap t8, s0"); - } - - COMPARE(abs_s(f6, f8), "46004185 abs.s f6, f8"); - COMPARE(abs_d(f10, f12), "46206285 abs.d f10, f12"); - - COMPARE(div_s(f2, f4, f6), "46062083 div.s f2, f4, f6"); - COMPARE(div_d(f2, f4, f6), "46262083 div.d f2, f4, f6"); - - if (IsMipsArchVariant(kMips32r6)) { - COMPARE(align(v0, a0, a1, 0), "7c851220 align v0, a0, a1, 0"); - COMPARE(align(v0, a0, a1, 1), "7c851260 align v0, a0, a1, 1"); - COMPARE(align(v0, a0, a1, 2), "7c8512a0 align v0, a0, a1, 2"); - COMPARE(align(v0, a0, a1, 3), "7c8512e0 align v0, a0, a1, 3"); - } - - if (IsMipsArchVariant(kMips32r6)) { - COMPARE(aluipc(v0, 0), "ec5f0000 aluipc v0, 0"); - COMPARE(aluipc(v0, 1), "ec5f0001 aluipc v0, 1"); - COMPARE(aluipc(v0, 32767), "ec5f7fff aluipc v0, 32767"); - COMPARE(aluipc(v0, -32768), "ec5f8000 aluipc v0, -32768"); - COMPARE(aluipc(v0, -1), "ec5fffff aluipc v0, 
-1"); - } - - if (IsMipsArchVariant(kMips32r6)) { - COMPARE(auipc(t8, 0), "ef1e0000 auipc t8, 0"); - COMPARE(auipc(t8, 1), "ef1e0001 auipc t8, 1"); - COMPARE(auipc(t8, 32767), "ef1e7fff auipc t8, 32767"); - COMPARE(auipc(t8, -32768), "ef1e8000 auipc t8, -32768"); - COMPARE(auipc(t8, -1), "ef1effff auipc t8, -1"); - } - - if (IsMipsArchVariant(kMips32r6)) { - COMPARE(lwpc(t1, 0), "ed280000 lwpc t1, 0"); - COMPARE(lwpc(t1, 4), "ed280004 lwpc t1, 4"); - COMPARE(lwpc(t1, -4), "ed2ffffc lwpc t1, -4"); - } - - if (IsMipsArchVariant(kMips32r6)) { - COMPARE(jic(t0, -32768), "d8088000 jic t0, -32768"); - COMPARE(jic(t0, -1), "d808ffff jic t0, -1"); - COMPARE(jic(t0, 0), "d8080000 jic t0, 0"); - COMPARE(jic(t0, 4), "d8080004 jic t0, 4"); - COMPARE(jic(t0, 32767), "d8087fff jic t0, 32767"); - } - - if (IsMipsArchVariant(kMips32r6)) { - COMPARE(addiupc(a0, 262143), "ec83ffff addiupc a0, 262143"); - COMPARE(addiupc(a0, -1), "ec87ffff addiupc a0, -1"); - COMPARE(addiupc(v0, 0), "ec400000 addiupc v0, 0"); - COMPARE(addiupc(s1, 1), "ee200001 addiupc s1, 1"); - COMPARE(addiupc(a0, -262144), "ec840000 addiupc a0, -262144"); - } - - if (IsMipsArchVariant(kMips32r6)) { - COMPARE(jialc(a0, -32768), "f8048000 jialc a0, -32768"); - COMPARE(jialc(a0, -1), "f804ffff jialc a0, -1"); - COMPARE(jialc(v0, 0), "f8020000 jialc v0, 0"); - COMPARE(jialc(s1, 1), "f8110001 jialc s1, 1"); - COMPARE(jialc(a0, 32767), "f8047fff jialc a0, 32767"); - } - - VERIFY_RUN(); -} - - -TEST(Type1) { - SET_UP(); - if (IsMipsArchVariant(kMips32r6)) { - COMPARE(seleqz(a0, a1, a2), "00a62035 seleqz a0, a1, a2"); - COMPARE(selnez(a0, a1, a2), "00a62037 selnez a0, a1, a2"); - - - COMPARE(seleqz_d(f3, f4, f5), "462520d4 seleqz.d f3, f4, f5"); - COMPARE(selnez_d(f3, f4, f5), "462520d7 selnez.d f3, f4, f5"); - COMPARE(seleqz_s(f3, f4, f5), "460520d4 seleqz.s f3, f4, f5"); - COMPARE(selnez_s(f3, f4, f5), "460520d7 selnez.s f3, f4, f5"); - - COMPARE(min_d(f3, f4, f5), "462520dc min.d f3, f4, f5"); - COMPARE(max_d(f3, f4, f5), "462520de max.d f3, f4, f5"); - - COMPARE(sel_s(f3, f4, f5), "460520d0 sel.s f3, f4, f5"); - COMPARE(sel_d(f3, f4, f5), "462520d0 sel.d f3, f4, f5"); - - COMPARE(rint_d(f8, f6), "4620321a rint.d f8, f6"); - COMPARE(rint_s(f8, f6), "4600321a rint.s f8, f6"); - - COMPARE(min_s(f3, f4, f5), "460520dc min.s f3, f4, f5"); - COMPARE(max_s(f3, f4, f5), "460520de max.s f3, f4, f5"); - - COMPARE(mina_d(f3, f4, f5), "462520dd mina.d f3, f4, f5"); - COMPARE(mina_s(f3, f4, f5), "460520dd mina.s f3, f4, f5"); - - COMPARE(maxa_d(f3, f4, f5), "462520df maxa.d f3, f4, f5"); - COMPARE(maxa_s(f3, f4, f5), "460520df maxa.s f3, f4, f5"); - } - - if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) && - IsFp64Mode()) { - COMPARE(trunc_l_d(f8, f6), "46203209 trunc.l.d f8, f6"); - COMPARE(trunc_l_s(f8, f6), "46003209 trunc.l.s f8, f6"); - - COMPARE(round_l_s(f8, f6), "46003208 round.l.s f8, f6"); - COMPARE(round_l_d(f8, f6), "46203208 round.l.d f8, f6"); - - COMPARE(floor_l_s(f8, f6), "4600320b floor.l.s f8, f6"); - COMPARE(floor_l_d(f8, f6), "4620320b floor.l.d f8, f6"); - - COMPARE(ceil_l_s(f8, f6), "4600320a ceil.l.s f8, f6"); - COMPARE(ceil_l_d(f8, f6), "4620320a ceil.l.d f8, f6"); - } - - COMPARE(trunc_w_d(f8, f6), "4620320d trunc.w.d f8, f6"); - COMPARE(trunc_w_s(f8, f6), "4600320d trunc.w.s f8, f6"); - - COMPARE(round_w_s(f8, f6), "4600320c round.w.s f8, f6"); - COMPARE(round_w_d(f8, f6), "4620320c round.w.d f8, f6"); - - COMPARE(floor_w_s(f8, f6), "4600320f floor.w.s f8, f6"); - COMPARE(floor_w_d(f8, f6), "4620320f floor.w.d 
f8, f6"); - - COMPARE(ceil_w_s(f8, f6), "4600320e ceil.w.s f8, f6"); - COMPARE(ceil_w_d(f8, f6), "4620320e ceil.w.d f8, f6"); - - COMPARE(sub_s(f10, f8, f6), "46064281 sub.s f10, f8, f6"); - COMPARE(sub_d(f10, f8, f6), "46264281 sub.d f10, f8, f6"); - - COMPARE(sqrt_s(f8, f6), "46003204 sqrt.s f8, f6"); - COMPARE(sqrt_d(f8, f6), "46203204 sqrt.d f8, f6"); - - COMPARE(neg_s(f8, f6), "46003207 neg.s f8, f6"); - COMPARE(neg_d(f8, f6), "46203207 neg.d f8, f6"); - - COMPARE(mul_s(f8, f6, f4), "46043202 mul.s f8, f6, f4"); - COMPARE(mul_d(f8, f6, f4), "46243202 mul.d f8, f6, f4"); - - if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { - COMPARE(rsqrt_s(f8, f6), "46003216 rsqrt.s f8, f6"); - COMPARE(rsqrt_d(f8, f6), "46203216 rsqrt.d f8, f6"); - - COMPARE(recip_s(f8, f6), "46003215 recip.s f8, f6"); - COMPARE(recip_d(f8, f6), "46203215 recip.d f8, f6"); - } - - COMPARE(mov_s(f6, f4), "46002186 mov.s f6, f4"); - COMPARE(mov_d(f6, f4), "46202186 mov.d f6, f4"); - - if (IsMipsArchVariant(kMips32r2)) { - COMPARE(movz_s(f6, f4, t0), "46082192 movz.s f6, f4, t0"); - COMPARE(movz_d(f6, f4, t0), "46282192 movz.d f6, f4, t0"); - - COMPARE(movt_s(f6, f4, 4), "46112191 movt.s f6, f4, cc(1)"); - COMPARE(movt_d(f6, f4, 4), "46312191 movt.d f6, f4, cc(1)"); - - COMPARE(movf_s(f6, f4, 4), "46102191 movf.s f6, f4, cc(1)"); - COMPARE(movf_d(f6, f4, 4), "46302191 movf.d f6, f4, cc(1)"); - - COMPARE(movn_s(f6, f4, t0), "46082193 movn.s f6, f4, t0"); - COMPARE(movn_d(f6, f4, t0), "46282193 movn.d f6, f4, t0"); - } - VERIFY_RUN(); -} - - -TEST(Type2) { - if (IsMipsArchVariant(kMips32r6)) { - SET_UP(); - - COMPARE(class_s(f3, f4), "460020db class.s f3, f4"); - COMPARE(class_d(f2, f3), "4620189b class.d f2, f3"); - - VERIFY_RUN(); - } -} - - -TEST(C_FMT_DISASM) { - if (IsMipsArchVariant(kMips32r1) || IsMipsArchVariant(kMips32r2)) { - SET_UP(); - - COMPARE(c_s(F, f8, f10, 0), "460a4030 c.f.s f8, f10, cc(0)"); - COMPARE(c_d(F, f8, f10, 0), "462a4030 c.f.d f8, f10, cc(0)"); - - COMPARE(c_s(UN, f8, f10, 2), "460a4231 c.un.s f8, f10, cc(2)"); - COMPARE(c_d(UN, f8, f10, 2), "462a4231 c.un.d f8, f10, cc(2)"); - - COMPARE(c_s(EQ, f8, f10, 4), "460a4432 c.eq.s f8, f10, cc(4)"); - COMPARE(c_d(EQ, f8, f10, 4), "462a4432 c.eq.d f8, f10, cc(4)"); - - COMPARE(c_s(UEQ, f8, f10, 6), "460a4633 c.ueq.s f8, f10, cc(6)"); - COMPARE(c_d(UEQ, f8, f10, 6), "462a4633 c.ueq.d f8, f10, cc(6)"); - - COMPARE(c_s(OLT, f8, f10, 0), "460a4034 c.olt.s f8, f10, cc(0)"); - COMPARE(c_d(OLT, f8, f10, 0), "462a4034 c.olt.d f8, f10, cc(0)"); - - COMPARE(c_s(ULT, f8, f10, 2), "460a4235 c.ult.s f8, f10, cc(2)"); - COMPARE(c_d(ULT, f8, f10, 2), "462a4235 c.ult.d f8, f10, cc(2)"); - - COMPARE(c_s(OLE, f8, f10, 4), "460a4436 c.ole.s f8, f10, cc(4)"); - COMPARE(c_d(OLE, f8, f10, 4), "462a4436 c.ole.d f8, f10, cc(4)"); - - COMPARE(c_s(ULE, f8, f10, 6), "460a4637 c.ule.s f8, f10, cc(6)"); - COMPARE(c_d(ULE, f8, f10, 6), "462a4637 c.ule.d f8, f10, cc(6)"); - - VERIFY_RUN(); - } -} - - -TEST(COND_FMT_DISASM) { - if (IsMipsArchVariant(kMips32r6)) { - SET_UP(); - - COMPARE(cmp_s(F, f6, f8, f10), "468a4180 cmp.af.s f6, f8, f10"); - COMPARE(cmp_d(F, f6, f8, f10), "46aa4180 cmp.af.d f6, f8, f10"); - - COMPARE(cmp_s(UN, f6, f8, f10), "468a4181 cmp.un.s f6, f8, f10"); - COMPARE(cmp_d(UN, f6, f8, f10), "46aa4181 cmp.un.d f6, f8, f10"); - - COMPARE(cmp_s(EQ, f6, f8, f10), "468a4182 cmp.eq.s f6, f8, f10"); - COMPARE(cmp_d(EQ, f6, f8, f10), "46aa4182 cmp.eq.d f6, f8, f10"); - - COMPARE(cmp_s(UEQ, f6, f8, f10), "468a4183 cmp.ueq.s f6, f8, f10"); - 
COMPARE(cmp_d(UEQ, f6, f8, f10), "46aa4183 cmp.ueq.d f6, f8, f10");
-
-    COMPARE(cmp_s(LT, f6, f8, f10), "468a4184 cmp.lt.s f6, f8, f10");
-    COMPARE(cmp_d(LT, f6, f8, f10), "46aa4184 cmp.lt.d f6, f8, f10");
-
-    COMPARE(cmp_s(ULT, f6, f8, f10), "468a4185 cmp.ult.s f6, f8, f10");
-    COMPARE(cmp_d(ULT, f6, f8, f10), "46aa4185 cmp.ult.d f6, f8, f10");
-
-    COMPARE(cmp_s(LE, f6, f8, f10), "468a4186 cmp.le.s f6, f8, f10");
-    COMPARE(cmp_d(LE, f6, f8, f10), "46aa4186 cmp.le.d f6, f8, f10");
-
-    COMPARE(cmp_s(ULE, f6, f8, f10), "468a4187 cmp.ule.s f6, f8, f10");
-    COMPARE(cmp_d(ULE, f6, f8, f10), "46aa4187 cmp.ule.d f6, f8, f10");
-
-    COMPARE(cmp_s(ORD, f6, f8, f10), "468a4191 cmp.or.s f6, f8, f10");
-    COMPARE(cmp_d(ORD, f6, f8, f10), "46aa4191 cmp.or.d f6, f8, f10");
-
-    COMPARE(cmp_s(UNE, f6, f8, f10), "468a4192 cmp.une.s f6, f8, f10");
-    COMPARE(cmp_d(UNE, f6, f8, f10), "46aa4192 cmp.une.d f6, f8, f10");
-
-    COMPARE(cmp_s(NE, f6, f8, f10), "468a4193 cmp.ne.s f6, f8, f10");
-    COMPARE(cmp_d(NE, f6, f8, f10), "46aa4193 cmp.ne.d f6, f8, f10");
-
-    VERIFY_RUN();
-  }
-}
-
-
-TEST(CVT_DISSASM) {
-  SET_UP();
-  COMPARE(cvt_d_s(f22, f24), "4600c5a1 cvt.d.s f22, f24");
-  COMPARE(cvt_d_w(f22, f24), "4680c5a1 cvt.d.w f22, f24");
-
-  COMPARE(cvt_s_d(f22, f24), "4620c5a0 cvt.s.d f22, f24");
-  COMPARE(cvt_s_w(f22, f24), "4680c5a0 cvt.s.w f22, f24");
-
-  if ((IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) &&
-      IsFp64Mode()) {
-    COMPARE(cvt_d_l(f22, f24), "46a0c5a1 cvt.d.l f22, f24");
-    COMPARE(cvt_l_d(f22, f24), "4620c5a5 cvt.l.d f22, f24");
-
-    COMPARE(cvt_s_l(f22, f24), "46a0c5a0 cvt.s.l f22, f24");
-    COMPARE(cvt_l_s(f22, f24), "4600c5a5 cvt.l.s f22, f24");
-  }
-
-  VERIFY_RUN();
-}
-
-
-TEST(ctc1_cfc1_disasm) {
-  SET_UP();
-  COMPARE(abs_d(f10, f31), "4620fa85 abs.d f10, f31");
-  COMPARE(ceil_w_s(f8, f31), "4600fa0e ceil.w.s f8, f31");
-  COMPARE(ctc1(a0, FCSR), "44c4f800 ctc1 a0, FCSR");
-  COMPARE(cfc1(a0, FCSR), "4444f800 cfc1 a0, FCSR");
-  VERIFY_RUN();
-}
-
-TEST(madd_msub_maddf_msubf) {
-  SET_UP();
-  if (IsMipsArchVariant(kMips32r2)) {
-    COMPARE(madd_s(f4, f6, f8, f10), "4cca4120 madd.s f4, f6, f8, f10");
-    COMPARE(madd_d(f4, f6, f8, f10), "4cca4121 madd.d f4, f6, f8, f10");
-    COMPARE(msub_s(f4, f6, f8, f10), "4cca4128 msub.s f4, f6, f8, f10");
-    COMPARE(msub_d(f4, f6, f8, f10), "4cca4129 msub.d f4, f6, f8, f10");
-  }
-  if (IsMipsArchVariant(kMips32r6)) {
-    COMPARE(maddf_s(f4, f8, f10), "460a4118 maddf.s f4, f8, f10");
-    COMPARE(maddf_d(f4, f8, f10), "462a4118 maddf.d f4, f8, f10");
-    COMPARE(msubf_s(f4, f8, f10), "460a4119 msubf.s f4, f8, f10");
-    COMPARE(msubf_d(f4, f8, f10), "462a4119 msubf.d f4, f8, f10");
-  }
-  VERIFY_RUN();
-}
-
-TEST(atomic_load_store) {
-  SET_UP();
-  if (IsMipsArchVariant(kMips32r6)) {
-    COMPARE(ll(v0, MemOperand(v1, -1)), "7c62ffb6 ll v0, -1(v1)");
-    COMPARE(sc(v0, MemOperand(v1, 1)), "7c6200a6 sc v0, 1(v1)");
-  } else {
-    COMPARE(ll(v0, MemOperand(v1, -1)), "c062ffff ll v0, -1(v1)");
-    COMPARE(sc(v0, MemOperand(v1, 1)), "e0620001 sc v0, 1(v1)");
-  }
-  VERIFY_RUN();
-}
-
-TEST(MSA_BRANCH) {
-  SET_UP();
-  if (IsMipsArchVariant(kMips32r6) && CpuFeatures::IsSupported(MIPS_SIMD)) {
-    CpuFeatureScope fscope(&assm, MIPS_SIMD);
-
-    COMPARE_MSA_BRANCH(bnz_b(w0, 1), "47800001 bnz.b w0, 1", 1);
-    COMPARE_MSA_BRANCH(bnz_h(w1, -1), "47a1ffff bnz.h w1, -1", -1);
-    COMPARE_MSA_BRANCH(bnz_w(w2, 32767), "47c27fff bnz.w w2, 32767",
-                       32767);
-    COMPARE_MSA_BRANCH(bnz_d(w3, -32768), "47e38000 bnz.d w3, -32768",
-                       -32768);
-    COMPARE_MSA_BRANCH(bnz_v(w0, static_cast<int16_t>(0)),
-                       "45e00000 bnz.v w0, 0", 0);
-    COMPARE_MSA_BRANCH(bz_b(w0, 1), "47000001 bz.b w0, 1", 1);
-    COMPARE_MSA_BRANCH(bz_h(w1, -1), "4721ffff bz.h w1, -1", -1);
-    COMPARE_MSA_BRANCH(bz_w(w2, 32767), "47427fff bz.w w2, 32767",
-                       32767);
-    COMPARE_MSA_BRANCH(bz_d(w3, -32768), "47638000 bz.d w3, -32768",
-                       -32768);
-    COMPARE_MSA_BRANCH(bz_v(w0, static_cast<int16_t>(0)),
-                       "45600000 bz.v w0, 0", 0);
-  }
-  VERIFY_RUN();
-}
-
-TEST(MSA_MI10) {
-  SET_UP();
-  if (IsMipsArchVariant(kMips32r6) && CpuFeatures::IsSupported(MIPS_SIMD)) {
-    CpuFeatureScope fscope(&assm, MIPS_SIMD);
-
-    COMPARE(ld_b(w0, MemOperand(at, -512)),
-            "7a000820 ld.b w0, -512(at)");
-    COMPARE(ld_b(w1, MemOperand(v0, 0)), "78001060 ld.b w1, 0(v0)");
-    COMPARE(ld_b(w2, MemOperand(v1, 511)), "79ff18a0 ld.b w2, 511(v1)");
-    COMPARE(ld_h(w4, MemOperand(a1, -512)),
-            "7a002921 ld.h w4, -512(a1)");
-    COMPARE(ld_h(w5, MemOperand(a2, 64)), "78403161 ld.h w5, 64(a2)");
-    COMPARE(ld_h(w6, MemOperand(a3, 511)), "79ff39a1 ld.h w6, 511(a3)");
-    COMPARE(ld_w(w10, MemOperand(t3, -512)),
-            "7a005aa2 ld.w w10, -512(t3)");
-    COMPARE(ld_w(w11, MemOperand(t4, 511)),
-            "79ff62e2 ld.w w11, 511(t4)");
-    COMPARE(ld_w(w12, MemOperand(t5, -128)),
-            "7b806b22 ld.w w12, -128(t5)");
-    COMPARE(ld_d(w17, MemOperand(s2, -512)),
-            "7a009463 ld.d w17, -512(s2)");
-    COMPARE(ld_d(w18, MemOperand(s3, 128)),
-            "78809ca3 ld.d w18, 128(s3)");
-    COMPARE(ld_d(w19, MemOperand(s4, 511)),
-            "79ffa4e3 ld.d w19, 511(s4)");
-    COMPARE(st_b(w0, MemOperand(at, -512)),
-            "7a000824 st.b w0, -512(at)");
-    COMPARE(st_b(w1, MemOperand(v0, 0)), "78001064 st.b w1, 0(v0)");
-    COMPARE(st_b(w2, MemOperand(v1, 511)), "79ff18a4 st.b w2, 511(v1)");
-    COMPARE(st_h(w4, MemOperand(a1, -512)),
-            "7a002925 st.h w4, -512(a1)");
-    COMPARE(st_h(w5, MemOperand(a2, 64)), "78403165 st.h w5, 64(a2)");
-    COMPARE(st_h(w6, MemOperand(a3, 511)), "79ff39a5 st.h w6, 511(a3)");
-    COMPARE(st_w(w10, MemOperand(t3, -512)),
-            "7a005aa6 st.w w10, -512(t3)");
-    COMPARE(st_w(w11, MemOperand(t4, 511)),
-            "79ff62e6 st.w w11, 511(t4)");
-    COMPARE(st_w(w12, MemOperand(t5, -128)),
-            "7b806b26 st.w w12, -128(t5)");
-    COMPARE(st_d(w17, MemOperand(s2, -512)),
-            "7a009467 st.d w17, -512(s2)");
-    COMPARE(st_d(w18, MemOperand(s3, 128)),
-            "78809ca7 st.d w18, 128(s3)");
-    COMPARE(st_d(w19, MemOperand(s4, 511)),
-            "79ffa4e7 st.d w19, 511(s4)");
-  }
-  VERIFY_RUN();
-}
-
-TEST(MSA_I5) {
-  SET_UP();
-  if (IsMipsArchVariant(kMips32r6) && CpuFeatures::IsSupported(MIPS_SIMD)) {
-    CpuFeatureScope fscope(&assm, MIPS_SIMD);
-
-    COMPARE(addvi_b(w3, w31, 30), "781ef8c6 addvi.b w3, w31, 30");
-    COMPARE(addvi_h(w24, w13, 26), "783a6e06 addvi.h w24, w13, 26");
-    COMPARE(addvi_w(w26, w20, 26), "785aa686 addvi.w w26, w20, 26");
-    COMPARE(addvi_d(w16, w1, 21), "78750c06 addvi.d w16, w1, 21");
-    COMPARE(ceqi_b(w24, w21, -8), "7818ae07 ceqi.b w24, w21, -8");
-    COMPARE(ceqi_h(w31, w15, 2), "78227fc7 ceqi.h w31, w15, 2");
-    COMPARE(ceqi_w(w12, w1, -1), "785f0b07 ceqi.w w12, w1, -1");
-    COMPARE(ceqi_d(w24, w22, 7), "7867b607 ceqi.d w24, w22, 7");
-    COMPARE(clei_s_b(w12, w16, 1), "7a018307 clei_s.b w12, w16, 1");
-    COMPARE(clei_s_h(w2, w10, -9), "7a375087 clei_s.h w2, w10, -9");
-    COMPARE(clei_s_w(w4, w11, -10), "7a565907 clei_s.w w4, w11, -10");
-    COMPARE(clei_s_d(w0, w29, -10), "7a76e807 clei_s.d w0, w29, -10");
-    COMPARE(clei_u_b(w21, w17, 3), "7a838d47 clei_u.b w21, w17, 3");
-    COMPARE(clei_u_h(w29, w7, 17), "7ab13f47 clei_u.h w29, w7, 17");
-    COMPARE(clei_u_w(w1, w1, 2), "7ac20847 clei_u.w w1, w1, 2");
-    COMPARE(clei_u_d(w27, w27, 29), "7afddec7 clei_u.d w27, w27, 29");
-
COMPARE(clti_s_b(w19, w13, -7), "79196cc7 clti_s.b w19, w13, -7"); - COMPARE(clti_s_h(w15, w10, -12), "793453c7 clti_s.h w15, w10, -12"); - COMPARE(clti_s_w(w12, w12, 11), "794b6307 clti_s.w w12, w12, 11"); - COMPARE(clti_s_d(w29, w20, -15), "7971a747 clti_s.d w29, w20, -15"); - COMPARE(clti_u_b(w14, w9, 29), "799d4b87 clti_u.b w14, w9, 29"); - COMPARE(clti_u_h(w24, w25, 25), "79b9ce07 clti_u.h w24, w25, 25"); - COMPARE(clti_u_w(w1, w1, 22), "79d60847 clti_u.w w1, w1, 22"); - COMPARE(clti_u_d(w21, w25, 1), "79e1cd47 clti_u.d w21, w25, 1"); - COMPARE(maxi_s_b(w22, w21, 1), "7901ad86 maxi_s.b w22, w21, 1"); - COMPARE(maxi_s_h(w29, w5, -8), "79382f46 maxi_s.h w29, w5, -8"); - COMPARE(maxi_s_w(w1, w10, -12), "79545046 maxi_s.w w1, w10, -12"); - COMPARE(maxi_s_d(w13, w29, -16), "7970eb46 maxi_s.d w13, w29, -16"); - COMPARE(maxi_u_b(w20, w0, 12), "798c0506 maxi_u.b w20, w0, 12"); - COMPARE(maxi_u_h(w1, w14, 3), "79a37046 maxi_u.h w1, w14, 3"); - COMPARE(maxi_u_w(w27, w22, 11), "79cbb6c6 maxi_u.w w27, w22, 11"); - COMPARE(maxi_u_d(w26, w6, 4), "79e43686 maxi_u.d w26, w6, 4"); - COMPARE(mini_s_b(w4, w1, 1), "7a010906 mini_s.b w4, w1, 1"); - COMPARE(mini_s_h(w27, w27, -9), "7a37dec6 mini_s.h w27, w27, -9"); - COMPARE(mini_s_w(w28, w11, 9), "7a495f06 mini_s.w w28, w11, 9"); - COMPARE(mini_s_d(w11, w10, 10), "7a6a52c6 mini_s.d w11, w10, 10"); - COMPARE(mini_u_b(w18, w23, 27), "7a9bbc86 mini_u.b w18, w23, 27"); - COMPARE(mini_u_h(w7, w26, 18), "7ab2d1c6 mini_u.h w7, w26, 18"); - COMPARE(mini_u_w(w11, w12, 26), "7ada62c6 mini_u.w w11, w12, 26"); - COMPARE(mini_u_d(w11, w15, 2), "7ae27ac6 mini_u.d w11, w15, 2"); - COMPARE(subvi_b(w24, w20, 19), "7893a606 subvi.b w24, w20, 19"); - COMPARE(subvi_h(w11, w19, 4), "78a49ac6 subvi.h w11, w19, 4"); - COMPARE(subvi_w(w12, w10, 11), "78cb5306 subvi.w w12, w10, 11"); - COMPARE(subvi_d(w19, w16, 7), "78e784c6 subvi.d w19, w16, 7"); - } - VERIFY_RUN(); -} - -TEST(MSA_I10) { - SET_UP(); - if (IsMipsArchVariant(kMips32r6) && CpuFeatures::IsSupported(MIPS_SIMD)) { - CpuFeatureScope fscope(&assm, MIPS_SIMD); - - COMPARE(ldi_b(w8, 198), "7b063207 ldi.b w8, 198"); - COMPARE(ldi_h(w20, 313), "7b29cd07 ldi.h w20, 313"); - COMPARE(ldi_w(w24, 492), "7b4f6607 ldi.w w24, 492"); - COMPARE(ldi_d(w27, -180), "7b7a66c7 ldi.d w27, -180"); - } - VERIFY_RUN(); -} - -TEST(MSA_I8) { - SET_UP(); - if (IsMipsArchVariant(kMips32r6) && CpuFeatures::IsSupported(MIPS_SIMD)) { - CpuFeatureScope fscope(&assm, MIPS_SIMD); - - COMPARE(andi_b(w2, w29, 48), "7830e880 andi.b w2, w29, 48"); - COMPARE(bmnzi_b(w6, w22, 126), "787eb181 bmnzi.b w6, w22, 126"); - COMPARE(bmzi_b(w27, w1, 88), "79580ec1 bmzi.b w27, w1, 88"); - COMPARE(bseli_b(w29, w3, 189), "7abd1f41 bseli.b w29, w3, 189"); - COMPARE(nori_b(w1, w17, 56), "7a388840 nori.b w1, w17, 56"); - COMPARE(ori_b(w26, w20, 135), "7987a680 ori.b w26, w20, 135"); - COMPARE(shf_b(w19, w30, 105), "7869f4c2 shf.b w19, w30, 105"); - COMPARE(shf_h(w17, w8, 76), "794c4442 shf.h w17, w8, 76"); - COMPARE(shf_w(w14, w3, 93), "7a5d1b82 shf.w w14, w3, 93"); - COMPARE(xori_b(w16, w10, 20), "7b145400 xori.b w16, w10, 20"); - } - VERIFY_RUN(); -} - -TEST(MSA_VEC) { - SET_UP(); - if (IsMipsArchVariant(kMips32r6) && CpuFeatures::IsSupported(MIPS_SIMD)) { - CpuFeatureScope fscope(&assm, MIPS_SIMD); - - COMPARE(and_v(w25, w20, w27), "781ba65e and.v w25, w20, w27"); - COMPARE(bmnz_v(w17, w6, w7), "7887345e bmnz.v w17, w6, w7"); - COMPARE(bmz_v(w3, w17, w9), "78a988de bmz.v w3, w17, w9"); - COMPARE(bsel_v(w8, w0, w14), "78ce021e bsel.v w8, w0, w14"); - COMPARE(nor_v(w7, 
w31, w0), "7840f9de nor.v w7, w31, w0"); - COMPARE(or_v(w24, w26, w30), "783ed61e or.v w24, w26, w30"); - COMPARE(xor_v(w7, w27, w15), "786fd9de xor.v w7, w27, w15"); - } - VERIFY_RUN(); -} - -TEST(MSA_2R) { - SET_UP(); - if (IsMipsArchVariant(kMips32r6) && CpuFeatures::IsSupported(MIPS_SIMD)) { - CpuFeatureScope fscope(&assm, MIPS_SIMD); - - COMPARE(fill_b(w30, t1), "7b004f9e fill.b w30, t1"); - COMPARE(fill_h(w31, s7), "7b01bfde fill.h w31, s7"); - COMPARE(fill_w(w16, t8), "7b02c41e fill.w w16, t8"); - COMPARE(nloc_b(w21, w0), "7b08055e nloc.b w21, w0"); - COMPARE(nloc_h(w18, w31), "7b09fc9e nloc.h w18, w31"); - COMPARE(nloc_w(w2, w23), "7b0ab89e nloc.w w2, w23"); - COMPARE(nloc_d(w4, w10), "7b0b511e nloc.d w4, w10"); - COMPARE(nlzc_b(w31, w2), "7b0c17de nlzc.b w31, w2"); - COMPARE(nlzc_h(w27, w22), "7b0db6de nlzc.h w27, w22"); - COMPARE(nlzc_w(w10, w29), "7b0eea9e nlzc.w w10, w29"); - COMPARE(nlzc_d(w25, w9), "7b0f4e5e nlzc.d w25, w9"); - COMPARE(pcnt_b(w20, w18), "7b04951e pcnt.b w20, w18"); - COMPARE(pcnt_h(w0, w8), "7b05401e pcnt.h w0, w8"); - COMPARE(pcnt_w(w23, w9), "7b064dde pcnt.w w23, w9"); - COMPARE(pcnt_d(w21, w24), "7b07c55e pcnt.d w21, w24"); - } - VERIFY_RUN(); -} - -TEST(MSA_2RF) { - SET_UP(); - if (IsMipsArchVariant(kMips32r6) && CpuFeatures::IsSupported(MIPS_SIMD)) { - CpuFeatureScope fscope(&assm, MIPS_SIMD); - - COMPARE(fclass_w(w26, w12), "7b20669e fclass.w w26, w12"); - COMPARE(fclass_d(w24, w17), "7b218e1e fclass.d w24, w17"); - COMPARE(fexupl_w(w8, w0), "7b30021e fexupl.w w8, w0"); - COMPARE(fexupl_d(w17, w29), "7b31ec5e fexupl.d w17, w29"); - COMPARE(fexupr_w(w13, w4), "7b32235e fexupr.w w13, w4"); - COMPARE(fexupr_d(w5, w2), "7b33115e fexupr.d w5, w2"); - COMPARE(ffint_s_w(w20, w29), "7b3ced1e ffint_s.w w20, w29"); - COMPARE(ffint_s_d(w12, w15), "7b3d7b1e ffint_s.d w12, w15"); - COMPARE(ffint_u_w(w7, w27), "7b3ed9de ffint_u.w w7, w27"); - COMPARE(ffint_u_d(w19, w16), "7b3f84de ffint_u.d w19, w16"); - COMPARE(ffql_w(w31, w13), "7b346fde ffql.w w31, w13"); - COMPARE(ffql_d(w12, w13), "7b356b1e ffql.d w12, w13"); - COMPARE(ffqr_w(w27, w30), "7b36f6de ffqr.w w27, w30"); - COMPARE(ffqr_d(w30, w15), "7b377f9e ffqr.d w30, w15"); - COMPARE(flog2_w(w25, w31), "7b2efe5e flog2.w w25, w31"); - COMPARE(flog2_d(w18, w10), "7b2f549e flog2.d w18, w10"); - COMPARE(frint_w(w7, w15), "7b2c79de frint.w w7, w15"); - COMPARE(frint_d(w21, w22), "7b2db55e frint.d w21, w22"); - COMPARE(frcp_w(w19, w0), "7b2a04de frcp.w w19, w0"); - COMPARE(frcp_d(w4, w14), "7b2b711e frcp.d w4, w14"); - COMPARE(frsqrt_w(w12, w17), "7b288b1e frsqrt.w w12, w17"); - COMPARE(frsqrt_d(w23, w11), "7b295dde frsqrt.d w23, w11"); - COMPARE(fsqrt_w(w0, w11), "7b26581e fsqrt.w w0, w11"); - COMPARE(fsqrt_d(w15, w12), "7b2763de fsqrt.d w15, w12"); - COMPARE(ftint_s_w(w30, w5), "7b382f9e ftint_s.w w30, w5"); - COMPARE(ftint_s_d(w5, w23), "7b39b95e ftint_s.d w5, w23"); - COMPARE(ftint_u_w(w20, w14), "7b3a751e ftint_u.w w20, w14"); - COMPARE(ftint_u_d(w23, w21), "7b3badde ftint_u.d w23, w21"); - COMPARE(ftrunc_s_w(w29, w17), "7b228f5e ftrunc_s.w w29, w17"); - COMPARE(ftrunc_s_d(w12, w27), "7b23db1e ftrunc_s.d w12, w27"); - COMPARE(ftrunc_u_w(w17, w15), "7b247c5e ftrunc_u.w w17, w15"); - COMPARE(ftrunc_u_d(w5, w27), "7b25d95e ftrunc_u.d w5, w27"); - } - VERIFY_RUN(); -} - -TEST(MSA_3R) { - SET_UP(); - if (IsMipsArchVariant(kMips32r6) && CpuFeatures::IsSupported(MIPS_SIMD)) { - CpuFeatureScope fscope(&assm, MIPS_SIMD); - - COMPARE(add_a_b(w26, w9, w4), "78044e90 add_a.b w26, w9, w4"); - COMPARE(add_a_h(w23, w27, w31), 
"783fddd0 add_a.h w23, w27, w31"); - COMPARE(add_a_w(w11, w6, w22), "785632d0 add_a.w w11, w6, w22"); - COMPARE(add_a_d(w6, w10, w0), "78605190 add_a.d w6, w10, w0"); - COMPARE(adds_a_b(w19, w24, w19), "7893c4d0 adds_a.b w19, w24, w19"); - COMPARE(adds_a_h(w25, w6, w4), "78a43650 adds_a.h w25, w6, w4"); - COMPARE(adds_a_w(w25, w17, w27), "78db8e50 adds_a.w w25, w17, w27"); - COMPARE(adds_a_d(w15, w18, w26), "78fa93d0 adds_a.d w15, w18, w26"); - COMPARE(adds_s_b(w29, w11, w19), "79135f50 adds_s.b w29, w11, w19"); - COMPARE(adds_s_h(w5, w23, w26), "793ab950 adds_s.h w5, w23, w26"); - COMPARE(adds_s_w(w16, w14, w13), "794d7410 adds_s.w w16, w14, w13"); - COMPARE(adds_s_d(w2, w14, w28), "797c7090 adds_s.d w2, w14, w28"); - COMPARE(adds_u_b(w3, w17, w14), "798e88d0 adds_u.b w3, w17, w14"); - COMPARE(adds_u_h(w10, w30, w4), "79a4f290 adds_u.h w10, w30, w4"); - COMPARE(adds_u_w(w15, w18, w20), "79d493d0 adds_u.w w15, w18, w20"); - COMPARE(adds_u_d(w30, w10, w9), "79e95790 adds_u.d w30, w10, w9"); - COMPARE(addv_b(w24, w20, w21), "7815a60e addv.b w24, w20, w21"); - COMPARE(addv_h(w4, w13, w27), "783b690e addv.h w4, w13, w27"); - COMPARE(addv_w(w19, w11, w14), "784e5cce addv.w w19, w11, w14"); - COMPARE(addv_d(w2, w21, w31), "787fa88e addv.d w2, w21, w31"); - COMPARE(asub_s_b(w23, w16, w3), "7a0385d1 asub_s.b w23, w16, w3"); - COMPARE(asub_s_h(w22, w17, w25), "7a398d91 asub_s.h w22, w17, w25"); - COMPARE(asub_s_w(w24, w1, w9), "7a490e11 asub_s.w w24, w1, w9"); - COMPARE(asub_s_d(w13, w12, w12), "7a6c6351 asub_s.d w13, w12, w12"); - COMPARE(asub_u_b(w10, w29, w11), "7a8bea91 asub_u.b w10, w29, w11"); - COMPARE(asub_u_h(w18, w9, w15), "7aaf4c91 asub_u.h w18, w9, w15"); - COMPARE(asub_u_w(w10, w19, w31), "7adf9a91 asub_u.w w10, w19, w31"); - COMPARE(asub_u_d(w17, w10, w0), "7ae05451 asub_u.d w17, w10, w0"); - COMPARE(ave_s_b(w2, w5, w1), "7a012890 ave_s.b w2, w5, w1"); - COMPARE(ave_s_h(w16, w19, w9), "7a299c10 ave_s.h w16, w19, w9"); - COMPARE(ave_s_w(w17, w31, w5), "7a45fc50 ave_s.w w17, w31, w5"); - COMPARE(ave_s_d(w27, w25, w10), "7a6aced0 ave_s.d w27, w25, w10"); - COMPARE(ave_u_b(w16, w19, w9), "7a899c10 ave_u.b w16, w19, w9"); - COMPARE(ave_u_h(w28, w28, w11), "7aabe710 ave_u.h w28, w28, w11"); - COMPARE(ave_u_w(w11, w12, w11), "7acb62d0 ave_u.w w11, w12, w11"); - COMPARE(ave_u_d(w30, w19, w28), "7afc9f90 ave_u.d w30, w19, w28"); - COMPARE(aver_s_b(w26, w16, w2), "7b028690 aver_s.b w26, w16, w2"); - COMPARE(aver_s_h(w31, w27, w27), "7b3bdfd0 aver_s.h w31, w27, w27"); - COMPARE(aver_s_w(w28, w18, w25), "7b599710 aver_s.w w28, w18, w25"); - COMPARE(aver_s_d(w29, w21, w27), "7b7baf50 aver_s.d w29, w21, w27"); - COMPARE(aver_u_b(w29, w26, w3), "7b83d750 aver_u.b w29, w26, w3"); - COMPARE(aver_u_h(w18, w18, w9), "7ba99490 aver_u.h w18, w18, w9"); - COMPARE(aver_u_w(w17, w25, w29), "7bddcc50 aver_u.w w17, w25, w29"); - COMPARE(aver_u_d(w22, w22, w19), "7bf3b590 aver_u.d w22, w22, w19"); - COMPARE(bclr_b(w2, w15, w29), "799d788d bclr.b w2, w15, w29"); - COMPARE(bclr_h(w16, w21, w28), "79bcac0d bclr.h w16, w21, w28"); - COMPARE(bclr_w(w19, w2, w9), "79c914cd bclr.w w19, w2, w9"); - COMPARE(bclr_d(w27, w31, w4), "79e4fecd bclr.d w27, w31, w4"); - COMPARE(binsl_b(w5, w16, w24), "7b18814d binsl.b w5, w16, w24"); - COMPARE(binsl_h(w30, w5, w10), "7b2a2f8d binsl.h w30, w5, w10"); - COMPARE(binsl_w(w14, w15, w13), "7b4d7b8d binsl.w w14, w15, w13"); - COMPARE(binsl_d(w23, w20, w12), "7b6ca5cd binsl.d w23, w20, w12"); - COMPARE(binsr_b(w22, w11, w2), "7b825d8d binsr.b w22, w11, w2"); - COMPARE(binsr_h(w0, w26, 
w6), "7ba6d00d binsr.h w0, w26, w6"); - COMPARE(binsr_w(w26, w3, w28), "7bdc1e8d binsr.w w26, w3, w28"); - COMPARE(binsr_d(w0, w0, w21), "7bf5000d binsr.d w0, w0, w21"); - COMPARE(bneg_b(w0, w11, w24), "7a98580d bneg.b w0, w11, w24"); - COMPARE(bneg_h(w28, w16, w4), "7aa4870d bneg.h w28, w16, w4"); - COMPARE(bneg_w(w3, w26, w19), "7ad3d0cd bneg.w w3, w26, w19"); - COMPARE(bneg_d(w13, w29, w15), "7aefeb4d bneg.d w13, w29, w15"); - COMPARE(bset_b(w31, w5, w31), "7a1f2fcd bset.b w31, w5, w31"); - COMPARE(bset_h(w14, w12, w6), "7a26638d bset.h w14, w12, w6"); - COMPARE(bset_w(w31, w9, w12), "7a4c4fcd bset.w w31, w9, w12"); - COMPARE(bset_d(w5, w22, w5), "7a65b14d bset.d w5, w22, w5"); - COMPARE(ceq_b(w31, w31, w18), "7812ffcf ceq.b w31, w31, w18"); - COMPARE(ceq_h(w10, w27, w9), "7829da8f ceq.h w10, w27, w9"); - COMPARE(ceq_w(w9, w5, w14), "784e2a4f ceq.w w9, w5, w14"); - COMPARE(ceq_d(w5, w17, w0), "7860894f ceq.d w5, w17, w0"); - COMPARE(cle_s_b(w23, w4, w9), "7a0925cf cle_s.b w23, w4, w9"); - COMPARE(cle_s_h(w22, w27, w19), "7a33dd8f cle_s.h w22, w27, w19"); - COMPARE(cle_s_w(w30, w26, w10), "7a4ad78f cle_s.w w30, w26, w10"); - COMPARE(cle_s_d(w18, w5, w10), "7a6a2c8f cle_s.d w18, w5, w10"); - COMPARE(cle_u_b(w1, w25, w0), "7a80c84f cle_u.b w1, w25, w0"); - COMPARE(cle_u_h(w7, w0, w29), "7abd01cf cle_u.h w7, w0, w29"); - COMPARE(cle_u_w(w25, w18, w1), "7ac1964f cle_u.w w25, w18, w1"); - COMPARE(cle_u_d(w6, w0, w30), "7afe018f cle_u.d w6, w0, w30"); - COMPARE(clt_s_b(w25, w2, w21), "7915164f clt_s.b w25, w2, w21"); - COMPARE(clt_s_h(w2, w19, w9), "7929988f clt_s.h w2, w19, w9"); - COMPARE(clt_s_w(w23, w8, w16), "795045cf clt_s.w w23, w8, w16"); - COMPARE(clt_s_d(w7, w30, w12), "796cf1cf clt_s.d w7, w30, w12"); - COMPARE(clt_u_b(w2, w31, w13), "798df88f clt_u.b w2, w31, w13"); - COMPARE(clt_u_h(w16, w31, w23), "79b7fc0f clt_u.h w16, w31, w23"); - COMPARE(clt_u_w(w3, w24, w9), "79c9c0cf clt_u.w w3, w24, w9"); - COMPARE(clt_u_d(w7, w0, w1), "79e101cf clt_u.d w7, w0, w1"); - COMPARE(div_s_b(w29, w3, w18), "7a121f52 div_s.b w29, w3, w18"); - COMPARE(div_s_h(w17, w16, w13), "7a2d8452 div_s.h w17, w16, w13"); - COMPARE(div_s_w(w4, w25, w30), "7a5ec912 div_s.w w4, w25, w30"); - COMPARE(div_s_d(w31, w9, w20), "7a744fd2 div_s.d w31, w9, w20"); - COMPARE(div_u_b(w6, w29, w10), "7a8ae992 div_u.b w6, w29, w10"); - COMPARE(div_u_h(w24, w21, w14), "7aaeae12 div_u.h w24, w21, w14"); - COMPARE(div_u_w(w29, w14, w25), "7ad97752 div_u.w w29, w14, w25"); - COMPARE(div_u_d(w31, w1, w21), "7af50fd2 div_u.d w31, w1, w21"); - COMPARE(dotp_s_h(w23, w22, w25), "7839b5d3 dotp_s.h w23, w22, w25"); - COMPARE(dotp_s_w(w20, w14, w5), "78457513 dotp_s.w w20, w14, w5"); - COMPARE(dotp_s_d(w17, w2, w22), "78761453 dotp_s.d w17, w2, w22"); - COMPARE(dotp_u_h(w13, w2, w6), "78a61353 dotp_u.h w13, w2, w6"); - COMPARE(dotp_u_w(w15, w22, w21), "78d5b3d3 dotp_u.w w15, w22, w21"); - COMPARE(dotp_u_d(w4, w16, w26), "78fa8113 dotp_u.d w4, w16, w26"); - COMPARE(dpadd_s_h(w1, w28, w22), "7936e053 dpadd_s.h w1, w28, w22"); - COMPARE(dpadd_s_w(w10, w1, w12), "794c0a93 dpadd_s.w w10, w1, w12"); - COMPARE(dpadd_s_d(w3, w21, w27), "797ba8d3 dpadd_s.d w3, w21, w27"); - COMPARE(dpadd_u_h(w17, w5, w20), "79b42c53 dpadd_u.h w17, w5, w20"); - COMPARE(dpadd_u_w(w24, w8, w16), "79d04613 dpadd_u.w w24, w8, w16"); - COMPARE(dpadd_u_d(w15, w29, w16), - "79f0ebd3 dpadd_u.d w15, w29, w16"); - COMPARE(dpsub_s_h(w4, w11, w12), "7a2c5913 dpsub_s.h w4, w11, w12"); - COMPARE(dpsub_s_w(w4, w7, w6), "7a463913 dpsub_s.w w4, w7, w6"); - COMPARE(dpsub_s_d(w31, 
w12, w28), - "7a7c67d3 dpsub_s.d w31, w12, w28"); - COMPARE(dpsub_u_h(w4, w25, w17), "7ab1c913 dpsub_u.h w4, w25, w17"); - COMPARE(dpsub_u_w(w19, w25, w16), - "7ad0ccd3 dpsub_u.w w19, w25, w16"); - COMPARE(dpsub_u_d(w7, w10, w26), "7afa51d3 dpsub_u.d w7, w10, w26"); - COMPARE(hadd_s_h(w28, w24, w2), "7a22c715 hadd_s.h w28, w24, w2"); - COMPARE(hadd_s_w(w24, w17, w11), "7a4b8e15 hadd_s.w w24, w17, w11"); - COMPARE(hadd_s_d(w17, w15, w20), "7a747c55 hadd_s.d w17, w15, w20"); - COMPARE(hadd_u_h(w12, w29, w17), "7ab1eb15 hadd_u.h w12, w29, w17"); - COMPARE(hadd_u_w(w9, w5, w6), "7ac62a55 hadd_u.w w9, w5, w6"); - COMPARE(hadd_u_d(w1, w20, w6), "7ae6a055 hadd_u.d w1, w20, w6"); - COMPARE(hsub_s_h(w16, w14, w29), "7b3d7415 hsub_s.h w16, w14, w29"); - COMPARE(hsub_s_w(w9, w13, w11), "7b4b6a55 hsub_s.w w9, w13, w11"); - COMPARE(hsub_s_d(w30, w18, w14), "7b6e9795 hsub_s.d w30, w18, w14"); - COMPARE(hsub_u_h(w7, w12, w14), "7bae61d5 hsub_u.h w7, w12, w14"); - COMPARE(hsub_u_w(w21, w5, w5), "7bc52d55 hsub_u.w w21, w5, w5"); - COMPARE(hsub_u_d(w11, w12, w31), "7bff62d5 hsub_u.d w11, w12, w31"); - COMPARE(ilvev_b(w18, w16, w30), "7b1e8494 ilvev.b w18, w16, w30"); - COMPARE(ilvev_h(w14, w0, w13), "7b2d0394 ilvev.h w14, w0, w13"); - COMPARE(ilvev_w(w12, w25, w22), "7b56cb14 ilvev.w w12, w25, w22"); - COMPARE(ilvev_d(w30, w27, w3), "7b63df94 ilvev.d w30, w27, w3"); - COMPARE(ilvl_b(w29, w3, w21), "7a151f54 ilvl.b w29, w3, w21"); - COMPARE(ilvl_h(w27, w10, w17), "7a3156d4 ilvl.h w27, w10, w17"); - COMPARE(ilvl_w(w6, w1, w0), "7a400994 ilvl.w w6, w1, w0"); - COMPARE(ilvl_d(w3, w16, w24), "7a7880d4 ilvl.d w3, w16, w24"); - COMPARE(ilvod_b(w11, w5, w20), "7b942ad4 ilvod.b w11, w5, w20"); - COMPARE(ilvod_h(w18, w13, w31), "7bbf6c94 ilvod.h w18, w13, w31"); - COMPARE(ilvod_w(w29, w16, w24), "7bd88754 ilvod.w w29, w16, w24"); - COMPARE(ilvod_d(w22, w12, w29), "7bfd6594 ilvod.d w22, w12, w29"); - COMPARE(ilvr_b(w4, w30, w6), "7a86f114 ilvr.b w4, w30, w6"); - COMPARE(ilvr_h(w28, w19, w29), "7abd9f14 ilvr.h w28, w19, w29"); - COMPARE(ilvr_w(w18, w20, w21), "7ad5a494 ilvr.w w18, w20, w21"); - COMPARE(ilvr_d(w23, w30, w12), "7aecf5d4 ilvr.d w23, w30, w12"); - COMPARE(maddv_b(w17, w31, w29), "789dfc52 maddv.b w17, w31, w29"); - COMPARE(maddv_h(w7, w24, w9), "78a9c1d2 maddv.h w7, w24, w9"); - COMPARE(maddv_w(w22, w22, w20), "78d4b592 maddv.w w22, w22, w20"); - COMPARE(maddv_d(w30, w26, w20), "78f4d792 maddv.d w30, w26, w20"); - COMPARE(max_a_b(w23, w11, w23), "7b175dce max_a.b w23, w11, w23"); - COMPARE(max_a_h(w20, w5, w30), "7b3e2d0e max_a.h w20, w5, w30"); - COMPARE(max_a_w(w7, w18, w30), "7b5e91ce max_a.w w7, w18, w30"); - COMPARE(max_a_d(w8, w8, w31), "7b7f420e max_a.d w8, w8, w31"); - COMPARE(max_s_b(w10, w1, w19), "79130a8e max_s.b w10, w1, w19"); - COMPARE(max_s_h(w15, w29, w17), "7931ebce max_s.h w15, w29, w17"); - COMPARE(max_s_w(w15, w29, w14), "794eebce max_s.w w15, w29, w14"); - COMPARE(max_s_d(w25, w24, w3), "7963c64e max_s.d w25, w24, w3"); - COMPARE(max_u_b(w12, w24, w5), "7985c30e max_u.b w12, w24, w5"); - COMPARE(max_u_h(w5, w6, w7), "79a7314e max_u.h w5, w6, w7"); - COMPARE(max_u_w(w16, w4, w7), "79c7240e max_u.w w16, w4, w7"); - COMPARE(max_u_d(w26, w12, w24), "79f8668e max_u.d w26, w12, w24"); - COMPARE(min_a_b(w4, w26, w1), "7b81d10e min_a.b w4, w26, w1"); - COMPARE(min_a_h(w12, w13, w31), "7bbf6b0e min_a.h w12, w13, w31"); - COMPARE(min_a_w(w28, w20, w0), "7bc0a70e min_a.w w28, w20, w0"); - COMPARE(min_a_d(w12, w20, w19), "7bf3a30e min_a.d w12, w20, w19"); - COMPARE(min_s_b(w19, w3, w14), 
"7a0e1cce min_s.b w19, w3, w14"); - COMPARE(min_s_h(w27, w21, w8), "7a28aece min_s.h w27, w21, w8"); - COMPARE(min_s_w(w0, w14, w30), "7a5e700e min_s.w w0, w14, w30"); - COMPARE(min_s_d(w6, w8, w21), "7a75418e min_s.d w6, w8, w21"); - COMPARE(min_u_b(w22, w26, w8), "7a88d58e min_u.b w22, w26, w8"); - COMPARE(min_u_h(w7, w27, w12), "7aacd9ce min_u.h w7, w27, w12"); - COMPARE(min_u_w(w8, w20, w14), "7acea20e min_u.w w8, w20, w14"); - COMPARE(min_u_d(w26, w14, w15), "7aef768e min_u.d w26, w14, w15"); - COMPARE(mod_s_b(w18, w1, w26), "7b1a0c92 mod_s.b w18, w1, w26"); - COMPARE(mod_s_h(w31, w30, w28), "7b3cf7d2 mod_s.h w31, w30, w28"); - COMPARE(mod_s_w(w2, w6, w13), "7b4d3092 mod_s.w w2, w6, w13"); - COMPARE(mod_s_d(w21, w27, w22), "7b76dd52 mod_s.d w21, w27, w22"); - COMPARE(mod_u_b(w16, w7, w13), "7b8d3c12 mod_u.b w16, w7, w13"); - COMPARE(mod_u_h(w24, w8, w7), "7ba74612 mod_u.h w24, w8, w7"); - COMPARE(mod_u_w(w30, w2, w17), "7bd11792 mod_u.w w30, w2, w17"); - COMPARE(mod_u_d(w31, w2, w25), "7bf917d2 mod_u.d w31, w2, w25"); - COMPARE(msubv_b(w14, w5, w12), "790c2b92 msubv.b w14, w5, w12"); - COMPARE(msubv_h(w6, w7, w30), "793e3992 msubv.h w6, w7, w30"); - COMPARE(msubv_w(w13, w2, w21), "79551352 msubv.w w13, w2, w21"); - COMPARE(msubv_d(w16, w14, w27), "797b7412 msubv.d w16, w14, w27"); - COMPARE(mulv_b(w20, w3, w13), "780d1d12 mulv.b w20, w3, w13"); - COMPARE(mulv_h(w27, w26, w14), "782ed6d2 mulv.h w27, w26, w14"); - COMPARE(mulv_w(w10, w29, w3), "7843ea92 mulv.w w10, w29, w3"); - COMPARE(mulv_d(w7, w19, w29), "787d99d2 mulv.d w7, w19, w29"); - COMPARE(pckev_b(w5, w27, w7), "7907d954 pckev.b w5, w27, w7"); - COMPARE(pckev_h(w1, w4, w27), "793b2054 pckev.h w1, w4, w27"); - COMPARE(pckev_w(w30, w20, w0), "7940a794 pckev.w w30, w20, w0"); - COMPARE(pckev_d(w6, w1, w15), "796f0994 pckev.d w6, w1, w15"); - COMPARE(pckod_b(w18, w28, w30), "799ee494 pckod.b w18, w28, w30"); - COMPARE(pckod_h(w26, w5, w8), "79a82e94 pckod.h w26, w5, w8"); - COMPARE(pckod_w(w9, w4, w2), "79c22254 pckod.w w9, w4, w2"); - COMPARE(pckod_d(w30, w22, w20), "79f4b794 pckod.d w30, w22, w20"); - COMPARE(sld_b(w5, w23, t4), "780cb954 sld.b w5, w23[t4]"); - COMPARE(sld_h(w1, w23, v1), "7823b854 sld.h w1, w23[v1]"); - COMPARE(sld_w(w20, w8, t1), "78494514 sld.w w20, w8[t1]"); - COMPARE(sld_d(w7, w23, fp), "787eb9d4 sld.d w7, w23[fp]"); - COMPARE(sll_b(w3, w0, w17), "781100cd sll.b w3, w0, w17"); - COMPARE(sll_h(w17, w27, w3), "7823dc4d sll.h w17, w27, w3"); - COMPARE(sll_w(w16, w7, w6), "78463c0d sll.w w16, w7, w6"); - COMPARE(sll_d(w9, w0, w26), "787a024d sll.d w9, w0, w26"); - COMPARE(splat_b(w28, w1, at), "78810f14 splat.b w28, w1[at]"); - COMPARE(splat_h(w2, w11, t3), "78ab5894 splat.h w2, w11[t3]"); - COMPARE(splat_w(w22, w0, t3), "78cb0594 splat.w w22, w0[t3]"); - COMPARE(splat_d(w0, w0, v0), "78e20014 splat.d w0, w0[v0]"); - COMPARE(sra_b(w28, w4, w17), "7891270d sra.b w28, w4, w17"); - COMPARE(sra_h(w13, w9, w3), "78a34b4d sra.h w13, w9, w3"); - COMPARE(sra_w(w27, w21, w19), "78d3aecd sra.w w27, w21, w19"); - COMPARE(sra_d(w30, w8, w23), "78f7478d sra.d w30, w8, w23"); - COMPARE(srar_b(w19, w18, w18), "789294d5 srar.b w19, w18, w18"); - COMPARE(srar_h(w7, w23, w8), "78a8b9d5 srar.h w7, w23, w8"); - COMPARE(srar_w(w1, w12, w2), "78c26055 srar.w w1, w12, w2"); - COMPARE(srar_d(w21, w7, w14), "78ee3d55 srar.d w21, w7, w14"); - COMPARE(srl_b(w12, w3, w19), "79131b0d srl.b w12, w3, w19"); - COMPARE(srl_h(w23, w31, w20), "7934fdcd srl.h w23, w31, w20"); - COMPARE(srl_w(w18, w27, w11), "794bdc8d srl.w w18, w27, w11"); - 
COMPARE(srl_d(w3, w12, w26), "797a60cd srl.d w3, w12, w26"); - COMPARE(srlr_b(w15, w21, w11), "790babd5 srlr.b w15, w21, w11"); - COMPARE(srlr_h(w21, w13, w19), "79336d55 srlr.h w21, w13, w19"); - COMPARE(srlr_w(w6, w30, w3), "7943f195 srlr.w w6, w30, w3"); - COMPARE(srlr_d(w1, w2, w14), "796e1055 srlr.d w1, w2, w14"); - COMPARE(subs_s_b(w25, w15, w1), "78017e51 subs_s.b w25, w15, w1"); - COMPARE(subs_s_h(w28, w25, w22), "7836cf11 subs_s.h w28, w25, w22"); - COMPARE(subs_s_w(w10, w12, w21), "78556291 subs_s.w w10, w12, w21"); - COMPARE(subs_s_d(w4, w20, w18), "7872a111 subs_s.d w4, w20, w18"); - COMPARE(subs_u_b(w21, w6, w25), "78993551 subs_u.b w21, w6, w25"); - COMPARE(subs_u_h(w3, w10, w7), "78a750d1 subs_u.h w3, w10, w7"); - COMPARE(subs_u_w(w9, w15, w10), "78ca7a51 subs_u.w w9, w15, w10"); - COMPARE(subs_u_d(w7, w19, w10), "78ea99d1 subs_u.d w7, w19, w10"); - COMPARE(subsus_u_b(w6, w7, w12), "790c3991 subsus_u.b w6, w7, w12"); - COMPARE(subsus_u_h(w6, w29, w19), - "7933e991 subsus_u.h w6, w29, w19"); - COMPARE(subsus_u_w(w7, w15, w7), "794779d1 subsus_u.w w7, w15, w7"); - COMPARE(subsus_u_d(w9, w3, w15), "796f1a51 subsus_u.d w9, w3, w15"); - COMPARE(subsuu_s_b(w22, w3, w31), - "799f1d91 subsuu_s.b w22, w3, w31"); - COMPARE(subsuu_s_h(w19, w23, w22), - "79b6bcd1 subsuu_s.h w19, w23, w22"); - COMPARE(subsuu_s_w(w9, w10, w13), - "79cd5251 subsuu_s.w w9, w10, w13"); - COMPARE(subsuu_s_d(w5, w6, w0), "79e03151 subsuu_s.d w5, w6, w0"); - COMPARE(subv_b(w6, w13, w19), "7893698e subv.b w6, w13, w19"); - COMPARE(subv_h(w4, w25, w12), "78acc90e subv.h w4, w25, w12"); - COMPARE(subv_w(w27, w27, w11), "78cbdece subv.w w27, w27, w11"); - COMPARE(subv_d(w9, w24, w10), "78eac24e subv.d w9, w24, w10"); - COMPARE(vshf_b(w3, w16, w5), "780580d5 vshf.b w3, w16, w5"); - COMPARE(vshf_h(w20, w19, w8), "78289d15 vshf.h w20, w19, w8"); - COMPARE(vshf_w(w16, w30, w25), "7859f415 vshf.w w16, w30, w25"); - COMPARE(vshf_d(w19, w11, w15), "786f5cd5 vshf.d w19, w11, w15"); - } - VERIFY_RUN(); -} - -TEST(MSA_3RF) { - SET_UP(); - if (IsMipsArchVariant(kMips32r6) && CpuFeatures::IsSupported(MIPS_SIMD)) { - CpuFeatureScope fscope(&assm, MIPS_SIMD); - - COMPARE(fadd_w(w28, w19, w28), "781c9f1b fadd.w w28, w19, w28"); - COMPARE(fadd_d(w13, w2, w29), "783d135b fadd.d w13, w2, w29"); - COMPARE(fcaf_w(w14, w11, w25), "78195b9a fcaf.w w14, w11, w25"); - COMPARE(fcaf_d(w1, w1, w19), "7833085a fcaf.d w1, w1, w19"); - COMPARE(fceq_w(w1, w23, w16), "7890b85a fceq.w w1, w23, w16"); - COMPARE(fceq_d(w0, w8, w16), "78b0401a fceq.d w0, w8, w16"); - COMPARE(fcle_w(w16, w9, w24), "79984c1a fcle.w w16, w9, w24"); - COMPARE(fcle_d(w27, w14, w1), "79a176da fcle.d w27, w14, w1"); - COMPARE(fclt_w(w28, w8, w8), "7908471a fclt.w w28, w8, w8"); - COMPARE(fclt_d(w30, w25, w11), "792bcf9a fclt.d w30, w25, w11"); - COMPARE(fcne_w(w2, w18, w23), "78d7909c fcne.w w2, w18, w23"); - COMPARE(fcne_d(w14, w20, w15), "78efa39c fcne.d w14, w20, w15"); - COMPARE(fcor_w(w10, w18, w25), "7859929c fcor.w w10, w18, w25"); - COMPARE(fcor_d(w17, w25, w11), "786bcc5c fcor.d w17, w25, w11"); - COMPARE(fcueq_w(w14, w2, w21), "78d5139a fcueq.w w14, w2, w21"); - COMPARE(fcueq_d(w29, w3, w7), "78e71f5a fcueq.d w29, w3, w7"); - COMPARE(fcule_w(w17, w5, w3), "79c32c5a fcule.w w17, w5, w3"); - COMPARE(fcule_d(w31, w1, w30), "79fe0fda fcule.d w31, w1, w30"); - COMPARE(fcult_w(w6, w25, w9), "7949c99a fcult.w w6, w25, w9"); - COMPARE(fcult_d(w27, w8, w17), "797146da fcult.d w27, w8, w17"); - COMPARE(fcun_w(w4, w20, w8), "7848a11a fcun.w w4, w20, w8"); - 
COMPARE(fcun_d(w29, w11, w3), "78635f5a fcun.d w29, w11, w3"); - COMPARE(fcune_w(w13, w18, w19), "7893935c fcune.w w13, w18, w19"); - COMPARE(fcune_d(w16, w26, w21), "78b5d41c fcune.d w16, w26, w21"); - COMPARE(fdiv_w(w13, w24, w2), "78c2c35b fdiv.w w13, w24, w2"); - COMPARE(fdiv_d(w19, w4, w25), "78f924db fdiv.d w19, w4, w25"); - COMPARE(fexdo_h(w8, w0, w16), "7a10021b fexdo.h w8, w0, w16"); - COMPARE(fexdo_w(w0, w13, w27), "7a3b681b fexdo.w w0, w13, w27"); - COMPARE(fexp2_w(w17, w0, w3), "79c3045b fexp2.w w17, w0, w3"); - COMPARE(fexp2_d(w22, w0, w10), "79ea059b fexp2.d w22, w0, w10"); - COMPARE(fmadd_w(w29, w6, w23), "7917375b fmadd.w w29, w6, w23"); - COMPARE(fmadd_d(w11, w28, w21), "7935e2db fmadd.d w11, w28, w21"); - COMPARE(fmax_w(w0, w23, w13), "7b8db81b fmax.w w0, w23, w13"); - COMPARE(fmax_d(w26, w18, w8), "7ba8969b fmax.d w26, w18, w8"); - COMPARE(fmax_a_w(w10, w16, w10), "7bca829b fmax_a.w w10, w16, w10"); - COMPARE(fmax_a_d(w30, w9, w22), "7bf64f9b fmax_a.d w30, w9, w22"); - COMPARE(fmin_w(w24, w1, w30), "7b1e0e1b fmin.w w24, w1, w30"); - COMPARE(fmin_d(w27, w27, w10), "7b2adedb fmin.d w27, w27, w10"); - COMPARE(fmin_a_w(w10, w29, w20), "7b54ea9b fmin_a.w w10, w29, w20"); - COMPARE(fmin_a_d(w13, w30, w24), "7b78f35b fmin_a.d w13, w30, w24"); - COMPARE(fmsub_w(w17, w25, w0), "7940cc5b fmsub.w w17, w25, w0"); - COMPARE(fmsub_d(w8, w18, w16), "7970921b fmsub.d w8, w18, w16"); - COMPARE(fmul_w(w3, w15, w15), "788f78db fmul.w w3, w15, w15"); - COMPARE(fmul_d(w9, w30, w10), "78aaf25b fmul.d w9, w30, w10"); - COMPARE(fsaf_w(w25, w5, w10), "7a0a2e5a fsaf.w w25, w5, w10"); - COMPARE(fsaf_d(w25, w3, w29), "7a3d1e5a fsaf.d w25, w3, w29"); - COMPARE(fseq_w(w11, w17, w13), "7a8d8ada fseq.w w11, w17, w13"); - COMPARE(fseq_d(w29, w0, w31), "7abf075a fseq.d w29, w0, w31"); - COMPARE(fsle_w(w30, w31, w31), "7b9fff9a fsle.w w30, w31, w31"); - COMPARE(fsle_d(w18, w23, w24), "7bb8bc9a fsle.d w18, w23, w24"); - COMPARE(fslt_w(w12, w5, w6), "7b062b1a fslt.w w12, w5, w6"); - COMPARE(fslt_d(w16, w26, w21), "7b35d41a fslt.d w16, w26, w21"); - COMPARE(fsne_w(w30, w1, w12), "7acc0f9c fsne.w w30, w1, w12"); - COMPARE(fsne_d(w14, w13, w23), "7af76b9c fsne.d w14, w13, w23"); - COMPARE(fsor_w(w27, w13, w27), "7a5b6edc fsor.w w27, w13, w27"); - COMPARE(fsor_d(w12, w24, w11), "7a6bc31c fsor.d w12, w24, w11"); - COMPARE(fsub_w(w31, w26, w1), "7841d7db fsub.w w31, w26, w1"); - COMPARE(fsub_d(w19, w17, w27), "787b8cdb fsub.d w19, w17, w27"); - COMPARE(fsueq_w(w16, w24, w25), "7ad9c41a fsueq.w w16, w24, w25"); - COMPARE(fsueq_d(w18, w14, w14), "7aee749a fsueq.d w18, w14, w14"); - COMPARE(fsule_w(w23, w30, w13), "7bcdf5da fsule.w w23, w30, w13"); - COMPARE(fsule_d(w2, w11, w26), "7bfa589a fsule.d w2, w11, w26"); - COMPARE(fsult_w(w11, w26, w22), "7b56d2da fsult.w w11, w26, w22"); - COMPARE(fsult_d(w6, w23, w30), "7b7eb99a fsult.d w6, w23, w30"); - COMPARE(fsun_w(w3, w18, w28), "7a5c90da fsun.w w3, w18, w28"); - COMPARE(fsun_d(w18, w11, w19), "7a735c9a fsun.d w18, w11, w19"); - COMPARE(fsune_w(w16, w31, w2), "7a82fc1c fsune.w w16, w31, w2"); - COMPARE(fsune_d(w3, w26, w17), "7ab1d0dc fsune.d w3, w26, w17"); - COMPARE(ftq_h(w16, w4, w24), "7a98241b ftq.h w16, w4, w24"); - COMPARE(ftq_w(w5, w5, w25), "7ab9295b ftq.w w5, w5, w25"); - COMPARE(madd_q_h(w16, w20, w10), "794aa41c madd_q.h w16, w20, w10"); - COMPARE(madd_q_w(w28, w2, w9), "7969171c madd_q.w w28, w2, w9"); - COMPARE(maddr_q_h(w8, w18, w9), "7b49921c maddr_q.h w8, w18, w9"); - COMPARE(maddr_q_w(w29, w12, w16), - "7b70675c maddr_q.w w29, w12, w16"); - 
COMPARE(msub_q_h(w24, w26, w10), "798ad61c msub_q.h w24, w26, w10"); - COMPARE(msub_q_w(w13, w30, w28), "79bcf35c msub_q.w w13, w30, w28"); - COMPARE(msubr_q_h(w12, w21, w11), - "7b8bab1c msubr_q.h w12, w21, w11"); - COMPARE(msubr_q_w(w1, w14, w20), "7bb4705c msubr_q.w w1, w14, w20"); - COMPARE(mul_q_h(w6, w16, w30), "791e819c mul_q.h w6, w16, w30"); - COMPARE(mul_q_w(w16, w1, w4), "79240c1c mul_q.w w16, w1, w4"); - COMPARE(mulr_q_h(w6, w20, w19), "7b13a19c mulr_q.h w6, w20, w19"); - COMPARE(mulr_q_w(w27, w1, w20), "7b340edc mulr_q.w w27, w1, w20"); - } - VERIFY_RUN(); -} - -TEST(MSA_ELM) { - SET_UP(); - if (IsMipsArchVariant(kMips32r6) && CpuFeatures::IsSupported(MIPS_SIMD)) { - CpuFeatureScope fscope(&assm, MIPS_SIMD); - - COMPARE(copy_s_b(t5, w8, 2), "78824359 copy_s.b t5, w8[2]"); - COMPARE(copy_s_h(at, w25, 0), "78a0c859 copy_s.h at, w25[0]"); - COMPARE(copy_s_w(s6, w5, 1), "78b12d99 copy_s.w s6, w5[1]"); - COMPARE(copy_u_b(s6, w20, 4), "78c4a599 copy_u.b s6, w20[4]"); - COMPARE(copy_u_h(s4, w4, 0), "78e02519 copy_u.h s4, w4[0]"); - COMPARE(sldi_b(w0, w29, 4), "7804e819 sldi.b w0, w29[4]"); - COMPARE(sldi_h(w8, w17, 0), "78208a19 sldi.h w8, w17[0]"); - COMPARE(sldi_w(w20, w27, 2), "7832dd19 sldi.w w20, w27[2]"); - COMPARE(sldi_d(w4, w12, 0), "78386119 sldi.d w4, w12[0]"); - COMPARE(splati_b(w25, w3, 2), "78421e59 splati.b w25, w3[2]"); - COMPARE(splati_h(w24, w28, 1), "7861e619 splati.h w24, w28[1]"); - COMPARE(splati_w(w13, w18, 0), "78709359 splati.w w13, w18[0]"); - COMPARE(splati_d(w28, w1, 0), "78780f19 splati.d w28, w1[0]"); - COMPARE(move_v(w23, w24), "78bec5d9 move.v w23, w24"); - COMPARE(insert_b(w23, 3, sp), "7903edd9 insert.b w23[3], sp"); - COMPARE(insert_h(w20, 2, a1), "79222d19 insert.h w20[2], a1"); - COMPARE(insert_w(w8, 2, s0), "79328219 insert.w w8[2], s0"); - COMPARE(insve_b(w25, 3, w9), "79434e59 insve.b w25[3], w9[0]"); - COMPARE(insve_h(w24, 2, w2), "79621619 insve.h w24[2], w2[0]"); - COMPARE(insve_w(w0, 2, w13), "79726819 insve.w w0[2], w13[0]"); - COMPARE(insve_d(w3, 0, w18), "797890d9 insve.d w3[0], w18[0]"); - COMPARE(cfcmsa(at, MSAIR), "787e0059 cfcmsa at, MSAIR"); - COMPARE(cfcmsa(v0, MSACSR), "787e0899 cfcmsa v0, MSACSR"); - COMPARE(ctcmsa(MSAIR, at), "783e0819 ctcmsa MSAIR, at"); - COMPARE(ctcmsa(MSACSR, v0), "783e1059 ctcmsa MSACSR, v0"); - } - VERIFY_RUN(); -} - -TEST(MSA_BIT) { - SET_UP(); - if (IsMipsArchVariant(kMips32r6) && CpuFeatures::IsSupported(MIPS_SIMD)) { - CpuFeatureScope fscope(&assm, MIPS_SIMD); - - COMPARE(bclri_b(w21, w30, 2), "79f2f549 bclri.b w21, w30, 2"); - COMPARE(bclri_h(w24, w21, 0), "79e0ae09 bclri.h w24, w21, 0"); - COMPARE(bclri_w(w23, w30, 3), "79c3f5c9 bclri.w w23, w30, 3"); - COMPARE(bclri_d(w9, w11, 0), "79805a49 bclri.d w9, w11, 0"); - COMPARE(binsli_b(w25, w12, 1), "7b716649 binsli.b w25, w12, 1"); - COMPARE(binsli_h(w21, w22, 0), "7b60b549 binsli.h w21, w22, 0"); - COMPARE(binsli_w(w22, w4, 0), "7b402589 binsli.w w22, w4, 0"); - COMPARE(binsli_d(w6, w2, 6), "7b061189 binsli.d w6, w2, 6"); - COMPARE(binsri_b(w15, w19, 0), "7bf09bc9 binsri.b w15, w19, 0"); - COMPARE(binsri_h(w8, w30, 1), "7be1f209 binsri.h w8, w30, 1"); - COMPARE(binsri_w(w2, w19, 5), "7bc59889 binsri.w w2, w19, 5"); - COMPARE(binsri_d(w18, w20, 1), "7b81a489 binsri.d w18, w20, 1"); - COMPARE(bnegi_b(w24, w19, 0), "7af09e09 bnegi.b w24, w19, 0"); - COMPARE(bnegi_h(w28, w11, 3), "7ae35f09 bnegi.h w28, w11, 3"); - COMPARE(bnegi_w(w1, w27, 5), "7ac5d849 bnegi.w w1, w27, 5"); - COMPARE(bnegi_d(w4, w21, 1), "7a81a909 bnegi.d w4, w21, 1"); - 
COMPARE(bseti_b(w18, w8, 0), "7a704489 bseti.b w18, w8, 0"); - COMPARE(bseti_h(w24, w14, 2), "7a627609 bseti.h w24, w14, 2"); - COMPARE(bseti_w(w9, w18, 4), "7a449249 bseti.w w9, w18, 4"); - COMPARE(bseti_d(w7, w15, 1), "7a0179c9 bseti.d w7, w15, 1"); - COMPARE(sat_s_b(w31, w31, 2), "7872ffca sat_s.b w31, w31, 2"); - COMPARE(sat_s_h(w19, w19, 0), "78609cca sat_s.h w19, w19, 0"); - COMPARE(sat_s_w(w19, w29, 0), "7840ecca sat_s.w w19, w29, 0"); - COMPARE(sat_s_d(w11, w22, 0), "7800b2ca sat_s.d w11, w22, 0"); - COMPARE(sat_u_b(w1, w13, 3), "78f3684a sat_u.b w1, w13, 3"); - COMPARE(sat_u_h(w30, w24, 4), "78e4c78a sat_u.h w30, w24, 4"); - COMPARE(sat_u_w(w31, w13, 0), "78c06fca sat_u.w w31, w13, 0"); - COMPARE(sat_u_d(w29, w16, 5), "7885874a sat_u.d w29, w16, 5"); - COMPARE(slli_b(w23, w10, 1), "787155c9 slli.b w23, w10, 1"); - COMPARE(slli_h(w9, w18, 1), "78619249 slli.h w9, w18, 1"); - COMPARE(slli_w(w11, w29, 4), "7844eac9 slli.w w11, w29, 4"); - COMPARE(slli_d(w25, w20, 1), "7801a649 slli.d w25, w20, 1"); - COMPARE(srai_b(w24, w29, 1), "78f1ee09 srai.b w24, w29, 1"); - COMPARE(srai_h(w1, w6, 0), "78e03049 srai.h w1, w6, 0"); - COMPARE(srai_w(w7, w26, 1), "78c1d1c9 srai.w w7, w26, 1"); - COMPARE(srai_d(w20, w25, 3), "7883cd09 srai.d w20, w25, 3"); - COMPARE(srari_b(w5, w25, 0), "7970c94a srari.b w5, w25, 0"); - COMPARE(srari_h(w7, w6, 4), "796431ca srari.h w7, w6, 4"); - COMPARE(srari_w(w17, w11, 5), "79455c4a srari.w w17, w11, 5"); - COMPARE(srari_d(w21, w25, 5), "7905cd4a srari.d w21, w25, 5"); - COMPARE(srli_b(w2, w0, 2), "79720089 srli.b w2, w0, 2"); - COMPARE(srli_h(w31, w31, 2), "7962ffc9 srli.h w31, w31, 2"); - COMPARE(srli_w(w5, w9, 4), "79444949 srli.w w5, w9, 4"); - COMPARE(srli_d(w27, w26, 5), "7905d6c9 srli.d w27, w26, 5"); - COMPARE(srlri_b(w18, w3, 0), "79f01c8a srlri.b w18, w3, 0"); - COMPARE(srlri_h(w1, w2, 3), "79e3104a srlri.h w1, w2, 3"); - COMPARE(srlri_w(w11, w22, 2), "79c2b2ca srlri.w w11, w22, 2"); - COMPARE(srlri_d(w24, w10, 6), "7986560a srlri.d w24, w10, 6"); - } - VERIFY_RUN(); -} - -} // namespace internal -} // namespace v8 diff --git a/test/cctest/test-lockers.cc b/test/cctest/test-lockers.cc index 287dc29e76..3bcfcd3d83 100644 --- a/test/cctest/test-lockers.cc +++ b/test/cctest/test-lockers.cc @@ -524,7 +524,7 @@ class SeparateIsolatesLocksNonexclusiveThread : public JoinableThread { // Run parallel threads that lock and access different isolates in parallel TEST(SeparateIsolatesLocksNonexclusive) { i::FLAG_always_turbofan = false; -#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_S390 +#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_S390 const int kNThreads = 50; #else const int kNThreads = 100; @@ -609,7 +609,7 @@ class LockerUnlockerThread : public JoinableThread { // Use unlocker inside of a Locker, multiple threads. TEST(LockerUnlocker) { i::FLAG_always_turbofan = false; -#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_S390 +#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_S390 const int kNThreads = 50; #else const int kNThreads = 100; @@ -667,7 +667,7 @@ class LockTwiceAndUnlockThread : public JoinableThread { // Use Unlocker inside two Lockers. 
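Every COMPARE line in the deleted disassembler tests above asserts the same invariant: a given assembler call must emit exactly one 32-bit instruction word (the quoted hex), and the disassembler must print that word back as the quoted text. The encoding half of that invariant needs nothing but bit packing. Below is a minimal standalone sketch in that spirit; RType is a local helper (not a V8 API), the field layout is the architectural MIPS R-type format, and the two expected words are copied from the deleted expectations above.

#include <cassert>
#include <cstdint>

// R-type word: opcode(6)=0 (SPECIAL) | rs(5) | rt(5) | rd(5) | sa(5) | funct(6).
constexpr uint32_t RType(uint32_t rs, uint32_t rt, uint32_t rd, uint32_t sa,
                         uint32_t funct) {
  return rs << 21 | rt << 16 | rd << 11 | sa << 6 | funct;
}

int main() {
  constexpr uint32_t s0 = 16, s1 = 17, s2 = 18;  // o32 register numbers
  // Immediate shifts put the amount in sa and leave rs at 0:
  // srl s0, s1, 8 was expected to read back as "00118202".
  assert(RType(0, s1, s0, 8, 0x02) == 0x00118202u);
  // Three-register ops use rs/rt as sources:
  // slt s0, s1, s2 was expected to read back as "0232802a".
  assert(RType(s1, s2, s0, 0, 0x2a) == 0x0232802au);
  return 0;
}

The deleted tests go one step further and also run V8's disassembler over the emitted word, comparing the printed text; that is what catches formatting regressions as opposed to pure encoding bugs.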
TEST(LockTwiceAndUnlock) { i::FLAG_always_turbofan = false; -#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_S390 +#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_S390 const int kNThreads = 50; #else const int kNThreads = 100; diff --git a/test/cctest/test-macro-assembler-mips.cc b/test/cctest/test-macro-assembler-mips.cc deleted file mode 100644 index feb86a1e45..0000000000 --- a/test/cctest/test-macro-assembler-mips.cc +++ /dev/null @@ -1,1372 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#include - -#include - -#include "src/api/api-inl.h" -#include "src/base/utils/random-number-generator.h" -#include "src/codegen/assembler-inl.h" -#include "src/codegen/macro-assembler.h" -#include "src/deoptimizer/deoptimizer.h" -#include "src/execution/simulator.h" -#include "src/objects/js-array-inl.h" -#include "src/objects/objects-inl.h" -#include "src/utils/ostreams.h" -#include "test/cctest/cctest.h" -#include "test/common/assembler-tester.h" - -namespace v8 { -namespace internal { - -// TODO(mips): Refine these signatures per test case. 
-using F1 = void*(int x, int p1, int p2, int p3, int p4);
-using F3 = void*(void* p, int p1, int p2, int p3, int p4);
-using F4 = void*(void* p0, void* p1, int p2, int p3, int p4);
-
-#define __ masm->
-
-TEST(BYTESWAP) {
-  CcTest::InitializeVM();
-  Isolate* isolate = CcTest::i_isolate();
-  HandleScope scope(isolate);
-
-  struct T {
-    uint32_t s4;
-    uint32_t s2;
-    uint32_t u2;
-  };
-
-  T t;
-  uint32_t test_values[] = {0x5612FFCD, 0x9D327ACC, 0x781A15C3, 0xFCDE, 0x9F,
-                            0xC81A15C3, 0x80000000, 0xFFFFFFFF, 0x00008000};
-
-  MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
-
-  MacroAssembler* masm = &assembler;
-
-  __ lw(a1, MemOperand(a0, offsetof(T, s4)));
-  __ nop();
-  __ ByteSwapSigned(a1, a1, 4);
-  __ sw(a1, MemOperand(a0, offsetof(T, s4)));
-
-  __ lw(a1, MemOperand(a0, offsetof(T, s2)));
-  __ nop();
-  __ ByteSwapSigned(a1, a1, 2);
-  __ sw(a1, MemOperand(a0, offsetof(T, s2)));
-
-  __ lw(a1, MemOperand(a0, offsetof(T, u2)));
-  __ nop();
-  __ ByteSwapUnsigned(a1, a1, 2);
-  __ sw(a1, MemOperand(a0, offsetof(T, u2)));
-
-  __ jr(ra);
-  __ nop();
-
-  CodeDesc desc;
-  masm->GetCode(isolate, &desc);
-  Handle<Code> code =
-      Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
-  auto f = GeneratedCode<F3>::FromCode(*code);
-
-  for (size_t i = 0; i < arraysize(test_values); i++) {
-    int16_t in_s2 = static_cast<int16_t>(test_values[i]);
-    uint16_t in_u2 = static_cast<uint16_t>(test_values[i]);
-
-    t.s4 = test_values[i];
-    t.s2 = static_cast<uint32_t>(in_s2);
-    t.u2 = static_cast<uint32_t>(in_u2);
-
-    f.Call(&t, 0, 0, 0, 0);
-
-    CHECK_EQ(ByteReverse(test_values[i]), t.s4);
-    CHECK_EQ(ByteReverse(in_s2), static_cast<int16_t>(t.s2));
-    CHECK_EQ(ByteReverse(in_u2), static_cast<uint16_t>(t.u2));
-  }
-}
-
-static void TestNaN(const char *code) {
-  // NaN value is different on MIPS and x86 architectures, and the TEST(NaNx)
-  // tests check the case where an x86 NaN value is serialized into the
-  // snapshot on the simulator during cross compilation.
-  v8::HandleScope scope(CcTest::isolate());
-  v8::Local<v8::Context> context = CcTest::NewContext({PRINT_EXTENSION_ID});
-  v8::Context::Scope context_scope(context);
-
-  v8::Local<v8::Script> script =
-      v8::Script::Compile(context, v8_str(code)).ToLocalChecked();
-  v8::Local<v8::Object> result =
-      v8::Local<v8::Object>::Cast(script->Run(context).ToLocalChecked());
-  i::Handle<i::JSReceiver> o = v8::Utils::OpenHandle(*result);
-  i::Handle<i::JSArray> array1(i::JSArray::cast(*o), o->GetIsolate());
-  i::FixedDoubleArray a = i::FixedDoubleArray::cast(array1->elements());
-  double value = a.get_scalar(0);
-  CHECK(std::isnan(value) &&
-        base::bit_cast<uint64_t>(value) ==
-            base::bit_cast<uint64_t>(std::numeric_limits<double>::quiet_NaN()));
-}
-
-
-TEST(NaN0) {
-  TestNaN(
-      "var result;"
-      "for (var i = 0; i < 2; i++) {"
-      " result = new Array(Number.NaN, Number.POSITIVE_INFINITY);"
-      "}"
-      "result;");
-}
-
-
-TEST(NaN1) {
-  TestNaN(
-      "var result;"
-      "for (var i = 0; i < 2; i++) {"
-      " result = [NaN];"
-      "}"
-      "result;");
-}
-
-
-TEST(jump_tables4) {
-  // Similar to test-assembler-mips jump_tables1, with extra test for branch
-  // trampoline required before emission of the dd table (where trampolines are
-  // blocked), and proper transition to long-branch mode.
-  // Regression test for v8:4294.
- CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); - MacroAssembler* masm = &assembler; - - const int kNumCases = 512; - int values[kNumCases]; - isolate->random_number_generator()->NextBytes(values, sizeof(values)); - Label labels[kNumCases]; - Label near_start, end, done; - - __ Push(ra); - __ mov(v0, zero_reg); - - __ Branch(&end); - __ bind(&near_start); - - // Generate slightly less than 32K instructions, which will soon require - // trampoline for branch distance fixup. - for (int i = 0; i < 32768 - 256; ++i) { - __ addiu(v0, v0, 1); - } - - __ GenerateSwitchTable(a0, kNumCases, - [&labels](size_t i) { return labels + i; }); - - for (int i = 0; i < kNumCases; ++i) { - __ bind(&labels[i]); - __ li(v0, values[i]); - __ Branch(&done); - } - - __ bind(&done); - __ Pop(ra); - __ jr(ra); - __ nop(); - - __ bind(&end); - __ Branch(&near_start); - - CodeDesc desc; - masm->GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); -#ifdef OBJECT_PRINT - code->Print(std::cout); -#endif - auto f = GeneratedCode::FromCode(*code); - for (int i = 0; i < kNumCases; ++i) { - int res = reinterpret_cast(f.Call(i, 0, 0, 0, 0)); - ::printf("f(%d) = %d\n", i, res); - CHECK_EQ(values[i], res); - } -} - - -TEST(jump_tables5) { - if (!IsMipsArchVariant(kMips32r6)) return; - - // Similar to test-assembler-mips jump_tables1, with extra test for emitting a - // compact branch instruction before emission of the dd table. - CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); - MacroAssembler* masm = &assembler; - - const int kNumCases = 512; - int values[kNumCases]; - isolate->random_number_generator()->NextBytes(values, sizeof(values)); - Label labels[kNumCases]; - Label done; - - __ Push(ra); - - { - __ BlockTrampolinePoolFor(kNumCases + 6 + 1); - - __ addiupc(at, 6 + 1); - __ Lsa(at, at, a0, 2); - __ lw(at, MemOperand(at)); - __ jalr(at); - __ nop(); // Branch delay slot nop. - __ bc(&done); - // A nop instruction must be generated by the forbidden slot guard - // (Assembler::dd(Label*)). - for (int i = 0; i < kNumCases; ++i) { - __ dd(&labels[i]); - } - } - - for (int i = 0; i < kNumCases; ++i) { - __ bind(&labels[i]); - __ li(v0, values[i]); - __ jr(ra); - __ nop(); - } - - __ bind(&done); - __ Pop(ra); - __ jr(ra); - __ nop(); - - CodeDesc desc; - masm->GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); -#ifdef OBJECT_PRINT - code->Print(std::cout); -#endif - auto f = GeneratedCode::FromCode(*code); - for (int i = 0; i < kNumCases; ++i) { - int32_t res = reinterpret_cast(f.Call(i, 0, 0, 0, 0)); - ::printf("f(%d) = %d\n", i, res); - CHECK_EQ(values[i], res); - } -} - -TEST(jump_tables6) { - // Similar to test-assembler-mips jump_tables1, with extra test for branch - // trampoline required after emission of the dd table (where trampolines are - // blocked). 
This test checks if number of really generated instructions is - // greater than number of counted instructions from code, as we are expecting - // generation of trampoline in this case (when number of kFillInstr - // instructions is close to 32K) - CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); - MacroAssembler* masm = &assembler; - - const int kSwitchTableCases = 40; - - const int kMaxBranchOffset = Assembler::kMaxBranchOffset; - const int kTrampolineSlotsSize = Assembler::kTrampolineSlotsSize; - const int kSwitchTablePrologueSize = MacroAssembler::kSwitchTablePrologueSize; - - const int kMaxOffsetForTrampolineStart = - kMaxBranchOffset - 16 * kTrampolineSlotsSize; - const int kFillInstr = (kMaxOffsetForTrampolineStart / kInstrSize) - - (kSwitchTablePrologueSize + kSwitchTableCases) - 20; - - int values[kSwitchTableCases]; - isolate->random_number_generator()->NextBytes(values, sizeof(values)); - Label labels[kSwitchTableCases]; - Label near_start, end, done; - - __ Push(ra); - __ mov(v0, zero_reg); - - int offs1 = masm->pc_offset(); - int gen_insn = 0; - - __ Branch(&end); - gen_insn += Assembler::IsCompactBranchSupported() ? 1 : 2; - __ bind(&near_start); - - // Generate slightly less than 32K instructions, which will soon require - // trampoline for branch distance fixup. - for (int i = 0; i < kFillInstr; ++i) { - __ addiu(v0, v0, 1); - } - gen_insn += kFillInstr; - - __ GenerateSwitchTable(a0, kSwitchTableCases, - [&labels](size_t i) { return labels + i; }); - gen_insn += (kSwitchTablePrologueSize + kSwitchTableCases); - - for (int i = 0; i < kSwitchTableCases; ++i) { - __ bind(&labels[i]); - __ li(v0, values[i]); - __ Branch(&done); - } - gen_insn += - ((Assembler::IsCompactBranchSupported() ? 3 : 4) * kSwitchTableCases); - - // If offset from here to first branch instr is greater than max allowed - // offset for trampoline ... - CHECK_LT(kMaxOffsetForTrampolineStart, masm->pc_offset() - offs1); - // ... 
number of generated instructions must be greater then "gen_insn", - // as we are expecting trampoline generation - CHECK_LT(gen_insn, (masm->pc_offset() - offs1) / kInstrSize); - - __ bind(&done); - __ Pop(ra); - __ jr(ra); - __ nop(); - - __ bind(&end); - __ Branch(&near_start); - - CodeDesc desc; - masm->GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); -#ifdef OBJECT_PRINT - code->Print(std::cout); -#endif - auto f = GeneratedCode::FromCode(*code); - for (int i = 0; i < kSwitchTableCases; ++i) { - int res = reinterpret_cast(f.Call(i, 0, 0, 0, 0)); - ::printf("f(%d) = %d\n", i, res); - CHECK_EQ(values[i], res); - } -} - -static uint32_t run_lsa(uint32_t rt, uint32_t rs, int8_t sa) { - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); - MacroAssembler* masm = &assembler; - - __ Lsa(v0, a0, a1, sa); - __ jr(ra); - __ nop(); - - CodeDesc desc; - assembler.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - - auto f = GeneratedCode::FromCode(*code); - - uint32_t res = reinterpret_cast(f.Call(rt, rs, 0, 0, 0)); - - return res; -} - - -TEST(Lsa) { - CcTest::InitializeVM(); - struct TestCaseLsa { - int32_t rt; - int32_t rs; - uint8_t sa; - uint32_t expected_res; - }; - - struct TestCaseLsa tc[] = {// rt, rs, sa, expected_res - {0x4, 0x1, 1, 0x6}, - {0x4, 0x1, 2, 0x8}, - {0x4, 0x1, 3, 0xC}, - {0x4, 0x1, 4, 0x14}, - {0x4, 0x1, 5, 0x24}, - {0x0, 0x1, 1, 0x2}, - {0x0, 0x1, 2, 0x4}, - {0x0, 0x1, 3, 0x8}, - {0x0, 0x1, 4, 0x10}, - {0x0, 0x1, 5, 0x20}, - {0x4, 0x0, 1, 0x4}, - {0x4, 0x0, 2, 0x4}, - {0x4, 0x0, 3, 0x4}, - {0x4, 0x0, 4, 0x4}, - {0x4, 0x0, 5, 0x4}, - - // Shift overflow. - {0x4, INT32_MAX, 1, 0x2}, - {0x4, INT32_MAX >> 1, 2, 0x0}, - {0x4, INT32_MAX >> 2, 3, 0xFFFFFFFC}, - {0x4, INT32_MAX >> 3, 4, 0xFFFFFFF4}, - {0x4, INT32_MAX >> 4, 5, 0xFFFFFFE4}, - - // Signed addition overflow. - {INT32_MAX - 1, 0x1, 1, 0x80000000}, - {INT32_MAX - 3, 0x1, 2, 0x80000000}, - {INT32_MAX - 7, 0x1, 3, 0x80000000}, - {INT32_MAX - 15, 0x1, 4, 0x80000000}, - {INT32_MAX - 31, 0x1, 5, 0x80000000}, - - // Addition overflow. - {-2, 0x1, 1, 0x0}, - {-4, 0x1, 2, 0x0}, - {-8, 0x1, 3, 0x0}, - {-16, 0x1, 4, 0x0}, - {-32, 0x1, 5, 0x0}}; - - size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseLsa); - for (size_t i = 0; i < nr_test_cases; ++i) { - uint32_t res = run_lsa(tc[i].rt, tc[i].rs, tc[i].sa); - PrintF("0x%x =? 0x%x == lsa(v0, %x, %x, %hhu)\n", tc[i].expected_res, res, - tc[i].rt, tc[i].rs, tc[i].sa); - CHECK_EQ(tc[i].expected_res, res); - } -} - -static const std::vector cvt_trunc_uint32_test_values() { - static const uint32_t kValues[] = {0x00000000, 0x00000001, 0x00FFFF00, - 0x7FFFFFFF, 0x80000000, 0x80000001, - 0x80FFFF00, 0x8FFFFFFF, 0xFFFFFFFF}; - return std::vector(&kValues[0], &kValues[arraysize(kValues)]); -} - -static const std::vector cvt_trunc_int32_test_values() { - static const int32_t kValues[] = { - static_cast(0x00000000), static_cast(0x00000001), - static_cast(0x00FFFF00), static_cast(0x7FFFFFFF), - static_cast(0x80000000), static_cast(0x80000001), - static_cast(0x80FFFF00), static_cast(0x8FFFFFFF), - static_cast(0xFFFFFFFF)}; - return std::vector(&kValues[0], &kValues[arraysize(kValues)]); -} - -// Helper macros that can be used in FOR_INT32_INPUTS(i) { ... *i ... 
} -#define FOR_INPUTS(ctype, itype, var, test_vector) \ - std::vector var##_vec = test_vector(); \ - for (std::vector::iterator var = var##_vec.begin(); \ - var != var##_vec.end(); ++var) - -#define FOR_INPUTS2(ctype, itype, var, var2, test_vector) \ - std::vector var##_vec = test_vector(); \ - std::vector::iterator var; \ - std::vector::reverse_iterator var2; \ - for (var = var##_vec.begin(), var2 = var##_vec.rbegin(); \ - var != var##_vec.end(); ++var, ++var2) - -#define FOR_ENUM_INPUTS(var, type, test_vector) \ - FOR_INPUTS(enum type, type, var, test_vector) -#define FOR_STRUCT_INPUTS(var, type, test_vector) \ - FOR_INPUTS(struct type, type, var, test_vector) -#define FOR_UINT32_INPUTS(var, test_vector) \ - FOR_INPUTS(uint32_t, uint32, var, test_vector) -#define FOR_INT32_INPUTS(var, test_vector) \ - FOR_INPUTS(int32_t, int32, var, test_vector) -#define FOR_INT32_INPUTS2(var, var2, test_vector) \ - FOR_INPUTS2(int32_t, int32, var, var2, test_vector) - -#define FOR_UINT64_INPUTS(var, test_vector) \ - FOR_INPUTS(uint64_t, uint32, var, test_vector) - -template -RET_TYPE run_Cvt(IN_TYPE x, Func GenerateConvertInstructionFunc) { - using F_CVT = RET_TYPE(IN_TYPE x0, int x1, int x2, int x3, int x4); - - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - MacroAssembler* masm = &assm; - - __ mtc1(a0, f4); - GenerateConvertInstructionFunc(masm); - __ mfc1(v0, f2); - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - - auto f = GeneratedCode::FromCode(*code); - - return reinterpret_cast(f.Call(x, 0, 0, 0, 0)); -} - -TEST(cvt_s_w_Trunc_uw_s) { - CcTest::InitializeVM(); - FOR_UINT32_INPUTS(i, cvt_trunc_uint32_test_values) { - uint32_t input = *i; - auto fn = [](MacroAssembler* masm) { - __ cvt_s_w(f0, f4); - __ Trunc_uw_s(f2, f0, f6); - }; - CHECK_EQ(static_cast(input), run_Cvt(input, fn)); - } -} - -TEST(cvt_d_w_Trunc_w_d) { - CcTest::InitializeVM(); - FOR_INT32_INPUTS(i, cvt_trunc_int32_test_values) { - int32_t input = *i; - auto fn = [](MacroAssembler* masm) { - __ cvt_d_w(f0, f4); - __ Trunc_w_d(f2, f0); - }; - CHECK_EQ(static_cast(input), run_Cvt(input, fn)); - } -} - -static const std::vector overflow_int32_test_values() { - static const int32_t kValues[] = { - static_cast(0xF0000000), static_cast(0x00000001), - static_cast(0xFF000000), static_cast(0x0000F000), - static_cast(0x0F000000), static_cast(0x991234AB), - static_cast(0xB0FFFF01), static_cast(0x00006FFF), - static_cast(0xFFFFFFFF)}; - return std::vector(&kValues[0], &kValues[arraysize(kValues)]); -} - -TEST(OverflowInstructions) { - CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope handles(isolate); - - struct T { - int32_t lhs; - int32_t rhs; - int32_t output_add; - int32_t output_add2; - int32_t output_sub; - int32_t output_sub2; - int32_t output_mul; - int32_t output_mul2; - int32_t overflow_add; - int32_t overflow_add2; - int32_t overflow_sub; - int32_t overflow_sub2; - int32_t overflow_mul; - int32_t overflow_mul2; - }; - T t; - - FOR_INT32_INPUTS(i, overflow_int32_test_values) { - FOR_INT32_INPUTS(j, overflow_int32_test_values) { - int32_t ii = *i; - int32_t jj = *j; - int32_t expected_add, expected_sub, expected_mul; - bool expected_add_ovf, expected_sub_ovf, expected_mul_ovf; - MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); - MacroAssembler* masm = &assembler; - - __ 
lw(t0, MemOperand(a0, offsetof(T, lhs))); - __ lw(t1, MemOperand(a0, offsetof(T, rhs))); - - __ AddOverflow(t2, t0, Operand(t1), t3); - __ sw(t2, MemOperand(a0, offsetof(T, output_add))); - __ sw(t3, MemOperand(a0, offsetof(T, overflow_add))); - __ mov(t3, zero_reg); - __ AddOverflow(t0, t0, Operand(t1), t3); - __ sw(t0, MemOperand(a0, offsetof(T, output_add2))); - __ sw(t3, MemOperand(a0, offsetof(T, overflow_add2))); - - __ lw(t0, MemOperand(a0, offsetof(T, lhs))); - __ lw(t1, MemOperand(a0, offsetof(T, rhs))); - - __ SubOverflow(t2, t0, Operand(t1), t3); - __ sw(t2, MemOperand(a0, offsetof(T, output_sub))); - __ sw(t3, MemOperand(a0, offsetof(T, overflow_sub))); - __ mov(t3, zero_reg); - __ SubOverflow(t0, t0, Operand(t1), t3); - __ sw(t0, MemOperand(a0, offsetof(T, output_sub2))); - __ sw(t3, MemOperand(a0, offsetof(T, overflow_sub2))); - - __ lw(t0, MemOperand(a0, offsetof(T, lhs))); - __ lw(t1, MemOperand(a0, offsetof(T, rhs))); - - __ MulOverflow(t2, t0, Operand(t1), t3); - __ sw(t2, MemOperand(a0, offsetof(T, output_mul))); - __ sw(t3, MemOperand(a0, offsetof(T, overflow_mul))); - __ mov(t3, zero_reg); - __ MulOverflow(t0, t0, Operand(t1), t3); - __ sw(t0, MemOperand(a0, offsetof(T, output_mul2))); - __ sw(t3, MemOperand(a0, offsetof(T, overflow_mul2))); - - __ jr(ra); - __ nop(); - - CodeDesc desc; - masm->GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - auto f = GeneratedCode::FromCode(*code); - t.lhs = ii; - t.rhs = jj; - f.Call(&t, 0, 0, 0, 0); - - expected_add_ovf = base::bits::SignedAddOverflow32(ii, jj, &expected_add); - expected_sub_ovf = base::bits::SignedSubOverflow32(ii, jj, &expected_sub); - expected_mul_ovf = base::bits::SignedMulOverflow32(ii, jj, &expected_mul); - - CHECK_EQ(expected_add_ovf, t.overflow_add < 0); - CHECK_EQ(expected_sub_ovf, t.overflow_sub < 0); - CHECK_EQ(expected_mul_ovf, t.overflow_mul != 0); - - CHECK_EQ(t.overflow_add, t.overflow_add2); - CHECK_EQ(t.overflow_sub, t.overflow_sub2); - CHECK_EQ(t.overflow_mul, t.overflow_mul2); - - CHECK_EQ(expected_add, t.output_add); - CHECK_EQ(expected_add, t.output_add2); - CHECK_EQ(expected_sub, t.output_sub); - CHECK_EQ(expected_sub, t.output_sub2); - if (!expected_mul_ovf) { - CHECK_EQ(expected_mul, t.output_mul); - CHECK_EQ(expected_mul, t.output_mul2); - } - } - } -} - - -TEST(min_max_nan) { - CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); - MacroAssembler* masm = &assembler; - - struct TestFloat { - double a; - double b; - double c; - double d; - float e; - float f; - float g; - float h; - }; - - TestFloat test; - const double dnan = std::numeric_limits::quiet_NaN(); - const double dinf = std::numeric_limits::infinity(); - const double dminf = -std::numeric_limits::infinity(); - const float fnan = std::numeric_limits::quiet_NaN(); - const float finf = std::numeric_limits::infinity(); - const float fminf = std::numeric_limits::infinity(); - const int kTableLength = 13; - - double inputsa[kTableLength] = {2.0, 3.0, -0.0, 0.0, 42.0, dinf, dminf, - dinf, dnan, 3.0, dinf, dnan, dnan}; - double inputsb[kTableLength] = {3.0, 2.0, 0.0, -0.0, dinf, 42.0, dinf, - dminf, 3.0, dnan, dnan, dinf, dnan}; - double outputsdmin[kTableLength] = {2.0, 2.0, -0.0, -0.0, 42.0, - 42.0, dminf, dminf, dnan, dnan, - dnan, dnan, dnan}; - double outputsdmax[kTableLength] = {3.0, 3.0, 0.0, 0.0, dinf, dinf, dinf, - dinf, dnan, dnan, dnan, dnan, 
dnan}; - - float inputse[kTableLength] = {2.0, 3.0, -0.0, 0.0, 42.0, finf, fminf, - finf, fnan, 3.0, finf, fnan, fnan}; - float inputsf[kTableLength] = {3.0, 2.0, 0.0, -0.0, finf, 42.0, finf, - fminf, 3.0, fnan, fnan, finf, fnan}; - float outputsfmin[kTableLength] = {2.0, 2.0, -0.0, -0.0, 42.0, 42.0, fminf, - fminf, fnan, fnan, fnan, fnan, fnan}; - float outputsfmax[kTableLength] = {3.0, 3.0, 0.0, 0.0, finf, finf, finf, - finf, fnan, fnan, fnan, fnan, fnan}; - - auto handle_dnan = [masm](FPURegister dst, Label* nan, Label* back) { - __ bind(nan); - __ LoadRoot(t8, RootIndex::kNanValue); - __ Ldc1(dst, FieldMemOperand(t8, HeapNumber::kValueOffset)); - __ Branch(back); - }; - - auto handle_snan = [masm, fnan](FPURegister dst, Label* nan, Label* back) { - __ bind(nan); - __ Move(dst, fnan); - __ Branch(back); - }; - - Label handle_mind_nan, handle_maxd_nan, handle_mins_nan, handle_maxs_nan; - Label back_mind_nan, back_maxd_nan, back_mins_nan, back_maxs_nan; - - __ push(s6); - __ InitializeRootRegister(); - __ Ldc1(f4, MemOperand(a0, offsetof(TestFloat, a))); - __ Ldc1(f8, MemOperand(a0, offsetof(TestFloat, b))); - __ lwc1(f2, MemOperand(a0, offsetof(TestFloat, e))); - __ lwc1(f6, MemOperand(a0, offsetof(TestFloat, f))); - __ Float64Min(f10, f4, f8, &handle_mind_nan); - __ bind(&back_mind_nan); - __ Float64Max(f12, f4, f8, &handle_maxd_nan); - __ bind(&back_maxd_nan); - __ Float32Min(f14, f2, f6, &handle_mins_nan); - __ bind(&back_mins_nan); - __ Float32Max(f16, f2, f6, &handle_maxs_nan); - __ bind(&back_maxs_nan); - __ Sdc1(f10, MemOperand(a0, offsetof(TestFloat, c))); - __ Sdc1(f12, MemOperand(a0, offsetof(TestFloat, d))); - __ swc1(f14, MemOperand(a0, offsetof(TestFloat, g))); - __ swc1(f16, MemOperand(a0, offsetof(TestFloat, h))); - __ pop(s6); - __ jr(ra); - __ nop(); - - handle_dnan(f10, &handle_mind_nan, &back_mind_nan); - handle_dnan(f12, &handle_maxd_nan, &back_maxd_nan); - handle_snan(f14, &handle_mins_nan, &back_mins_nan); - handle_snan(f16, &handle_maxs_nan, &back_maxs_nan); - - CodeDesc desc; - masm->GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - auto f = GeneratedCode::FromCode(*code); - for (int i = 0; i < kTableLength; i++) { - test.a = inputsa[i]; - test.b = inputsb[i]; - test.e = inputse[i]; - test.f = inputsf[i]; - - f.Call(&test, 0, 0, 0, 0); - - CHECK_EQ(0, memcmp(&test.c, &outputsdmin[i], sizeof(test.c))); - CHECK_EQ(0, memcmp(&test.d, &outputsdmax[i], sizeof(test.d))); - CHECK_EQ(0, memcmp(&test.g, &outputsfmin[i], sizeof(test.g))); - CHECK_EQ(0, memcmp(&test.h, &outputsfmax[i], sizeof(test.h))); - } -} - -template -bool run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset, - IN_TYPE value, Func GenerateUnalignedInstructionFunc) { - using F_CVT = int32_t(char* x0, int x1, int x2, int x3, int x4); - - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - MacroAssembler* masm = &assm; - IN_TYPE res; - - GenerateUnalignedInstructionFunc(masm, in_offset, out_offset); - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - - auto f = GeneratedCode::FromCode(*code); - - MemCopy(memory_buffer + in_offset, &value, sizeof(IN_TYPE)); - f.Call(memory_buffer, 0, 0, 0, 0); - MemCopy(&res, memory_buffer + out_offset, sizeof(IN_TYPE)); - - return res == value; -} - -static const std::vector 
unsigned_test_values() { - static const uint64_t kValues[] = { - 0x2180F18A06384414, 0x000A714532102277, 0xBC1ACCCF180649F0, - 0x8000000080008000, 0x0000000000000001, 0xFFFFFFFFFFFFFFFF, - }; - return std::vector(&kValues[0], &kValues[arraysize(kValues)]); -} - -static const std::vector unsigned_test_offset() { - static const int32_t kValues[] = {// value, offset - -132 * KB, -21 * KB, 0, 19 * KB, 135 * KB}; - return std::vector(&kValues[0], &kValues[arraysize(kValues)]); -} - -static const std::vector unsigned_test_offset_increment() { - static const int32_t kValues[] = {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5}; - return std::vector(&kValues[0], &kValues[arraysize(kValues)]); -} - -TEST(Ulh) { - CcTest::InitializeVM(); - - static const int kBufferSize = 300 * KB; - char memory_buffer[kBufferSize]; - char* buffer_middle = memory_buffer + (kBufferSize / 2); - - FOR_UINT64_INPUTS(i, unsigned_test_values) { - FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { - FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { - uint16_t value = static_cast(*i & 0xFFFF); - int32_t in_offset = *j1 + *k1; - int32_t out_offset = *j2 + *k2; - - auto fn_1 = [](MacroAssembler* masm, int32_t in_offset, - int32_t out_offset) { - __ Ulh(v0, MemOperand(a0, in_offset)); - __ Ush(v0, MemOperand(a0, out_offset), v0); - }; - CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, - out_offset, value, fn_1)); - - auto fn_2 = [](MacroAssembler* masm, int32_t in_offset, - int32_t out_offset) { - __ mov(t0, a0); - __ Ulh(a0, MemOperand(a0, in_offset)); - __ Ush(a0, MemOperand(t0, out_offset), v0); - }; - CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, - out_offset, value, fn_2)); - - auto fn_3 = [](MacroAssembler* masm, int32_t in_offset, - int32_t out_offset) { - __ mov(t0, a0); - __ Ulhu(a0, MemOperand(a0, in_offset)); - __ Ush(a0, MemOperand(t0, out_offset), t1); - }; - CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, - out_offset, value, fn_3)); - - auto fn_4 = [](MacroAssembler* masm, int32_t in_offset, - int32_t out_offset) { - __ Ulhu(v0, MemOperand(a0, in_offset)); - __ Ush(v0, MemOperand(a0, out_offset), t1); - }; - CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, - out_offset, value, fn_4)); - } - } - } -} - -TEST(Ulh_bitextension) { - CcTest::InitializeVM(); - - static const int kBufferSize = 300 * KB; - char memory_buffer[kBufferSize]; - char* buffer_middle = memory_buffer + (kBufferSize / 2); - - FOR_UINT64_INPUTS(i, unsigned_test_values) { - FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { - FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { - uint16_t value = static_cast(*i & 0xFFFF); - int32_t in_offset = *j1 + *k1; - int32_t out_offset = *j2 + *k2; - - auto fn = [](MacroAssembler* masm, int32_t in_offset, - int32_t out_offset) { - Label success, fail, end, different; - __ Ulh(t0, MemOperand(a0, in_offset)); - __ Ulhu(t1, MemOperand(a0, in_offset)); - __ Branch(&different, ne, t0, Operand(t1)); - - // If signed and unsigned values are same, check - // the upper bits to see if they are zero - __ sra(t0, t0, 15); - __ Branch(&success, eq, t0, Operand(zero_reg)); - __ Branch(&fail); - - // If signed and unsigned values are different, - // check that the upper bits are complementary - __ bind(&different); - __ sra(t1, t1, 15); - __ Branch(&fail, ne, t1, Operand(1)); - __ sra(t0, t0, 15); - __ addiu(t0, t0, 1); - __ Branch(&fail, ne, t0, Operand(zero_reg)); - // Fall through to success - - __ bind(&success); - __ Ulh(t0, MemOperand(a0, in_offset)); - __ Ush(t0, MemOperand(a0, 
out_offset), v0); - __ Branch(&end); - __ bind(&fail); - __ Ush(zero_reg, MemOperand(a0, out_offset), v0); - __ bind(&end); - }; - CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, - out_offset, value, fn)); - } - } - } -} - -TEST(Ulw) { - CcTest::InitializeVM(); - - static const int kBufferSize = 300 * KB; - char memory_buffer[kBufferSize]; - char* buffer_middle = memory_buffer + (kBufferSize / 2); - - FOR_UINT64_INPUTS(i, unsigned_test_values) { - FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { - FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { - uint32_t value = static_cast(*i & 0xFFFFFFFF); - int32_t in_offset = *j1 + *k1; - int32_t out_offset = *j2 + *k2; - - auto fn_1 = [](MacroAssembler* masm, int32_t in_offset, - int32_t out_offset) { - __ Ulw(v0, MemOperand(a0, in_offset)); - __ Usw(v0, MemOperand(a0, out_offset)); - }; - CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, - out_offset, value, fn_1)); - - auto fn_2 = [](MacroAssembler* masm, int32_t in_offset, - int32_t out_offset) { - __ mov(t0, a0); - __ Ulw(a0, MemOperand(a0, in_offset)); - __ Usw(a0, MemOperand(t0, out_offset)); - }; - CHECK_EQ(true, - run_Unaligned(buffer_middle, in_offset, out_offset, - (uint32_t)value, fn_2)); - } - } - } -} - -TEST(Ulwc1) { - CcTest::InitializeVM(); - - static const int kBufferSize = 300 * KB; - char memory_buffer[kBufferSize]; - char* buffer_middle = memory_buffer + (kBufferSize / 2); - - FOR_UINT64_INPUTS(i, unsigned_test_values) { - FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { - FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { - float value = static_cast(*i & 0xFFFFFFFF); - int32_t in_offset = *j1 + *k1; - int32_t out_offset = *j2 + *k2; - - auto fn = [](MacroAssembler* masm, int32_t in_offset, - int32_t out_offset) { - __ Ulwc1(f0, MemOperand(a0, in_offset), t0); - __ Uswc1(f0, MemOperand(a0, out_offset), t0); - }; - CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, - out_offset, value, fn)); - } - } - } -} - -TEST(Uldc1) { - CcTest::InitializeVM(); - - static const int kBufferSize = 300 * KB; - char memory_buffer[kBufferSize]; - char* buffer_middle = memory_buffer + (kBufferSize / 2); - - FOR_UINT64_INPUTS(i, unsigned_test_values) { - FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) { - FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) { - double value = static_cast(*i); - int32_t in_offset = *j1 + *k1; - int32_t out_offset = *j2 + *k2; - - auto fn = [](MacroAssembler* masm, int32_t in_offset, - int32_t out_offset) { - __ Uldc1(f0, MemOperand(a0, in_offset), t0); - __ Usdc1(f0, MemOperand(a0, out_offset), t0); - }; - CHECK_EQ(true, run_Unaligned(buffer_middle, in_offset, - out_offset, value, fn)); - } - } - } -} - -static const std::vector sltu_test_values() { - static const uint32_t kValues[] = { - 0, 1, 0x7FFE, 0x7FFF, 0x8000, - 0x8001, 0xFFFE, 0xFFFF, 0xFFFF7FFE, 0xFFFF7FFF, - 0xFFFF8000, 0xFFFF8001, 0xFFFFFFFE, 0xFFFFFFFF, - }; - return std::vector(&kValues[0], &kValues[arraysize(kValues)]); -} - -template -bool run_Sltu(uint32_t rs, uint32_t rd, Func GenerateSltuInstructionFunc) { - using F_CVT = int32_t(uint32_t x0, uint32_t x1, int x2, int x3, int x4); - - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes); - MacroAssembler* masm = &assm; - - GenerateSltuInstructionFunc(masm, rd); - __ jr(ra); - __ nop(); - - CodeDesc desc; - assm.GetCode(isolate, &desc); - Handle code = - Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build(); - 
-  auto f = GeneratedCode<F_CVT>::FromCode(*code);
-  int32_t res = reinterpret_cast<int32_t>(f.Call(rs, rd, 0, 0, 0));
-  return res == 1;
-}
-
-TEST(Sltu) {
-  CcTest::InitializeVM();
-
-  FOR_UINT32_INPUTS(i, sltu_test_values) {
-    FOR_UINT32_INPUTS(j, sltu_test_values) {
-      uint32_t rs = *i;
-      uint32_t rd = *j;
-
-      auto fn_1 = [](MacroAssembler* masm, uint32_t imm) {
-        __ Sltu(v0, a0, Operand(imm));
-      };
-      CHECK_EQ(rs < rd, run_Sltu(rs, rd, fn_1));
-
-      auto fn_2 = [](MacroAssembler* masm, uint32_t imm) {
-        __ Sltu(v0, a0, a1);
-      };
-      CHECK_EQ(rs < rd, run_Sltu(rs, rd, fn_2));
-    }
-  }
-}
-
-template <typename T, typename Inputs, typename Results>
-static GeneratedCode<F4> GenerateMacroFloat32MinMax(MacroAssembler* masm) {
-  T a = T::from_code(4);  // f4
-  T b = T::from_code(6);  // f6
-  T c = T::from_code(8);  // f8
-
-  Label ool_min_abc, ool_min_aab, ool_min_aba;
-  Label ool_max_abc, ool_max_aab, ool_max_aba;
-
-  Label done_min_abc, done_min_aab, done_min_aba;
-  Label done_max_abc, done_max_aab, done_max_aba;
-
-#define FLOAT_MIN_MAX(fminmax, res, x, y, done, ool, res_field) \
-  __ lwc1(x, MemOperand(a0, offsetof(Inputs, src1_)));          \
-  __ lwc1(y, MemOperand(a0, offsetof(Inputs, src2_)));          \
-  __ fminmax(res, x, y, &ool);                                  \
-  __ bind(&done);                                               \
-  __ swc1(a, MemOperand(a1, offsetof(Results, res_field)))
-
-  // a = min(b, c);
-  FLOAT_MIN_MAX(Float32Min, a, b, c, done_min_abc, ool_min_abc, min_abc_);
-  // a = min(a, b);
-  FLOAT_MIN_MAX(Float32Min, a, a, b, done_min_aab, ool_min_aab, min_aab_);
-  // a = min(b, a);
-  FLOAT_MIN_MAX(Float32Min, a, b, a, done_min_aba, ool_min_aba, min_aba_);
-
-  // a = max(b, c);
-  FLOAT_MIN_MAX(Float32Max, a, b, c, done_max_abc, ool_max_abc, max_abc_);
-  // a = max(a, b);
-  FLOAT_MIN_MAX(Float32Max, a, a, b, done_max_aab, ool_max_aab, max_aab_);
-  // a = max(b, a);
-  FLOAT_MIN_MAX(Float32Max, a, b, a, done_max_aba, ool_max_aba, max_aba_);
-
-#undef FLOAT_MIN_MAX
-
-  __ jr(ra);
-  __ nop();
-
-  // Generate out-of-line cases.
-  __ bind(&ool_min_abc);
-  __ Float32MinOutOfLine(a, b, c);
-  __ Branch(&done_min_abc);
-
-  __ bind(&ool_min_aab);
-  __ Float32MinOutOfLine(a, a, b);
-  __ Branch(&done_min_aab);
-
-  __ bind(&ool_min_aba);
-  __ Float32MinOutOfLine(a, b, a);
-  __ Branch(&done_min_aba);
-
-  __ bind(&ool_max_abc);
-  __ Float32MaxOutOfLine(a, b, c);
-  __ Branch(&done_max_abc);
-
-  __ bind(&ool_max_aab);
-  __ Float32MaxOutOfLine(a, a, b);
-  __ Branch(&done_max_aab);
-
-  __ bind(&ool_max_aba);
-  __ Float32MaxOutOfLine(a, b, a);
-  __ Branch(&done_max_aba);
-
-  CodeDesc desc;
-  masm->GetCode(masm->isolate(), &desc);
-  Handle<Code> code =
-      Factory::CodeBuilder(masm->isolate(), desc, CodeKind::FOR_TESTING)
-          .Build();
-#ifdef DEBUG
-  StdoutStream os;
-  code->Print(os);
-#endif
-  return GeneratedCode<F4>::FromCode(*code);
-}
-
-TEST(macro_float_minmax_f32) {
-  // Test the Float32Min and Float32Max macros.
-  CcTest::InitializeVM();
-  Isolate* isolate = CcTest::i_isolate();
-  HandleScope scope(isolate);
-
-  MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
-  MacroAssembler* masm = &assembler;
-
-  struct Inputs {
-    float src1_;
-    float src2_;
-  };
-
-  struct Results {
-    // Check all register aliasing possibilities in order to exercise all
-    // code-paths in the macro assembler.
- float min_abc_; - float min_aab_; - float min_aba_; - float max_abc_; - float max_aab_; - float max_aba_; - }; - - GeneratedCode f = - GenerateMacroFloat32MinMax(masm); - -#define CHECK_MINMAX(src1, src2, min, max) \ - do { \ - Inputs inputs = {src1, src2}; \ - Results results; \ - f.Call(&inputs, &results, 0, 0, 0); \ - CHECK_EQ(base::bit_cast(min), \ - base::bit_cast(results.min_abc_)); \ - CHECK_EQ(base::bit_cast(min), \ - base::bit_cast(results.min_aab_)); \ - CHECK_EQ(base::bit_cast(min), \ - base::bit_cast(results.min_aba_)); \ - CHECK_EQ(base::bit_cast(max), \ - base::bit_cast(results.max_abc_)); \ - CHECK_EQ(base::bit_cast(max), \ - base::bit_cast(results.max_aab_)); \ - CHECK_EQ(base::bit_cast(max), \ - base::bit_cast(results.max_aba_)); \ - /* Use a base::bit_cast to correctly identify -0.0 and NaNs. */ \ - } while (0) - - float nan_a = std::numeric_limits::quiet_NaN(); - float nan_b = std::numeric_limits::quiet_NaN(); - - CHECK_MINMAX(1.0f, -1.0f, -1.0f, 1.0f); - CHECK_MINMAX(-1.0f, 1.0f, -1.0f, 1.0f); - CHECK_MINMAX(0.0f, -1.0f, -1.0f, 0.0f); - CHECK_MINMAX(-1.0f, 0.0f, -1.0f, 0.0f); - CHECK_MINMAX(-0.0f, -1.0f, -1.0f, -0.0f); - CHECK_MINMAX(-1.0f, -0.0f, -1.0f, -0.0f); - CHECK_MINMAX(0.0f, 1.0f, 0.0f, 1.0f); - CHECK_MINMAX(1.0f, 0.0f, 0.0f, 1.0f); - - CHECK_MINMAX(0.0f, 0.0f, 0.0f, 0.0f); - CHECK_MINMAX(-0.0f, -0.0f, -0.0f, -0.0f); - CHECK_MINMAX(-0.0f, 0.0f, -0.0f, 0.0f); - CHECK_MINMAX(0.0f, -0.0f, -0.0f, 0.0f); - - CHECK_MINMAX(0.0f, nan_a, nan_a, nan_a); - CHECK_MINMAX(nan_a, 0.0f, nan_a, nan_a); - CHECK_MINMAX(nan_a, nan_b, nan_a, nan_a); - CHECK_MINMAX(nan_b, nan_a, nan_b, nan_b); - -#undef CHECK_MINMAX -} - -template -static GeneratedCode GenerateMacroFloat64MinMax(MacroAssembler* masm) { - T a = T::from_code(4); // f4 - T b = T::from_code(6); // f6 - T c = T::from_code(8); // f8 - - Label ool_min_abc, ool_min_aab, ool_min_aba; - Label ool_max_abc, ool_max_aab, ool_max_aba; - - Label done_min_abc, done_min_aab, done_min_aba; - Label done_max_abc, done_max_aab, done_max_aba; - -#define FLOAT_MIN_MAX(fminmax, res, x, y, done, ool, res_field) \ - __ Ldc1(x, MemOperand(a0, offsetof(Inputs, src1_))); \ - __ Ldc1(y, MemOperand(a0, offsetof(Inputs, src2_))); \ - __ fminmax(res, x, y, &ool); \ - __ bind(&done); \ - __ Sdc1(a, MemOperand(a1, offsetof(Results, res_field))) - - // a = min(b, c); - FLOAT_MIN_MAX(Float64Min, a, b, c, done_min_abc, ool_min_abc, min_abc_); - // a = min(a, b); - FLOAT_MIN_MAX(Float64Min, a, a, b, done_min_aab, ool_min_aab, min_aab_); - // a = min(b, a); - FLOAT_MIN_MAX(Float64Min, a, b, a, done_min_aba, ool_min_aba, min_aba_); - - // a = max(b, c); - FLOAT_MIN_MAX(Float64Max, a, b, c, done_max_abc, ool_max_abc, max_abc_); - // a = max(a, b); - FLOAT_MIN_MAX(Float64Max, a, a, b, done_max_aab, ool_max_aab, max_aab_); - // a = max(b, a); - FLOAT_MIN_MAX(Float64Max, a, b, a, done_max_aba, ool_max_aba, max_aba_); - -#undef FLOAT_MIN_MAX - - __ jr(ra); - __ nop(); - - // Generate out-of-line cases. 
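// [Editor's sketch, not from the original file: the inline/out-of-line
// pattern these generators exercise. Float64Min/Float64Max emit a fast
// inline path and branch to the supplied label for the hard cases (e.g.
// NaN operands), which the *OutOfLine helpers then resolve; this split is
// assumed from the macro names and the way this file drives them.]
//
//   Label ool, done;
//   __ Float64Min(dst, lhs, rhs, &ool);  // fast path; may jump to ool
//   __ bind(&done);
//   // ... continue with the fast-path result in dst ...
//   __ bind(&ool);
//   __ Float64MinOutOfLine(dst, lhs, rhs);  // resolves the NaN cases
//   __ Branch(&done);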
- __ bind(&ool_min_abc); - __ Float64MinOutOfLine(a, b, c); - __ Branch(&done_min_abc); - - __ bind(&ool_min_aab); - __ Float64MinOutOfLine(a, a, b); - __ Branch(&done_min_aab); - - __ bind(&ool_min_aba); - __ Float64MinOutOfLine(a, b, a); - __ Branch(&done_min_aba); - - __ bind(&ool_max_abc); - __ Float64MaxOutOfLine(a, b, c); - __ Branch(&done_max_abc); - - __ bind(&ool_max_aab); - __ Float64MaxOutOfLine(a, a, b); - __ Branch(&done_max_aab); - - __ bind(&ool_max_aba); - __ Float64MaxOutOfLine(a, b, a); - __ Branch(&done_max_aba); - - CodeDesc desc; - masm->GetCode(masm->isolate(), &desc); - Handle code = - Factory::CodeBuilder(masm->isolate(), desc, CodeKind::FOR_TESTING) - .Build(); -#ifdef DEBUG - StdoutStream os; - code->Print(os); -#endif - return GeneratedCode::FromCode(*code); -} - -TEST(macro_float_minmax_f64) { - // Test the Float64Min and Float64Max macros. - CcTest::InitializeVM(); - Isolate* isolate = CcTest::i_isolate(); - HandleScope scope(isolate); - - MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes); - MacroAssembler* masm = &assembler; - - struct Inputs { - double src1_; - double src2_; - }; - - struct Results { - // Check all register aliasing possibilities in order to exercise all - // code-paths in the macro assembler. - double min_abc_; - double min_aab_; - double min_aba_; - double max_abc_; - double max_aab_; - double max_aba_; - }; - - GeneratedCode f = - GenerateMacroFloat64MinMax(masm); - -#define CHECK_MINMAX(src1, src2, min, max) \ - do { \ - Inputs inputs = {src1, src2}; \ - Results results; \ - f.Call(&inputs, &results, 0, 0, 0); \ - CHECK_EQ(base::bit_cast(min), \ - base::bit_cast(results.min_abc_)); \ - CHECK_EQ(base::bit_cast(min), \ - base::bit_cast(results.min_aab_)); \ - CHECK_EQ(base::bit_cast(min), \ - base::bit_cast(results.min_aba_)); \ - CHECK_EQ(base::bit_cast(max), \ - base::bit_cast(results.max_abc_)); \ - CHECK_EQ(base::bit_cast(max), \ - base::bit_cast(results.max_aab_)); \ - CHECK_EQ(base::bit_cast(max), \ - base::bit_cast(results.max_aba_)); \ - /* Use a base::bit_cast to correctly identify -0.0 and NaNs. 
*/ \ - } while (0) - - double nan_a = std::numeric_limits::quiet_NaN(); - double nan_b = std::numeric_limits::quiet_NaN(); - - CHECK_MINMAX(1.0, -1.0, -1.0, 1.0); - CHECK_MINMAX(-1.0, 1.0, -1.0, 1.0); - CHECK_MINMAX(0.0, -1.0, -1.0, 0.0); - CHECK_MINMAX(-1.0, 0.0, -1.0, 0.0); - CHECK_MINMAX(-0.0, -1.0, -1.0, -0.0); - CHECK_MINMAX(-1.0, -0.0, -1.0, -0.0); - CHECK_MINMAX(0.0, 1.0, 0.0, 1.0); - CHECK_MINMAX(1.0, 0.0, 0.0, 1.0); - - CHECK_MINMAX(0.0, 0.0, 0.0, 0.0); - CHECK_MINMAX(-0.0, -0.0, -0.0, -0.0); - CHECK_MINMAX(-0.0, 0.0, -0.0, 0.0); - CHECK_MINMAX(0.0, -0.0, -0.0, 0.0); - - CHECK_MINMAX(0.0, nan_a, nan_a, nan_a); - CHECK_MINMAX(nan_a, 0.0, nan_a, nan_a); - CHECK_MINMAX(nan_a, nan_b, nan_a, nan_a); - CHECK_MINMAX(nan_b, nan_a, nan_b, nan_b); - -#undef CHECK_MINMAX -} - -TEST(DeoptExitSizeIsFixed) { - Isolate* isolate = CcTest::i_isolate(); - HandleScope handles(isolate); - auto buffer = AllocateAssemblerBuffer(); - MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes, - buffer->CreateView()); - static_assert(static_cast(kFirstDeoptimizeKind) == 0); - for (int i = 0; i < kDeoptimizeKindCount; i++) { - DeoptimizeKind kind = static_cast(i); - Label before_exit; - masm.bind(&before_exit); - Builtin target = Deoptimizer::GetDeoptimizationEntry(kind); - masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit, - nullptr); - CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit), - kind == DeoptimizeKind::kLazy ? Deoptimizer::kLazyDeoptExitSize - : Deoptimizer::kEagerDeoptExitSize); - } -} - -#undef __ - -} // namespace internal -} // namespace v8 diff --git a/test/debugger/debugger.status b/test/debugger/debugger.status index 0761b22051..0097ae7466 100644 --- a/test/debugger/debugger.status +++ b/test/debugger/debugger.status @@ -133,7 +133,7 @@ ############################################################################## # Tests requiring Sparkplug. -['arch not in (x64, arm64, ia32, arm, mips64el, mipsel, loong64)', { +['arch not in (x64, arm64, ia32, arm, mips64el, loong64)', { 'regress/regress-crbug-1199681': [SKIP], 'debug/regress/regress-crbug-1357554': [SKIP] }], diff --git a/test/message/message.status b/test/message/message.status index 8ed2f79f9e..7bde812162 100644 --- a/test/message/message.status +++ b/test/message/message.status @@ -69,10 +69,10 @@ }], ################################################################################ -['arch == mips64el or arch == mipsel or arch == riscv64 or arch == loong64', { +['arch == mips64el or arch == riscv64 or arch == loong64', { # Tests that require Simd enabled. 'wasm-trace-memory': [SKIP], -}], # arch == mips64el or arch == mipsel or arch == riscv64 or arch == loong64 +}], # arch == mips64el or arch == riscv64 or arch == loong64 ############################################################################## ['no_simd_hardware == True', { diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status index aa8a4fef85..1240b8da63 100644 --- a/test/mjsunit/mjsunit.status +++ b/test/mjsunit/mjsunit.status @@ -157,11 +157,11 @@ 'wasm/compare-exchange64-stress': [PASS, SLOW, NO_VARIANTS], # Very slow on ARM, MIPS, RISCV and LOONG, contains no architecture dependent code. 
- 'unicode-case-overoptimization0': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips64, mips, riscv64, riscv32, loong64)', SKIP]], - 'unicode-case-overoptimization1': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips64, mips, riscv64, riscv32, loong64)', SKIP]], - 'regress/regress-3976': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips64, mips, riscv64, riscv32, loong64)', SKIP]], - 'regress/regress-crbug-482998': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips, riscv64, riscv32, loong64)', SKIP]], - 'regress/regress-740784': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips, riscv64,riscv32, loong64)', SKIP]], + 'unicode-case-overoptimization0': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mips64el, mips64, riscv64, riscv32, loong64)', SKIP]], + 'unicode-case-overoptimization1': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mips64el, mips64, riscv64, riscv32, loong64)', SKIP]], + 'regress/regress-3976': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mips64el, mips64, riscv64, riscv32, loong64)', SKIP]], + 'regress/regress-crbug-482998': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mips64el, riscv64, riscv32, loong64)', SKIP]], + 'regress/regress-740784': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mips64el, riscv64,riscv32, loong64)', SKIP]], # TODO(bmeurer): Flaky timeouts (sometimes <1s, sometimes >3m). 'unicodelctest': [PASS, NO_VARIANTS], @@ -256,7 +256,7 @@ ############################################################################## # TODO(ahaas): Port multiple return values to MIPS, S390 and PPC -['arch in (mips, mips64, mipsel, mips64el, s390, s390x, ppc, ppc64)', { +['arch in (mips64, mips64el, s390, s390x, ppc, ppc64)', { 'wasm/multi-value': [SKIP], }], @@ -480,13 +480,13 @@ ############################################################################## # 32-bit platforms -['arch in (ia32, arm, mips, mipsel, riscv32)', { +['arch in (ia32, arm, riscv32)', { # Needs >2GB of available contiguous memory. 'wasm/grow-huge-memory': [SKIP], 'wasm/huge-memory': [SKIP], 'wasm/huge-typedarray': [SKIP], 'wasm/bigint-opt': [SKIP], -}], # 'arch in (ia32, arm, mips, mipsel, riscv32)' +}], # 'arch in (ia32, arm, riscv32)' ############################################################################## ['arch == arm64', { @@ -735,60 +735,22 @@ }], # 'arch == arm ############################################################################## -['arch in (mipsel, mips, mips64el, mips64) and not simulator_run', { +['arch in (mips64el, mips64) and not simulator_run', { # These tests fail occasionally on the buildbots because they consume # a large amount of memory if executed in parallel. Therefore we # run only a single instance of these tests 'regress/regress-crbug-514081': [PASS, NO_VARIANTS], 'regress/regress-599414-array-concat-fast-path': [PASS, NO_VARIANTS], 'array-functions-prototype-misc': [PASS, NO_VARIANTS], -}], # 'arch in (mipsel, mips, mips64el, mips64)' +}], # 'arch in (mips64el, mips64)' ############################################################################## -['arch in (mipsel, mips, mips64el, mips64, ppc, ppc64)', { +['arch in (mips64el, mips64, ppc, ppc64)', { # These tests fail because qNaN and sNaN values are encoded differently on # MIPS and ARM/x86 architectures 'wasm/float-constant-folding': [SKIP], }], -############################################################################## -['arch == mipsel or arch == mips', { - - # Slow tests which times out in debug mode. 
- 'try': [PASS, ['mode == debug', SKIP]], - 'array-constructor': [PASS, ['mode == debug', SKIP]], - - # Slow in release mode on MIPS. - 'compiler/regress-stacktrace-methods': [PASS, SLOW], - 'array-splice': [PASS, SLOW], - - # Long running test. - 'string-indexof-2': [PASS, SLOW], - - # Long running tests. Skipping because having them timeout takes too long on - # the buildbot. - 'compiler/alloc-number': [SKIP], - 'regress/regress-490': [SKIP], - 'regress/regress-create-exception': [SKIP], - 'regress/regress-3247124': [SKIP], - - # Requires bigger stack size in the Genesis and if stack size is increased, - # the test requires too much time to run. However, the problem test covers - # should be platform-independent. - 'regress/regress-1132': [SKIP], - - # Currently always deopt on minus zero - 'math-floor-of-div-minus-zero': [SKIP], - - # Requires too much memory on MIPS. - 'regress/regress-779407': [SKIP], - 'harmony/bigint/regressions': [SKIP], - - # Pre-r6 MIPS32 doesn't have instructions needed to properly handle 64-bit - # atomic instructions. - 'wasm/atomics64-stress': [PASS, ['mips_arch_variant != r6', SKIP]], -}], # 'arch == mipsel or arch == mips' - ############################################################################## ['arch == mips64el or arch == mips64', { @@ -1325,7 +1287,7 @@ ############################################################################## # Skip Liftoff tests on platforms that do not fully implement Liftoff. -['arch not in (x64, ia32, arm64, arm, s390x, ppc64, mipsel, mips64el, loong64)', { +['arch not in (x64, ia32, arm64, arm, s390x, ppc64, mips64el, loong64)', { 'wasm/liftoff': [SKIP], 'wasm/liftoff-debug': [SKIP], 'wasm/tier-up-testing-flag': [SKIP], @@ -1334,7 +1296,7 @@ 'wasm/test-partial-serialization': [SKIP], 'regress/wasm/regress-1248024': [SKIP], 'regress/wasm/regress-1251465': [SKIP], -}], # arch not in (x64, ia32, arm64, arm, s390x, ppc64, mipsel, mips64el, loong64) +}], # arch not in (x64, ia32, arm64, arm, s390x, ppc64, mips64el, loong64) ############################################################################## ['system != linux or sandbox == True', { @@ -1448,13 +1410,13 @@ 'regress/wasm/regress-9017': [SKIP], }], # variant == slow_path -['((arch == mipsel or arch == mips64el or arch == mips or arch == mips64) and not simd_mips) or (arch in [ppc64])', { +['((arch == mips64el or arch == mips64) and not simd_mips) or (arch in [ppc64])', { # Requires scalar lowering for 64x2 SIMD instructions, which are not # implemented yet. # Also skip tests on archs that don't support SIMD and lowering doesn't yet work correctly. # Condition copied from cctest.status. 'regress/wasm/regress-10831': [SKIP], -}], # ((arch == mipsel or arch == mips64el or arch == mips or arch == mips64) and not simd_mips) or (arch in [ppc64]) +}], # ((arch == mips64el or arch == mips64) and not simd_mips) or (arch in [ppc64]) ############################################################################## ['variant == stress_sampling', { @@ -1542,7 +1504,7 @@ ############################################################################## # TODO(v8:11421): Port baseline compiler to other architectures. 
-['arch not in (x64, arm64, ia32, arm, mips64el, mipsel, riscv64, riscv32, loong64, s390x) or (arch == s390x and pointer_compression)', { +['arch not in (x64, arm64, ia32, arm, mips64el, riscv64, riscv32, loong64, s390x) or (arch == s390x and pointer_compression)', { 'baseline/*': [SKIP], 'regress/regress-1242306': [SKIP], }], diff --git a/test/mozilla/mozilla.status b/test/mozilla/mozilla.status index 164f612109..9ab6b62f01 100644 --- a/test/mozilla/mozilla.status +++ b/test/mozilla/mozilla.status @@ -249,7 +249,7 @@ 'ecma/Date/15.9.2.2-5': [PASS, FAIL], 'ecma/Date/15.9.2.2-6': [PASS, FAIL], - # 1026139: These date tests fail on arm and mips. + # 1026139: These date tests fail on arm. # These date tests also fail in a time zone without daylight saving time. 'ecma/Date/15.9.5.29-1': [PASS, FAIL], 'ecma/Date/15.9.5.28-1': [PASS, FAIL], @@ -270,8 +270,8 @@ 'ecma/Date/15.9.5.18': [PASS, ['no_i18n == False', FAIL]], 'ecma/Date/15.9.5.22-1': [PASS, ['no_i18n == False', FAIL]], - # 1050186: Arm/MIPS vm is broken; probably unrelated to dates - 'ecma/Date/15.9.5.22-2': [PASS, ['no_i18n == False or arch == arm or arch == mipsel or arch == mips', FAIL]], + # 1050186: Arm vm is broken; probably unrelated to dates + 'ecma/Date/15.9.5.22-2': [PASS, ['no_i18n == False or arch == arm', FAIL]], # Flaky test that fails due to what appears to be a bug in the test. # Occurs depending on current time @@ -959,7 +959,7 @@ }], # 'arch == arm64' -['arch == mipsel or arch == mips64el or arch == mips64', { +['arch == mips64el or arch == mips64', { # BUG(3251229): Times out when running new crankshaft test script. 'ecma_3/RegExp/regress-311414': [SKIP], @@ -976,36 +976,12 @@ # BUG(1040): This test might time out. 'js1_5/GC/regress-203278-2': [PASS, SLOW, NO_VARIANTS], -}], # 'arch == mipsel or arch == mips64el or arch == mips64' - -['arch == mipsel and simulator_run', { - # Crashes due to C stack overflow. - 'js1_5/extensions/regress-355497': [SKIP], -}], # 'arch == mipsel and simulator_run' +}], # 'arch == mips64el or arch == mips64' ['arch == mips64el and simulator_run', { 'js1_5/extensions/regress-355497': [FAIL_OK, '--sim-stack-size=512'], }], # 'arch == mips64el and simulator_run' -['arch == mips', { - - # BUG(3251229): Times out when running new crankshaft test script. - 'ecma_3/RegExp/regress-311414': [SKIP], - 'ecma/Date/15.9.5.8': [SKIP], - 'ecma/Date/15.9.5.10-2': [SKIP], - 'ecma/Date/15.9.5.11-2': [SKIP], - 'ecma/Date/15.9.5.12-2': [SKIP], - 'js1_5/Array/regress-99120-02': [SKIP], - 'js1_5/extensions/regress-371636': [SKIP], - 'js1_5/Regress/regress-203278-1': [SKIP], - 'js1_5/Regress/regress-404755': [SKIP], - 'js1_5/Regress/regress-451322': [SKIP], - - - # BUG(1040): This test might time out. - 'js1_5/GC/regress-203278-2': [PASS, SLOW, NO_VARIANTS], -}], # 'arch == mips' - ['arch == arm and simulator_run', { #BUG(3837): Crashes due to C stack overflow. diff --git a/test/test262/test262.status b/test/test262/test262.status index 881d55aaa8..9a0f8c504c 100644 --- a/test/test262/test262.status +++ b/test/test262/test262.status @@ -835,7 +835,7 @@ 'staging/Temporal/ZonedDateTime/old/withTimezone': [FAIL], }], # no_i18n == True -['arch == arm or arch == mipsel or arch == mips or arch == arm64 or arch == mips64 or arch == mips64el', { +['arch == arm or arch == arm64 or arch == mips64 or arch == mips64el', { # Causes stack overflow on simulators due to eager compilation of # parenthesized function literals. Needs investigation. 
@@ -848,7 +848,7 @@ 'built-ins/decodeURIComponent/S15.1.3.2_A2.5_T1': [SKIP], 'built-ins/encodeURI/S15.1.3.3_A2.3_T1': [SKIP], 'built-ins/encodeURIComponent/S15.1.3.4_A2.3_T1': [SKIP], -}], # 'arch == arm or arch == mipsel or arch == mips or arch == arm64' +}], # 'arch == arm or arch == arm64' ['byteorder == big', { # Test failures on big endian platforms due to the way the tests @@ -897,7 +897,7 @@ '*': [SKIP], }], # variant == no_wasm_traps -['variant != default or arch == arm or arch == arm64 or arch == mipsel or arch == mips or arch == mips64 or arch == mips64el', { +['variant != default or arch == arm or arch == arm64 or arch == mips64 or arch == mips64el', { # These tests take a long time to run 'built-ins/RegExp/property-escapes/generated/*': [SKIP], }], # variant != default or arch == arm or arch == arm64 diff --git a/test/unittests/BUILD.gn b/test/unittests/BUILD.gn index c72c146799..fbc7dd5368 100644 --- a/test/unittests/BUILD.gn +++ b/test/unittests/BUILD.gn @@ -607,11 +607,6 @@ v8_source_set("unittests_sources") { "assembler/turbo-assembler-ia32-unittest.cc", "compiler/ia32/instruction-selector-ia32-unittest.cc", ] - } else if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel") { - sources += [ - "assembler/turbo-assembler-mips-unittest.cc", - "compiler/mips/instruction-selector-mips-unittest.cc", - ] } else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") { sources += [ "assembler/disasm-mips64-unittest.cc", diff --git a/test/unittests/assembler/turbo-assembler-mips-unittest.cc b/test/unittests/assembler/turbo-assembler-mips-unittest.cc deleted file mode 100644 index b8a645e6a7..0000000000 --- a/test/unittests/assembler/turbo-assembler-mips-unittest.cc +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2018 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/codegen/macro-assembler.h" -#include "src/codegen/mips/assembler-mips-inl.h" -#include "src/execution/simulator.h" -#include "test/common/assembler-tester.h" -#include "test/unittests/test-utils.h" -#include "testing/gtest-support.h" - -namespace v8 { -namespace internal { - -#define __ tasm. - -// Test the x64 assembler by compiling some simple functions into -// a buffer and executing them. These tests do not initialize the -// V8 library, create a context, or use any V8 objects. - -class TurboAssemblerTest : public TestWithIsolate {}; - -TEST_F(TurboAssemblerTest, TestHardAbort) { - auto buffer = AllocateAssemblerBuffer(); - TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, - buffer->CreateView()); - __ set_root_array_available(false); - __ set_abort_hard(true); - - __ Abort(AbortReason::kNoReason); - - CodeDesc desc; - tasm.GetCode(isolate(), &desc); - buffer->MakeExecutable(); - // We need an isolate here to execute in the simulator. - auto f = GeneratedCode::FromBuffer(isolate(), buffer->start()); - - ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason"); -} - -TEST_F(TurboAssemblerTest, TestCheck) { - auto buffer = AllocateAssemblerBuffer(); - TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo, - buffer->CreateView()); - __ set_root_array_available(false); - __ set_abort_hard(true); - - // Fail if the first parameter (in {a0}) is 17. 
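// [Editor's sketch, not part of the original test: roughly what the Check()
// call below expands to, assumed from this test's expectations. Check(cc,
// reason, rs, rt) branches over an Abort when the condition holds, so only
// the f.Call(17) case is expected to hit the abort path.]
//
//   Label ok;
//   __ Branch(&ok, Condition::ne, a0, Operand(17));  // a0 != 17: keep going
//   __ Abort(AbortReason::kNoReason);
//   __ bind(&ok);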
- __ Check(Condition::ne, AbortReason::kNoReason, a0, Operand(17)); - __ Ret(); - - CodeDesc desc; - tasm.GetCode(isolate(), &desc); - buffer->MakeExecutable(); - // We need an isolate here to execute in the simulator. - auto f = GeneratedCode::FromBuffer(isolate(), buffer->start()); - - f.Call(0); - f.Call(18); - ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, "abort: no reason"); -} - -#undef __ - -} // namespace internal -} // namespace v8 diff --git a/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc b/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc deleted file mode 100644 index 0728d32304..0000000000 --- a/test/unittests/compiler/mips/instruction-selector-mips-unittest.cc +++ /dev/null @@ -1,1426 +0,0 @@ -// Copyright 2014 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file - -#include "test/unittests/compiler/backend/instruction-selector-unittest.h" - -#include "src/objects/objects-inl.h" - -namespace v8 { -namespace internal { -namespace compiler { - -namespace { - -template -struct MachInst { - T constructor; - const char* constructor_name; - ArchOpcode arch_opcode; - MachineType machine_type; -}; - -template -std::ostream& operator<<(std::ostream& os, const MachInst& mi) { - return os << mi.constructor_name; -} - -using MachInst1 = MachInst; -using MachInst2 = MachInst; - -// To avoid duplicated code IntCmp helper structure -// is created. It contains MachInst2 with two nodes and expected_size -// because different cmp instructions have different size. -struct IntCmp { - MachInst2 mi; - uint32_t expected_size; -}; - -struct FPCmp { - MachInst2 mi; - FlagsCondition cond; -}; - -const FPCmp kFPCmpInstructions[] = { - {{&RawMachineAssembler::Float64Equal, "Float64Equal", kMipsCmpD, - MachineType::Float64()}, - kEqual}, - {{&RawMachineAssembler::Float64LessThan, "Float64LessThan", kMipsCmpD, - MachineType::Float64()}, - kUnsignedLessThan}, - {{&RawMachineAssembler::Float64LessThanOrEqual, "Float64LessThanOrEqual", - kMipsCmpD, MachineType::Float64()}, - kUnsignedLessThanOrEqual}, - {{&RawMachineAssembler::Float64GreaterThan, "Float64GreaterThan", kMipsCmpD, - MachineType::Float64()}, - kUnsignedLessThan}, - {{&RawMachineAssembler::Float64GreaterThanOrEqual, - "Float64GreaterThanOrEqual", kMipsCmpD, MachineType::Float64()}, - kUnsignedLessThanOrEqual}}; - -struct Conversion { - // The machine_type field in MachInst1 represents the destination type. - MachInst1 mi; - MachineType src_machine_type; -}; - - -// ---------------------------------------------------------------------------- -// Logical instructions. -// ---------------------------------------------------------------------------- - - -const MachInst2 kLogicalInstructions[] = { - {&RawMachineAssembler::WordAnd, "WordAnd", kMipsAnd, MachineType::Int16()}, - {&RawMachineAssembler::WordOr, "WordOr", kMipsOr, MachineType::Int16()}, - {&RawMachineAssembler::WordXor, "WordXor", kMipsXor, MachineType::Int16()}, - {&RawMachineAssembler::Word32And, "Word32And", kMipsAnd, - MachineType::Int32()}, - {&RawMachineAssembler::Word32Or, "Word32Or", kMipsOr, MachineType::Int32()}, - {&RawMachineAssembler::Word32Xor, "Word32Xor", kMipsXor, - MachineType::Int32()}}; - - -// ---------------------------------------------------------------------------- -// Shift instructions. 
-// ---------------------------------------------------------------------------- - - -const MachInst2 kShiftInstructions[] = { - {&RawMachineAssembler::WordShl, "WordShl", kMipsShl, MachineType::Int16()}, - {&RawMachineAssembler::WordShr, "WordShr", kMipsShr, MachineType::Int16()}, - {&RawMachineAssembler::WordSar, "WordSar", kMipsSar, MachineType::Int16()}, - {&RawMachineAssembler::WordRor, "WordRor", kMipsRor, MachineType::Int16()}, - {&RawMachineAssembler::Word32Shl, "Word32Shl", kMipsShl, - MachineType::Int32()}, - {&RawMachineAssembler::Word32Shr, "Word32Shr", kMipsShr, - MachineType::Int32()}, - {&RawMachineAssembler::Word32Sar, "Word32Sar", kMipsSar, - MachineType::Int32()}, - {&RawMachineAssembler::Word32Ror, "Word32Ror", kMipsRor, - MachineType::Int32()}}; - - -// ---------------------------------------------------------------------------- -// MUL/DIV instructions. -// ---------------------------------------------------------------------------- - - -const MachInst2 kMulDivInstructions[] = { - {&RawMachineAssembler::Int32Mul, "Int32Mul", kMipsMul, - MachineType::Int32()}, - {&RawMachineAssembler::Int32Div, "Int32Div", kMipsDiv, - MachineType::Int32()}, - {&RawMachineAssembler::Uint32Div, "Uint32Div", kMipsDivU, - MachineType::Uint32()}, - {&RawMachineAssembler::Float64Mul, "Float64Mul", kMipsMulD, - MachineType::Float64()}, - {&RawMachineAssembler::Float64Div, "Float64Div", kMipsDivD, - MachineType::Float64()}}; - - -// ---------------------------------------------------------------------------- -// MOD instructions. -// ---------------------------------------------------------------------------- - - -const MachInst2 kModInstructions[] = { - {&RawMachineAssembler::Int32Mod, "Int32Mod", kMipsMod, - MachineType::Int32()}, - {&RawMachineAssembler::Uint32Mod, "Int32UMod", kMipsModU, - MachineType::Int32()}, - {&RawMachineAssembler::Float64Mod, "Float64Mod", kMipsModD, - MachineType::Float64()}}; - - -// ---------------------------------------------------------------------------- -// Arithmetic FPU instructions. -// ---------------------------------------------------------------------------- - - -const MachInst2 kFPArithInstructions[] = { - {&RawMachineAssembler::Float64Add, "Float64Add", kMipsAddD, - MachineType::Float64()}, - {&RawMachineAssembler::Float64Sub, "Float64Sub", kMipsSubD, - MachineType::Float64()}}; - - -// ---------------------------------------------------------------------------- -// IntArithTest instructions, two nodes. -// ---------------------------------------------------------------------------- - - -const MachInst2 kAddSubInstructions[] = { - {&RawMachineAssembler::Int32Add, "Int32Add", kMipsAdd, - MachineType::Int32()}, - {&RawMachineAssembler::Int32Sub, "Int32Sub", kMipsSub, - MachineType::Int32()}, - {&RawMachineAssembler::Int32AddWithOverflow, "Int32AddWithOverflow", - kMipsAddOvf, MachineType::Int32()}, - {&RawMachineAssembler::Int32SubWithOverflow, "Int32SubWithOverflow", - kMipsSubOvf, MachineType::Int32()}}; - - -// ---------------------------------------------------------------------------- -// IntArithTest instructions, one node. -// ---------------------------------------------------------------------------- - - -const MachInst1 kAddSubOneInstructions[] = { - {&RawMachineAssembler::Int32Neg, "Int32Neg", kMipsSub, - MachineType::Int32()}, - // TODO(dusmil): check this ... 
- // {&RawMachineAssembler::WordEqual , "WordEqual" , kMipsTst, - // MachineType::Int32()} -}; - - -// ---------------------------------------------------------------------------- -// Arithmetic compare instructions. -// ---------------------------------------------------------------------------- - - -const IntCmp kCmpInstructions[] = { - {{&RawMachineAssembler::WordEqual, "WordEqual", kMipsCmp, - MachineType::Int16()}, - 1U}, - {{&RawMachineAssembler::WordNotEqual, "WordNotEqual", kMipsCmp, - MachineType::Int16()}, - 1U}, - {{&RawMachineAssembler::Word32Equal, "Word32Equal", kMipsCmp, - MachineType::Int32()}, - 1U}, - {{&RawMachineAssembler::Word32NotEqual, "Word32NotEqual", kMipsCmp, - MachineType::Int32()}, - 1U}, - {{&RawMachineAssembler::Int32LessThan, "Int32LessThan", kMipsCmp, - MachineType::Int32()}, - 1U}, - {{&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual", - kMipsCmp, MachineType::Int32()}, - 1U}, - {{&RawMachineAssembler::Int32GreaterThan, "Int32GreaterThan", kMipsCmp, - MachineType::Int32()}, - 1U}, - {{&RawMachineAssembler::Int32GreaterThanOrEqual, "Int32GreaterThanOrEqual", - kMipsCmp, MachineType::Int32()}, - 1U}, - {{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kMipsCmp, - MachineType::Uint32()}, - 1U}, - {{&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual", - kMipsCmp, MachineType::Uint32()}, - 1U}}; - - -// ---------------------------------------------------------------------------- -// Conversion instructions. -// ---------------------------------------------------------------------------- - -const Conversion kConversionInstructions[] = { - // Conversion instructions are related to machine_operator.h: - // FPU conversions: - // Convert representation of integers between float64 and int32/uint32. - // The precise rounding mode and handling of out of range inputs are *not* - // defined for these operators, since they are intended only for use with - // integers. 
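// [Editorial sketch, not part of the original file] The rounding mode can
// stay undefined above because every int32/uint32 value is exactly
// representable as a float64 (53-bit mantissa), so cvt_d_w / cvt_d_uw
// never need to round:
//
//   int32_t i = INT32_MAX;
//   double d = static_cast<double>(i);     // MIPS: cvt.d.w
//   assert(static_cast<int32_t>(d) == i);  // exact round-trip, no rounding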
- // mips instruction: cvt_d_w - {{&RawMachineAssembler::ChangeInt32ToFloat64, "ChangeInt32ToFloat64", - kMipsCvtDW, MachineType::Float64()}, - MachineType::Int32()}, - - // mips instruction: cvt_d_uw - {{&RawMachineAssembler::ChangeUint32ToFloat64, "ChangeUint32ToFloat64", - kMipsCvtDUw, MachineType::Float64()}, - MachineType::Int32()}, - - // mips instruction: trunc_w_d - {{&RawMachineAssembler::ChangeFloat64ToInt32, "ChangeFloat64ToInt32", - kMipsTruncWD, MachineType::Float64()}, - MachineType::Int32()}, - - // mips instruction: trunc_uw_d - {{&RawMachineAssembler::ChangeFloat64ToUint32, "ChangeFloat64ToUint32", - kMipsTruncUwD, MachineType::Float64()}, - MachineType::Int32()}}; - -const Conversion kFloat64RoundInstructions[] = { - {{&RawMachineAssembler::Float64RoundUp, "Float64RoundUp", kMipsCeilWD, - MachineType::Int32()}, - MachineType::Float64()}, - {{&RawMachineAssembler::Float64RoundDown, "Float64RoundDown", kMipsFloorWD, - MachineType::Int32()}, - MachineType::Float64()}, - {{&RawMachineAssembler::Float64RoundTiesEven, "Float64RoundTiesEven", - kMipsRoundWD, MachineType::Int32()}, - MachineType::Float64()}, - {{&RawMachineAssembler::Float64RoundTruncate, "Float64RoundTruncate", - kMipsTruncWD, MachineType::Int32()}, - MachineType::Float64()}}; - -const Conversion kFloat32RoundInstructions[] = { - {{&RawMachineAssembler::Float32RoundUp, "Float32RoundUp", kMipsCeilWS, - MachineType::Int32()}, - MachineType::Float32()}, - {{&RawMachineAssembler::Float32RoundDown, "Float32RoundDown", kMipsFloorWS, - MachineType::Int32()}, - MachineType::Float32()}, - {{&RawMachineAssembler::Float32RoundTiesEven, "Float32RoundTiesEven", - kMipsRoundWS, MachineType::Int32()}, - MachineType::Float32()}, - {{&RawMachineAssembler::Float32RoundTruncate, "Float32RoundTruncate", - kMipsTruncWS, MachineType::Int32()}, - MachineType::Float32()}}; - -} // namespace - -using InstructionSelectorFPCmpTest = InstructionSelectorTestWithParam; - -TEST_P(InstructionSelectorFPCmpTest, Parameter) { - const FPCmp cmp = GetParam(); - StreamBuilder m(this, MachineType::Int32(), cmp.mi.machine_type, - cmp.mi.machine_type); - m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Parameter(1))); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(cmp.mi.arch_opcode, s[0]->arch_opcode()); - EXPECT_EQ(2U, s[0]->InputCount()); - EXPECT_EQ(1U, s[0]->OutputCount()); - EXPECT_EQ(kFlags_set, s[0]->flags_mode()); - EXPECT_EQ(cmp.cond, s[0]->flags_condition()); -} - -INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorFPCmpTest, - ::testing::ValuesIn(kFPCmpInstructions)); - -// ---------------------------------------------------------------------------- -// Arithmetic compare instructions integers. -// ---------------------------------------------------------------------------- - -using InstructionSelectorCmpTest = InstructionSelectorTestWithParam; - -TEST_P(InstructionSelectorCmpTest, Parameter) { - const IntCmp cmp = GetParam(); - const MachineType type = cmp.mi.machine_type; - StreamBuilder m(this, type, type, type); - m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Parameter(1))); - Stream s = m.Build(); - ASSERT_EQ(cmp.expected_size, s.size()); - EXPECT_EQ(cmp.mi.arch_opcode, s[0]->arch_opcode()); - EXPECT_EQ(2U, s[0]->InputCount()); - EXPECT_EQ(1U, s[0]->OutputCount()); -} - -INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorCmpTest, - ::testing::ValuesIn(kCmpInstructions)); - -// ---------------------------------------------------------------------------- -// Shift instructions. 
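// [Editorial note, a sketch, not part of the original file] In the
// kFPCmpInstructions table further up, Float64GreaterThan(OrEqual) maps
// to kUnsignedLessThan(OrEqual) because the selector commutes the
// operands: a > b is evaluated as b < a, so one compare condition covers
// both directions:
//
//   bool Gt(double a, double b) { return b < a; }  // same predicate, swapped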
-// ---------------------------------------------------------------------------- - -using InstructionSelectorShiftTest = - InstructionSelectorTestWithParam; - -TEST_P(InstructionSelectorShiftTest, Immediate) { - const MachInst2 dpi = GetParam(); - const MachineType type = dpi.machine_type; - TRACED_FORRANGE(int32_t, imm, 0, - ((1 << ElementSizeLog2Of(type.representation())) * 8) - 1) { - StreamBuilder m(this, type, type); - m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm))); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode()); - EXPECT_EQ(2U, s[0]->InputCount()); - EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate()); - EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1))); - EXPECT_EQ(1U, s[0]->OutputCount()); - } -} - -INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorShiftTest, - ::testing::ValuesIn(kShiftInstructions)); - -TEST_F(InstructionSelectorTest, Word32ShrWithWord32AndWithImmediate) { - // The available shift operand range is `0 <= imm < 32`, but we also test - // that immediates outside this range are handled properly (modulo-32). - TRACED_FORRANGE(int32_t, shift, -32, 63) { - int32_t lsb = shift & 0x1F; - TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) { - uint32_t jnk = rng()->NextInt(); - jnk = (lsb > 0) ? (jnk >> (32 - lsb)) : 0; - uint32_t msk = ((0xFFFFFFFFu >> (32 - width)) << lsb) | jnk; - StreamBuilder m(this, MachineType::Int32(), MachineType::Int32()); - m.Return(m.Word32Shr(m.Word32And(m.Parameter(0), m.Int32Constant(msk)), - m.Int32Constant(shift))); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kMipsExt, s[0]->arch_opcode()); - ASSERT_EQ(3U, s[0]->InputCount()); - EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1))); - EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2))); - } - } - TRACED_FORRANGE(int32_t, shift, -32, 63) { - int32_t lsb = shift & 0x1F; - TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) { - uint32_t jnk = rng()->NextInt(); - jnk = (lsb > 0) ? 
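// [Editorial note, a sketch] "modulo-32" above means only shift & 0x1F is
// significant, matching MIPS32 shift semantics, so a requested shift of
// 35 extracts the same field as a shift of 3:
//   (x & msk) >> (35 & 0x1F) == (x & msk) >> 3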
(jnk >> (32 - lsb)) : 0; - uint32_t msk = ((0xFFFFFFFFu >> (32 - width)) << lsb) | jnk; - StreamBuilder m(this, MachineType::Int32(), MachineType::Int32()); - m.Return(m.Word32Shr(m.Word32And(m.Int32Constant(msk), m.Parameter(0)), - m.Int32Constant(shift))); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kMipsExt, s[0]->arch_opcode()); - ASSERT_EQ(3U, s[0]->InputCount()); - EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1))); - EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2))); - } - } -} - - -TEST_F(InstructionSelectorTest, Word32ShlWithWord32And) { - TRACED_FORRANGE(int32_t, shift, 0, 30) { - StreamBuilder m(this, MachineType::Int32(), MachineType::Int32()); - Node* const p0 = m.Parameter(0); - Node* const r = - m.Word32Shl(m.Word32And(p0, m.Int32Constant((1 << (31 - shift)) - 1)), - m.Int32Constant(shift + 1)); - m.Return(r); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kMipsShl, s[0]->arch_opcode()); - ASSERT_EQ(2U, s[0]->InputCount()); - EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); - ASSERT_EQ(1U, s[0]->OutputCount()); - EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output())); - } -} - -TEST_F(InstructionSelectorTest, Word32SarWithWord32Shl) { - if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) { - { - StreamBuilder m(this, MachineType::Int32(), MachineType::Int32()); - Node* const p0 = m.Parameter(0); - Node* const r = m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(24)), - m.Int32Constant(24)); - m.Return(r); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kMipsSeb, s[0]->arch_opcode()); - ASSERT_EQ(1U, s[0]->InputCount()); - EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); - ASSERT_EQ(1U, s[0]->OutputCount()); - EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output())); - } - { - StreamBuilder m(this, MachineType::Int32(), MachineType::Int32()); - Node* const p0 = m.Parameter(0); - Node* const r = m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(16)), - m.Int32Constant(16)); - m.Return(r); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kMipsSeh, s[0]->arch_opcode()); - ASSERT_EQ(1U, s[0]->InputCount()); - EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); - ASSERT_EQ(1U, s[0]->OutputCount()); - EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output())); - } - } -} - -// ---------------------------------------------------------------------------- -// Logical instructions. 
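// [Editorial note, a sketch, not part of the original file; helper name
// illustrative] The Seb/Seh selections above rely on the identity that a
// left shift followed by an arithmetic right shift of the same amount
// sign-extends the low field, which mips32r2's seb/seh do directly:
//
//   int32_t SignExtendByte(int32_t x) {
//     return (x << 24) >> 24;  // expected selection on r2/r6: seb
//   }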
-// ---------------------------------------------------------------------------- - -using InstructionSelectorLogicalTest = - InstructionSelectorTestWithParam; - -TEST_P(InstructionSelectorLogicalTest, Parameter) { - const MachInst2 dpi = GetParam(); - const MachineType type = dpi.machine_type; - StreamBuilder m(this, type, type, type); - m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1))); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode()); - EXPECT_EQ(2U, s[0]->InputCount()); - EXPECT_EQ(1U, s[0]->OutputCount()); -} - -INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, - InstructionSelectorLogicalTest, - ::testing::ValuesIn(kLogicalInstructions)); - -TEST_F(InstructionSelectorTest, Word32XorMinusOneWithParameter) { - { - StreamBuilder m(this, MachineType::Int32(), MachineType::Int32()); - m.Return(m.Word32Xor(m.Parameter(0), m.Int32Constant(-1))); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kMipsNor, s[0]->arch_opcode()); - EXPECT_EQ(2U, s[0]->InputCount()); - EXPECT_EQ(1U, s[0]->OutputCount()); - } - { - StreamBuilder m(this, MachineType::Int32(), MachineType::Int32()); - m.Return(m.Word32Xor(m.Int32Constant(-1), m.Parameter(0))); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kMipsNor, s[0]->arch_opcode()); - EXPECT_EQ(2U, s[0]->InputCount()); - EXPECT_EQ(1U, s[0]->OutputCount()); - } -} - - -TEST_F(InstructionSelectorTest, Word32XorMinusOneWithWord32Or) { - { - StreamBuilder m(this, MachineType::Int32(), MachineType::Int32()); - m.Return(m.Word32Xor(m.Word32Or(m.Parameter(0), m.Parameter(0)), - m.Int32Constant(-1))); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kMipsNor, s[0]->arch_opcode()); - EXPECT_EQ(2U, s[0]->InputCount()); - EXPECT_EQ(1U, s[0]->OutputCount()); - } - { - StreamBuilder m(this, MachineType::Int32(), MachineType::Int32()); - m.Return(m.Word32Xor(m.Int32Constant(-1), - m.Word32Or(m.Parameter(0), m.Parameter(0)))); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kMipsNor, s[0]->arch_opcode()); - EXPECT_EQ(2U, s[0]->InputCount()); - EXPECT_EQ(1U, s[0]->OutputCount()); - } -} - - -TEST_F(InstructionSelectorTest, Word32AndWithImmediateWithWord32Shr) { - // The available shift operand range is `0 <= imm < 32`, but we also test - // that immediates outside this range are handled properly (modulo-32). - TRACED_FORRANGE(int32_t, shift, -32, 63) { - int32_t lsb = shift & 0x1F; - TRACED_FORRANGE(int32_t, width, 1, 31) { - uint32_t msk = (1 << width) - 1; - StreamBuilder m(this, MachineType::Int32(), MachineType::Int32()); - m.Return(m.Word32And(m.Word32Shr(m.Parameter(0), m.Int32Constant(shift)), - m.Int32Constant(msk))); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kMipsExt, s[0]->arch_opcode()); - ASSERT_EQ(3U, s[0]->InputCount()); - EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1))); - int32_t actual_width = (lsb + width > 32) ? 
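// [Editorial note, a sketch] The kMipsNor selections above exist because
// MIPS has no bitwise-not instruction: x ^ -1 is ~x and is emitted as
// nor rd, x, zero, which is why both operand orders fold to one kMipsNor.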
(32 - lsb) : width; - EXPECT_EQ(actual_width, s.ToInt32(s[0]->InputAt(2))); - } - } - TRACED_FORRANGE(int32_t, shift, -32, 63) { - int32_t lsb = shift & 0x1F; - TRACED_FORRANGE(int32_t, width, 1, 31) { - uint32_t msk = (1 << width) - 1; - StreamBuilder m(this, MachineType::Int32(), MachineType::Int32()); - m.Return( - m.Word32And(m.Int32Constant(msk), - m.Word32Shr(m.Parameter(0), m.Int32Constant(shift)))); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kMipsExt, s[0]->arch_opcode()); - ASSERT_EQ(3U, s[0]->InputCount()); - EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1))); - int32_t actual_width = (lsb + width > 32) ? (32 - lsb) : width; - EXPECT_EQ(actual_width, s.ToInt32(s[0]->InputAt(2))); - } - } -} - - -TEST_F(InstructionSelectorTest, Word32AndToClearBits) { - TRACED_FORRANGE(int32_t, shift, 1, 31) { - int32_t mask = ~((1 << shift) - 1); - StreamBuilder m(this, MachineType::Int32(), MachineType::Int32()); - m.Return(m.Word32And(m.Parameter(0), m.Int32Constant(mask))); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kMipsIns, s[0]->arch_opcode()); - ASSERT_EQ(3U, s[0]->InputCount()); - EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1))); - EXPECT_EQ(shift, s.ToInt32(s[0]->InputAt(2))); - } - TRACED_FORRANGE(int32_t, shift, 1, 31) { - int32_t mask = ~((1 << shift) - 1); - StreamBuilder m(this, MachineType::Int32(), MachineType::Int32()); - m.Return(m.Word32And(m.Int32Constant(mask), m.Parameter(0))); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kMipsIns, s[0]->arch_opcode()); - ASSERT_EQ(3U, s[0]->InputCount()); - EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1))); - EXPECT_EQ(shift, s.ToInt32(s[0]->InputAt(2))); - } -} - - -// ---------------------------------------------------------------------------- -// MUL/DIV instructions. -// ---------------------------------------------------------------------------- - -using InstructionSelectorMulDivTest = - InstructionSelectorTestWithParam; - -TEST_P(InstructionSelectorMulDivTest, Parameter) { - const MachInst2 dpi = GetParam(); - const MachineType type = dpi.machine_type; - StreamBuilder m(this, type, type, type); - m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1))); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode()); - EXPECT_EQ(2U, s[0]->InputCount()); - EXPECT_EQ(1U, s[0]->OutputCount()); -} - -INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorMulDivTest, - ::testing::ValuesIn(kMulDivInstructions)); - -// ---------------------------------------------------------------------------- -// MOD instructions. -// ---------------------------------------------------------------------------- - -using InstructionSelectorModTest = InstructionSelectorTestWithParam; - -TEST_P(InstructionSelectorModTest, Parameter) { - const MachInst2 dpi = GetParam(); - const MachineType type = dpi.machine_type; - StreamBuilder m(this, type, type, type); - m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1))); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode()); - EXPECT_EQ(2U, s[0]->InputCount()); - EXPECT_EQ(1U, s[0]->OutputCount()); -} - -INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorModTest, - ::testing::ValuesIn(kModInstructions)); - -// ---------------------------------------------------------------------------- -// Floating point instructions. 
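// [Editorial note, a sketch, not part of the original file; helper name
// illustrative] The kMipsIns match in Word32AndToClearBits above works
// because anding with ~((1 << n) - 1) zeroes the low n bits, and
// ins rt, zero, 0, n writes an n-bit zero field at bit 0 with the same
// effect:
//
//   uint32_t ClearLow4(uint32_t x) { return x & ~0xFu; }  // ins x, zero, 0, 4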
-// ---------------------------------------------------------------------------- - -using InstructionSelectorFPArithTest = - InstructionSelectorTestWithParam; - -TEST_P(InstructionSelectorFPArithTest, Parameter) { - const MachInst2 fpa = GetParam(); - StreamBuilder m(this, fpa.machine_type, fpa.machine_type, fpa.machine_type); - m.Return((m.*fpa.constructor)(m.Parameter(0), m.Parameter(1))); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(fpa.arch_opcode, s[0]->arch_opcode()); - EXPECT_EQ(2U, s[0]->InputCount()); - EXPECT_EQ(1U, s[0]->OutputCount()); -} - -INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, - InstructionSelectorFPArithTest, - ::testing::ValuesIn(kFPArithInstructions)); - -// ---------------------------------------------------------------------------- -// Integer arithmetic. -// ---------------------------------------------------------------------------- - -using InstructionSelectorIntArithTwoTest = - InstructionSelectorTestWithParam; - -TEST_P(InstructionSelectorIntArithTwoTest, Parameter) { - const MachInst2 intpa = GetParam(); - StreamBuilder m(this, intpa.machine_type, intpa.machine_type, - intpa.machine_type); - m.Return((m.*intpa.constructor)(m.Parameter(0), m.Parameter(1))); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(intpa.arch_opcode, s[0]->arch_opcode()); - EXPECT_EQ(2U, s[0]->InputCount()); - EXPECT_EQ(1U, s[0]->OutputCount()); -} - -INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, - InstructionSelectorIntArithTwoTest, - ::testing::ValuesIn(kAddSubInstructions)); - -// ---------------------------------------------------------------------------- -// One node. -// ---------------------------------------------------------------------------- - -using InstructionSelectorIntArithOneTest = - InstructionSelectorTestWithParam; - -TEST_P(InstructionSelectorIntArithOneTest, Parameter) { - const MachInst1 intpa = GetParam(); - StreamBuilder m(this, intpa.machine_type, intpa.machine_type, - intpa.machine_type); - m.Return((m.*intpa.constructor)(m.Parameter(0))); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(intpa.arch_opcode, s[0]->arch_opcode()); - EXPECT_EQ(2U, s[0]->InputCount()); - EXPECT_EQ(1U, s[0]->OutputCount()); -} - -INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, - InstructionSelectorIntArithOneTest, - ::testing::ValuesIn(kAddSubOneInstructions)); - -// ---------------------------------------------------------------------------- -// Conversions. 
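// [Editorial note, a sketch; helper name illustrative] Int32Neg maps to
// kMipsSub in kAddSubOneInstructions because MIPS has no dedicated
// negate: -x is a subtraction from the zero register, so the one-input
// node still selects as a two-input sub:
//
//   int32_t Neg(int32_t x) { return 0 - x; }  // one kMipsSub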
-// ---------------------------------------------------------------------------- - -using InstructionSelectorConversionTest = - InstructionSelectorTestWithParam; - -TEST_P(InstructionSelectorConversionTest, Parameter) { - const Conversion conv = GetParam(); - StreamBuilder m(this, conv.mi.machine_type, conv.src_machine_type); - m.Return((m.*conv.mi.constructor)(m.Parameter(0))); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(conv.mi.arch_opcode, s[0]->arch_opcode()); - EXPECT_EQ(1U, s[0]->InputCount()); - EXPECT_EQ(1U, s[0]->OutputCount()); -} - -INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, - InstructionSelectorConversionTest, - ::testing::ValuesIn(kConversionInstructions)); - -using CombineChangeFloat64ToInt32WithRoundFloat64 = - InstructionSelectorTestWithParam; - -TEST_P(CombineChangeFloat64ToInt32WithRoundFloat64, Parameter) { - { - const Conversion conv = GetParam(); - StreamBuilder m(this, conv.mi.machine_type, conv.src_machine_type); - m.Return(m.ChangeFloat64ToInt32((m.*conv.mi.constructor)(m.Parameter(0)))); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(conv.mi.arch_opcode, s[0]->arch_opcode()); - EXPECT_EQ(kMode_None, s[0]->addressing_mode()); - ASSERT_EQ(1U, s[0]->InputCount()); - EXPECT_EQ(1U, s[0]->OutputCount()); - } -} - -INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, - CombineChangeFloat64ToInt32WithRoundFloat64, - ::testing::ValuesIn(kFloat64RoundInstructions)); - -using CombineChangeFloat32ToInt32WithRoundFloat32 = - InstructionSelectorTestWithParam; - -TEST_P(CombineChangeFloat32ToInt32WithRoundFloat32, Parameter) { - { - const Conversion conv = GetParam(); - StreamBuilder m(this, conv.mi.machine_type, conv.src_machine_type); - m.Return(m.ChangeFloat64ToInt32( - m.ChangeFloat32ToFloat64((m.*conv.mi.constructor)(m.Parameter(0))))); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(conv.mi.arch_opcode, s[0]->arch_opcode()); - EXPECT_EQ(kMode_None, s[0]->addressing_mode()); - ASSERT_EQ(1U, s[0]->InputCount()); - EXPECT_EQ(1U, s[0]->OutputCount()); - } -} - -INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, - CombineChangeFloat32ToInt32WithRoundFloat32, - ::testing::ValuesIn(kFloat32RoundInstructions)); - -TEST_F(InstructionSelectorTest, ChangeFloat64ToInt32OfChangeFloat32ToFloat64) { - { - StreamBuilder m(this, MachineType::Int32(), MachineType::Float32()); - m.Return(m.ChangeFloat64ToInt32(m.ChangeFloat32ToFloat64(m.Parameter(0)))); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kMipsTruncWS, s[0]->arch_opcode()); - EXPECT_EQ(kMode_None, s[0]->addressing_mode()); - ASSERT_EQ(1U, s[0]->InputCount()); - EXPECT_EQ(1U, s[0]->OutputCount()); - } -} - - -TEST_F(InstructionSelectorTest, - TruncateFloat64ToFloat32OfChangeInt32ToFloat64) { - { - StreamBuilder m(this, MachineType::Float32(), MachineType::Int32()); - m.Return( - m.TruncateFloat64ToFloat32(m.ChangeInt32ToFloat64(m.Parameter(0)))); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kMipsCvtSW, s[0]->arch_opcode()); - EXPECT_EQ(kMode_None, s[0]->addressing_mode()); - ASSERT_EQ(1U, s[0]->InputCount()); - EXPECT_EQ(1U, s[0]->OutputCount()); - } -} - - -// ---------------------------------------------------------------------------- -// Loads and stores. 
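// [Editorial note, a sketch, not part of the original file; helper name
// illustrative] Folding ChangeFloat64ToInt32(ChangeFloat32ToFloat64(x))
// into a single kMipsTruncWS above is sound because float32 -> float64
// widening is exact, so truncating the widened double always equals
// truncating the original float:
//
//   int32_t Trunc(float f) { return static_cast<int32_t>(f); }  // one trunc.w.s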
-// ---------------------------------------------------------------------------- - -namespace { - -struct MemoryAccess { - MachineType type; - ArchOpcode load_opcode; - ArchOpcode store_opcode; -}; - - -static const MemoryAccess kMemoryAccesses[] = { - {MachineType::Int8(), kMipsLb, kMipsSb}, - {MachineType::Uint8(), kMipsLbu, kMipsSb}, - {MachineType::Int16(), kMipsLh, kMipsSh}, - {MachineType::Uint16(), kMipsLhu, kMipsSh}, - {MachineType::Int32(), kMipsLw, kMipsSw}, - {MachineType::Float32(), kMipsLwc1, kMipsSwc1}, - {MachineType::Float64(), kMipsLdc1, kMipsSdc1}}; - - -struct MemoryAccessImm { - MachineType type; - ArchOpcode load_opcode; - ArchOpcode store_opcode; - bool (InstructionSelectorTest::Stream::*val_predicate)( - const InstructionOperand*) const; - const int32_t immediates[40]; -}; - - -std::ostream& operator<<(std::ostream& os, const MemoryAccessImm& acc) { - return os << acc.type; -} - - -struct MemoryAccessImm1 { - MachineType type; - ArchOpcode load_opcode; - ArchOpcode store_opcode; - bool (InstructionSelectorTest::Stream::*val_predicate)( - const InstructionOperand*) const; - const int32_t immediates[5]; -}; - - -std::ostream& operator<<(std::ostream& os, const MemoryAccessImm1& acc) { - return os << acc.type; -} - -struct MemoryAccessImm2 { - MachineType type; - ArchOpcode store_opcode; - ArchOpcode store_opcode_unaligned; - bool (InstructionSelectorTest::Stream::*val_predicate)( - const InstructionOperand*) const; - const int32_t immediates[40]; -}; - -std::ostream& operator<<(std::ostream& os, const MemoryAccessImm2& acc) { - return os << acc.type; -} - -// ---------------------------------------------------------------------------- -// Loads and stores immediate values. -// ---------------------------------------------------------------------------- - - -const MemoryAccessImm kMemoryAccessesImm[] = { - {MachineType::Int8(), - kMipsLb, - kMipsSb, - &InstructionSelectorTest::Stream::IsInteger, - {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89, - -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109, - 115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}}, - {MachineType::Uint8(), - kMipsLbu, - kMipsSb, - &InstructionSelectorTest::Stream::IsInteger, - {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89, - -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109, - 115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}}, - {MachineType::Int16(), - kMipsLh, - kMipsSh, - &InstructionSelectorTest::Stream::IsInteger, - {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89, - -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109, - 115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}}, - {MachineType::Uint16(), - kMipsLhu, - kMipsSh, - &InstructionSelectorTest::Stream::IsInteger, - {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89, - -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109, - 115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}}, - {MachineType::Int32(), - kMipsLw, - kMipsSw, - &InstructionSelectorTest::Stream::IsInteger, - {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89, - -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109, - 115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}}, - {MachineType::Float32(), - kMipsLwc1, - kMipsSwc1, - &InstructionSelectorTest::Stream::IsDouble, - {-4095, -3340, 
-3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89, - -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109, - 115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}}, - {MachineType::Float64(), - kMipsLdc1, - kMipsSdc1, - &InstructionSelectorTest::Stream::IsDouble, - {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89, - -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109, - 115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}}}; - -const MemoryAccessImm1 kMemoryAccessImmMoreThan16bit[] = { - {MachineType::Int8(), - kMipsLb, - kMipsSb, - &InstructionSelectorTest::Stream::IsInteger, - {-65000, -55000, 32777, 55000, 65000}}, - {MachineType::Uint8(), - kMipsLbu, - kMipsSb, - &InstructionSelectorTest::Stream::IsInteger, - {-65000, -55000, 32777, 55000, 65000}}, - {MachineType::Int16(), - kMipsLh, - kMipsSh, - &InstructionSelectorTest::Stream::IsInteger, - {-65000, -55000, 32777, 55000, 65000}}, - {MachineType::Uint16(), - kMipsLhu, - kMipsSh, - &InstructionSelectorTest::Stream::IsInteger, - {-65000, -55000, 32777, 55000, 65000}}, - {MachineType::Int32(), - kMipsLw, - kMipsSw, - &InstructionSelectorTest::Stream::IsInteger, - {-65000, -55000, 32777, 55000, 65000}}, - {MachineType::Float32(), - kMipsLwc1, - kMipsSwc1, - &InstructionSelectorTest::Stream::IsDouble, - {-65000, -55000, 32777, 55000, 65000}}, - {MachineType::Float64(), - kMipsLdc1, - kMipsSdc1, - &InstructionSelectorTest::Stream::IsDouble, - {-65000, -55000, 32777, 55000, 65000}}}; - -const MemoryAccessImm2 kMemoryAccessesImmUnaligned[] = { - {MachineType::Int16(), - kMipsUsh, - kMipsSh, - &InstructionSelectorTest::Stream::IsInteger, - {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, - -89, -87, -86, -82, -44, -23, -3, 0, 7, 10, - 39, 52, 69, 71, 91, 92, 107, 109, 115, 124, - 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}}, - {MachineType::Int32(), - kMipsUsw, - kMipsSw, - &InstructionSelectorTest::Stream::IsInteger, - {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, - -89, -87, -86, -82, -44, -23, -3, 0, 7, 10, - 39, 52, 69, 71, 91, 92, 107, 109, 115, 124, - 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}}, - {MachineType::Float32(), - kMipsUswc1, - kMipsSwc1, - &InstructionSelectorTest::Stream::IsDouble, - {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, - -89, -87, -86, -82, -44, -23, -3, 0, 7, 10, - 39, 52, 69, 71, 91, 92, 107, 109, 115, 124, - 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}}, - {MachineType::Float64(), - kMipsUsdc1, - kMipsSdc1, - &InstructionSelectorTest::Stream::IsDouble, - {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, - -89, -87, -86, -82, -44, -23, -3, 0, 7, 10, - 39, 52, 69, 71, 91, 92, 107, 109, 115, 124, - 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}}}; - -} // namespace - -using InstructionSelectorMemoryAccessTest = - InstructionSelectorTestWithParam; - -TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) { - const MemoryAccess memacc = GetParam(); - StreamBuilder m(this, memacc.type, MachineType::Pointer(), - MachineType::Int32()); - m.Return(m.Load(memacc.type, m.Parameter(0))); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode()); - EXPECT_EQ(kMode_MRI, s[0]->addressing_mode()); -} - - -TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) { - const MemoryAccess memacc = GetParam(); - StreamBuilder m(this, 
MachineType::Int32(), MachineType::Pointer(), - MachineType::Int32(), memacc.type); - m.Store(memacc.type.representation(), m.Parameter(0), m.Parameter(1), - kNoWriteBarrier); - m.Return(m.Int32Constant(0)); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode()); - EXPECT_EQ(kMode_MRI, s[0]->addressing_mode()); -} - -INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, - InstructionSelectorMemoryAccessTest, - ::testing::ValuesIn(kMemoryAccesses)); - -// ---------------------------------------------------------------------------- -// Load immediate. -// ---------------------------------------------------------------------------- - -using InstructionSelectorMemoryAccessImmTest = - InstructionSelectorTestWithParam; - -TEST_P(InstructionSelectorMemoryAccessImmTest, LoadWithImmediateIndex) { - const MemoryAccessImm memacc = GetParam(); - TRACED_FOREACH(int32_t, index, memacc.immediates) { - StreamBuilder m(this, memacc.type, MachineType::Pointer()); - m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index))); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode()); - EXPECT_EQ(kMode_MRI, s[0]->addressing_mode()); - ASSERT_EQ(2U, s[0]->InputCount()); - ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind()); - EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1))); - ASSERT_EQ(1U, s[0]->OutputCount()); - EXPECT_TRUE((s.*memacc.val_predicate)(s[0]->Output())); - } -} - - -// ---------------------------------------------------------------------------- -// Store immediate. -// ---------------------------------------------------------------------------- - - -TEST_P(InstructionSelectorMemoryAccessImmTest, StoreWithImmediateIndex) { - const MemoryAccessImm memacc = GetParam(); - TRACED_FOREACH(int32_t, index, memacc.immediates) { - StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(), - memacc.type); - m.Store(memacc.type.representation(), m.Parameter(0), - m.Int32Constant(index), m.Parameter(1), kNoWriteBarrier); - m.Return(m.Int32Constant(0)); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode()); - EXPECT_EQ(kMode_MRI, s[0]->addressing_mode()); - ASSERT_EQ(3U, s[0]->InputCount()); - ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind()); - EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1))); - EXPECT_EQ(0U, s[0]->OutputCount()); - } -} - -TEST_P(InstructionSelectorMemoryAccessImmTest, StoreZero) { - const MemoryAccessImm memacc = GetParam(); - TRACED_FOREACH(int32_t, index, memacc.immediates) { - StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer()); - m.Store(memacc.type.representation(), m.Parameter(0), - m.Int32Constant(index), m.Int32Constant(0), kNoWriteBarrier); - m.Return(m.Int32Constant(0)); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode()); - EXPECT_EQ(kMode_MRI, s[0]->addressing_mode()); - ASSERT_EQ(3U, s[0]->InputCount()); - ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind()); - EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1))); - ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(2)->kind()); - EXPECT_EQ(0, s.ToInt64(s[0]->InputAt(2))); - EXPECT_EQ(0U, s[0]->OutputCount()); - } -} - -INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, - InstructionSelectorMemoryAccessImmTest, - ::testing::ValuesIn(kMemoryAccessesImm)); - -using InstructionSelectorMemoryAccessUnalignedImmTest = - InstructionSelectorTestWithParam; - 
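// [Editorial note, a sketch, not part of the original file; helper name
// illustrative] kMode_MRI in the tests above is the
// [base register + immediate] addressing mode: a constant index is folded
// into the 16-bit displacement of the MIPS load/store, so
//
//   int32_t LoadAt(const int32_t* base) { return base[10]; }
//
// is expected to select a single lw with offset 40 instead of a separate
// address computation.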
-TEST_P(InstructionSelectorMemoryAccessUnalignedImmTest, StoreZero) { - const MemoryAccessImm2 memacc = GetParam(); - TRACED_FOREACH(int32_t, index, memacc.immediates) { - StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer()); - bool unaligned_store_supported = - m.machine()->UnalignedStoreSupported(memacc.type.representation()); - m.UnalignedStore(memacc.type.representation(), m.Parameter(0), - m.Int32Constant(index), m.Int32Constant(0)); - m.Return(m.Int32Constant(0)); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(unaligned_store_supported ? memacc.store_opcode_unaligned - : memacc.store_opcode, - s[0]->arch_opcode()); - EXPECT_EQ(kMode_MRI, s[0]->addressing_mode()); - ASSERT_EQ(3U, s[0]->InputCount()); - ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind()); - EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1))); - ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(2)->kind()); - EXPECT_EQ(0, s.ToInt64(s[0]->InputAt(2))); - EXPECT_EQ(0U, s[0]->OutputCount()); - } -} - -INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, - InstructionSelectorMemoryAccessUnalignedImmTest, - ::testing::ValuesIn(kMemoryAccessesImmUnaligned)); - -// ---------------------------------------------------------------------------- -// Load/store offsets more than 16 bits. -// ---------------------------------------------------------------------------- - -using InstructionSelectorMemoryAccessImmMoreThan16bitTest = - InstructionSelectorTestWithParam; - -TEST_P(InstructionSelectorMemoryAccessImmMoreThan16bitTest, - LoadWithImmediateIndex) { - const MemoryAccessImm1 memacc = GetParam(); - TRACED_FOREACH(int32_t, index, memacc.immediates) { - StreamBuilder m(this, memacc.type, MachineType::Pointer()); - m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index))); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode()); - EXPECT_EQ(kMode_MRI, s[0]->addressing_mode()); - EXPECT_EQ(2U, s[0]->InputCount()); - EXPECT_EQ(1U, s[0]->OutputCount()); - } -} - - -TEST_P(InstructionSelectorMemoryAccessImmMoreThan16bitTest, - StoreWithImmediateIndex) { - const MemoryAccessImm1 memacc = GetParam(); - TRACED_FOREACH(int32_t, index, memacc.immediates) { - StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(), - memacc.type); - m.Store(memacc.type.representation(), m.Parameter(0), - m.Int32Constant(index), m.Parameter(1), kNoWriteBarrier); - m.Return(m.Int32Constant(0)); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode()); - EXPECT_EQ(kMode_MRI, s[0]->addressing_mode()); - EXPECT_EQ(3U, s[0]->InputCount()); - EXPECT_EQ(0U, s[0]->OutputCount()); - } -} - -INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, - InstructionSelectorMemoryAccessImmMoreThan16bitTest, - ::testing::ValuesIn(kMemoryAccessImmMoreThan16bit)); - -// ---------------------------------------------------------------------------- -// kMipsTst testing. 
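// [Editorial note, a sketch] The "more than 16 bit" offsets above (e.g.
// 65000) matter because MIPS lw/sw displacements are signed 16-bit
// (-32768..32767); such an index cannot be encoded in the instruction, so
// the selector still emits one kMipsLw/kMipsSw but the address is
// materialized into a scratch register first.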
-// ---------------------------------------------------------------------------- - - -TEST_F(InstructionSelectorTest, Word32EqualWithZero) { - { - StreamBuilder m(this, MachineType::Int32(), MachineType::Int32()); - m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(0))); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kMipsCmp, s[0]->arch_opcode()); - EXPECT_EQ(kMode_None, s[0]->addressing_mode()); - ASSERT_EQ(2U, s[0]->InputCount()); - EXPECT_EQ(1U, s[0]->OutputCount()); - EXPECT_EQ(kFlags_set, s[0]->flags_mode()); - EXPECT_EQ(kEqual, s[0]->flags_condition()); - } - { - StreamBuilder m(this, MachineType::Int32(), MachineType::Int32()); - m.Return(m.Word32Equal(m.Int32Constant(0), m.Parameter(0))); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kMipsCmp, s[0]->arch_opcode()); - EXPECT_EQ(kMode_None, s[0]->addressing_mode()); - ASSERT_EQ(2U, s[0]->InputCount()); - EXPECT_EQ(1U, s[0]->OutputCount()); - EXPECT_EQ(kFlags_set, s[0]->flags_mode()); - EXPECT_EQ(kEqual, s[0]->flags_condition()); - } -} - - -TEST_F(InstructionSelectorTest, Word32Clz) { - StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32()); - Node* const p0 = m.Parameter(0); - Node* const n = m.Word32Clz(p0); - m.Return(n); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kMipsClz, s[0]->arch_opcode()); - ASSERT_EQ(1U, s[0]->InputCount()); - EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); - ASSERT_EQ(1U, s[0]->OutputCount()); - EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); -} - - -TEST_F(InstructionSelectorTest, Float32Abs) { - StreamBuilder m(this, MachineType::Float32(), MachineType::Float32()); - Node* const p0 = m.Parameter(0); - Node* const n = m.Float32Abs(p0); - m.Return(n); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kMipsAbsS, s[0]->arch_opcode()); - ASSERT_EQ(1U, s[0]->InputCount()); - EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); - ASSERT_EQ(1U, s[0]->OutputCount()); - EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); -} - - -TEST_F(InstructionSelectorTest, Float64Abs) { - StreamBuilder m(this, MachineType::Float64(), MachineType::Float64()); - Node* const p0 = m.Parameter(0); - Node* const n = m.Float64Abs(p0); - m.Return(n); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kMipsAbsD, s[0]->arch_opcode()); - ASSERT_EQ(1U, s[0]->InputCount()); - EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); - ASSERT_EQ(1U, s[0]->OutputCount()); - EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); -} - -TEST_F(InstructionSelectorTest, Float32AddWithFloat32Mul) { - if (!IsMipsArchVariant(kMips32r2)) { - return; - } - { - StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(), - MachineType::Float32(), MachineType::Float32()); - Node* const p0 = m.Parameter(0); - Node* const p1 = m.Parameter(1); - Node* const p2 = m.Parameter(2); - Node* const n = m.Float32Add(m.Float32Mul(p0, p1), p2); - m.Return(n); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kMipsMaddS, s[0]->arch_opcode()); - ASSERT_EQ(3U, s[0]->InputCount()); - EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(0))); - EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1))); - EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(2))); - ASSERT_EQ(1U, s[0]->OutputCount()); - EXPECT_FALSE( - UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy()); - EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); - EXPECT_EQ(kFlags_none, s[0]->flags_mode()); - } - { - StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(), - 
MachineType::Float32(), MachineType::Float32()); - Node* const p0 = m.Parameter(0); - Node* const p1 = m.Parameter(1); - Node* const p2 = m.Parameter(2); - Node* const n = m.Float32Add(p0, m.Float32Mul(p1, p2)); - m.Return(n); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kMipsMaddS, s[0]->arch_opcode()); - ASSERT_EQ(3U, s[0]->InputCount()); - EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); - EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); - EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2))); - ASSERT_EQ(1U, s[0]->OutputCount()); - EXPECT_FALSE( - UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy()); - EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); - EXPECT_EQ(kFlags_none, s[0]->flags_mode()); - } -} - -TEST_F(InstructionSelectorTest, Float64AddWithFloat64Mul) { - if (!IsMipsArchVariant(kMips32r2)) { - return; - } - { - StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(), - MachineType::Float64(), MachineType::Float64()); - Node* const p0 = m.Parameter(0); - Node* const p1 = m.Parameter(1); - Node* const p2 = m.Parameter(2); - Node* const n = m.Float64Add(m.Float64Mul(p0, p1), p2); - m.Return(n); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kMipsMaddD, s[0]->arch_opcode()); - ASSERT_EQ(3U, s[0]->InputCount()); - EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(0))); - EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1))); - EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(2))); - ASSERT_EQ(1U, s[0]->OutputCount()); - EXPECT_FALSE( - UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy()); - EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); - EXPECT_EQ(kFlags_none, s[0]->flags_mode()); - } - { - StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(), - MachineType::Float64(), MachineType::Float64()); - Node* const p0 = m.Parameter(0); - Node* const p1 = m.Parameter(1); - Node* const p2 = m.Parameter(2); - Node* const n = m.Float64Add(p0, m.Float64Mul(p1, p2)); - m.Return(n); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kMipsMaddD, s[0]->arch_opcode()); - ASSERT_EQ(3U, s[0]->InputCount()); - EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); - EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); - EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2))); - ASSERT_EQ(1U, s[0]->OutputCount()); - EXPECT_FALSE( - UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy()); - EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); - EXPECT_EQ(kFlags_none, s[0]->flags_mode()); - } -} - -TEST_F(InstructionSelectorTest, Float32SubWithFloat32Mul) { - StreamBuilder m(this, MachineType::Float32(), MachineType::Float32(), - MachineType::Float32(), MachineType::Float32()); - if (!IsMipsArchVariant(kMips32r2)) { - return; - } - { - Node* const p0 = m.Parameter(0); - Node* const p1 = m.Parameter(1); - Node* const p2 = m.Parameter(2); - Node* n = nullptr; - - n = m.Float32Sub(m.Float32Mul(p1, p2), p0); - m.Return(n); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kMipsMsubS, s[0]->arch_opcode()); - ASSERT_EQ(3U, s[0]->InputCount()); - EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); - EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); - EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2))); - ASSERT_EQ(1U, s[0]->OutputCount()); - EXPECT_FALSE( - UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy()); - EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); - EXPECT_EQ(kFlags_none, s[0]->flags_mode()); - } -} - -TEST_F(InstructionSelectorTest, Float64SubWithFloat64Mul) { - 
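// [Editorial note, a sketch, not part of the original file; helper name
// illustrative] On mips32r2, msub.d computes a*b - c in one instruction,
// so the Float64Sub(Float64Mul(p1, p2), p0) pattern built below is
// expected to select kMipsMsubD:
//
//   double MulSub(double a, double b, double c) { return a * b - c; }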
StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(), - MachineType::Float64(), MachineType::Float64()); - if (!IsMipsArchVariant(kMips32r2)) { - return; - } - { - Node* const p0 = m.Parameter(0); - Node* const p1 = m.Parameter(1); - Node* const p2 = m.Parameter(2); - Node* n = nullptr; - - n = m.Float64Sub(m.Float64Mul(p1, p2), p0); - m.Return(n); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kMipsMsubD, s[0]->arch_opcode()); - ASSERT_EQ(3U, s[0]->InputCount()); - EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0))); - EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1))); - EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(2))); - ASSERT_EQ(1U, s[0]->OutputCount()); - EXPECT_FALSE( - UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy()); - EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); - EXPECT_EQ(kFlags_none, s[0]->flags_mode()); - } -} - -TEST_F(InstructionSelectorTest, Float64Max) { - StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(), - MachineType::Float64()); - Node* const p0 = m.Parameter(0); - Node* const p1 = m.Parameter(1); - Node* const n = m.Float64Max(p0, p1); - m.Return(n); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kMipsFloat64Max, s[0]->arch_opcode()); - ASSERT_EQ(2U, s[0]->InputCount()); - ASSERT_EQ(1U, s[0]->OutputCount()); - EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); -} - - -TEST_F(InstructionSelectorTest, Float64Min) { - StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(), - MachineType::Float64()); - Node* const p0 = m.Parameter(0); - Node* const p1 = m.Parameter(1); - Node* const n = m.Float64Min(p0, p1); - m.Return(n); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kMipsFloat64Min, s[0]->arch_opcode()); - ASSERT_EQ(2U, s[0]->InputCount()); - ASSERT_EQ(1U, s[0]->OutputCount()); - EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output())); -} - -TEST_F(InstructionSelectorTest, Word32ReverseBytes) { - { - StreamBuilder m(this, MachineType::Int32(), MachineType::Int32()); - m.Return(m.Word32ReverseBytes(m.Parameter(0))); - Stream s = m.Build(); - ASSERT_EQ(1U, s.size()); - EXPECT_EQ(kMipsByteSwap32, s[0]->arch_opcode()); - EXPECT_EQ(1U, s[0]->InputCount()); - EXPECT_EQ(1U, s[0]->OutputCount()); - } -} - -} // namespace compiler -} // namespace internal -} // namespace v8 diff --git a/test/unittests/regexp/regexp-unittest.cc b/test/unittests/regexp/regexp-unittest.cc index 8ce5fb187c..f4e0f95776 100644 --- a/test/unittests/regexp/regexp-unittest.cc +++ b/test/unittests/regexp/regexp-unittest.cc @@ -615,8 +615,6 @@ using ArchRegExpMacroAssembler = RegExpMacroAssemblerARM64; using ArchRegExpMacroAssembler = RegExpMacroAssemblerS390; #elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 using ArchRegExpMacroAssembler = RegExpMacroAssemblerPPC; -#elif V8_TARGET_ARCH_MIPS -using ArchRegExpMacroAssembler = RegExpMacroAssemblerMIPS; #elif V8_TARGET_ARCH_MIPS64 using ArchRegExpMacroAssembler = RegExpMacroAssemblerMIPS; #elif V8_TARGET_ARCH_LOONG64 diff --git a/test/unittests/unittests.status b/test/unittests/unittests.status index 1f1891fdd3..3242ed72ce 100644 --- a/test/unittests/unittests.status +++ b/test/unittests/unittests.status @@ -102,12 +102,6 @@ 'BytecodeGeneratorTest.WideRegisters': [SKIP], }], # arch == ppc64 -############################################################################## -['arch == mips', { - # TODO(mips-team): Currently fails on mips board. 
- 'ParsingTest.TooManyArguments': [SKIP], -}], # 'arch == mips' - ############################################################################## ['arch == mips64el', { # TODO(mips-team): mips64 do not allocate odd register in liftoff. diff --git a/test/unittests/wasm/liftoff-register-unittests.cc b/test/unittests/wasm/liftoff-register-unittests.cc index 6a0fd21124..d5cad9cc97 100644 --- a/test/unittests/wasm/liftoff-register-unittests.cc +++ b/test/unittests/wasm/liftoff-register-unittests.cc @@ -7,8 +7,6 @@ #include "src/execution/ia32/frame-constants-ia32.h" #elif V8_TARGET_ARCH_X64 #include "src/execution/x64/frame-constants-x64.h" -#elif V8_TARGET_ARCH_MIPS -#include "src/execution/mips/frame-constants-mips.h" #elif V8_TARGET_ARCH_MIPS64 #include "src/execution/mips64/frame-constants-mips64.h" #elif V8_TARGET_ARCH_LOONG64 diff --git a/test/wasm-spec-tests/wasm-spec-tests.status b/test/wasm-spec-tests/wasm-spec-tests.status index 8890625e0c..82366e0d27 100644 --- a/test/wasm-spec-tests/wasm-spec-tests.status +++ b/test/wasm-spec-tests/wasm-spec-tests.status @@ -50,7 +50,7 @@ 'simd_f32x4_pmin_pmax' : [PASS, FAIL], }], # arch == arm and not simulator_run -['arch == mipsel or arch == mips64el or arch == mips or arch == mips64', { +['arch == mips64el or arch == mips64', { # These tests fail because mips does not support the correct NaN bit patterns. 'float_misc': [SKIP], 'float_exprs': [SKIP], @@ -80,19 +80,19 @@ 'proposals/tail-call/float_exprs': [SKIP], 'proposals/tail-call/float_misc': [SKIP], 'proposals/tail-call/conversions': [SKIP], -}], # 'arch == mipsel or arch == mips64el or arch == mips or arch == mips64' +}], # 'arch == mips64el or arch == mips64' -['(arch == mipsel or arch == mips64el or arch == mips or arch == mips64) and not simulator_run', { +['(arch == mips64el or arch == mips64) and not simulator_run', { # This test fail because mips does not support the correct NaN bit patterns. # But it doesn't fail in simulator. 'conversions': [SKIP], -}], # '(arch == mipsel or arch == mips64el or arch == mips or arch == mips64) and not simulator_run' +}], # '(arch == mips64el or arch == mips64) and not simulator_run' -['(arch == mipsel or arch == mips64el or arch == loong64) and simulator_run', { +['(arch == mips64el or arch == loong64) and simulator_run', { # These tests need larger stack size on simulator. 'skip-stack-guard-page': '--sim-stack-size=8192', 'proposals/tail-call/skip-stack-guard-page': '--sim-stack-size=8192', -}], # '(arch == mipsel or arch == mips64el) and simulator_run' +}], # '(arch == mips64el or arch == loong64) and simulator_run' ['arch == riscv64', { # These tests need larger stack size on simulator. diff --git a/test/webkit/webkit.status b/test/webkit/webkit.status index 03b6a6e54b..bce71fd0c6 100644 --- a/test/webkit/webkit.status +++ b/test/webkit/webkit.status @@ -75,10 +75,6 @@ # Doesn't work with gcc 4.6 on arm or arm64 for some reason. 'reentrant-caching': [SKIP], }], # 'dcheck_always_on == True and (arch == arm or arch == arm64)' -['arch == mips', { - # Too slow for mips big-endian boards on bots (no FPU). - 'dfg-int-overflow-in-loop': [SKIP], -}], # 'arch == mips' ['arch == s390 or arch == s390x', { # Too slow. 
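# [Editorial note, not part of the original status files] The wasm NaN
# skips above exist because legacy MIPS FPUs use the pre-2008 NaN
# encoding, with the quiet bit inverted relative to IEEE 754-2008, so
# tests asserting exact canonical NaN bit patterns (e.g. 0x7FC00000 for
# f32) cannot pass on that hardware.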
'dfg-int-overflow-in-loop': [SKIP], diff --git a/tools/cppgc/gen_cmake.py b/tools/cppgc/gen_cmake.py index b4a805c07c..cbf27f02a5 100755 --- a/tools/cppgc/gen_cmake.py +++ b/tools/cppgc/gen_cmake.py @@ -246,7 +246,7 @@ option(CPPGC_ENABLE_OBJECT_NAMES "Enable object names in cppgc for debug purpose option(CPPGC_ENABLE_CAGED_HEAP "Enable heap reservation of size 4GB, only possible for 64bit archs" OFF) option(CPPGC_ENABLE_VERIFY_HEAP "Enables additional heap verification phases and checks" OFF) option(CPPGC_ENABLE_YOUNG_GENERATION "Enable young generation in cppgc" OFF) -set(CPPGC_TARGET_ARCH "x64" CACHE STRING "Target architecture, possible options: x64, x86, arm, arm64, ppc64, s390x, mipsel, mips64el") +set(CPPGC_TARGET_ARCH "x64" CACHE STRING "Target architecture, possible options: x64, x86, arm, arm64, ppc64, s390x, mips64el") set(IS_POSIX ${{UNIX}}) set(IS_MAC ${{APPLE}}) diff --git a/tools/dev/gen-tags.py b/tools/dev/gen-tags.py index 5bed28e3fa..4067e3b8ae 100755 --- a/tools/dev/gen-tags.py +++ b/tools/dev/gen-tags.py @@ -24,7 +24,8 @@ import subprocess import sys # All arches that this script understands. -ARCHES = ["ia32", "x64", "arm", "arm64", "mips", "mips64", "ppc", "s390"] +ARCHES = ["ia32", "x64", "arm", "arm64", "mips64", "ppc", "s390"] + def PrintHelpAndExit(): print(__doc__) diff --git a/tools/dev/gm.py b/tools/dev/gm.py index 79fe4db617..58a729045a 100755 --- a/tools/dev/gm.py +++ b/tools/dev/gm.py @@ -40,9 +40,9 @@ BUILD_TARGETS_ALL = ["all"] # All arches that this script understands. ARCHES = [ - "ia32", "x64", "arm", "arm64", "mipsel", "mips64el", "ppc", "ppc64", - "riscv32", "riscv64", "s390", "s390x", "android_arm", "android_arm64", - "loong64", "fuchsia_x64", "fuchsia_arm64" + "ia32", "x64", "arm", "arm64", "mips64el", "ppc", "ppc64", "riscv32", + "riscv64", "s390", "s390x", "android_arm", "android_arm64", "loong64", + "fuchsia_x64", "fuchsia_arm64" ] # Arches that get built/run when you don't specify any. DEFAULT_ARCHES = ["ia32", "x64", "arm", "arm64"] @@ -324,8 +324,8 @@ class Config(object): v8_cpu = "arm" elif self.arch == "android_arm64" or self.arch == "fuchsia_arm64": v8_cpu = "arm64" - elif self.arch in ("arm", "arm64", "mipsel", "mips64el", "ppc", "ppc64", - "riscv64", "riscv32", "s390", "s390x", "loong64"): + elif self.arch in ("arm", "arm64", "mips64el", "ppc", "ppc64", "riscv64", + "riscv32", "s390", "s390x", "loong64"): v8_cpu = self.arch else: return [] diff --git a/tools/disasm.py b/tools/disasm.py index 71fff96c68..06b1c22b02 100644 --- a/tools/disasm.py +++ b/tools/disasm.py @@ -51,7 +51,6 @@ _ARCH_MAP = { "ia32": "-m i386", "x64": "-m i386 -M x86-64", "arm": "-m arm", # Not supported by our objdump build. - "mips": "-m mips", # Not supported by our objdump build. 
"arm64": "-m aarch64" } diff --git a/tools/generate-header-include-checks.py b/tools/generate-header-include-checks.py index c52b0f9663..7bef23bfb5 100755 --- a/tools/generate-header-include-checks.py +++ b/tools/generate-header-include-checks.py @@ -50,8 +50,8 @@ AUTO_EXCLUDE_PATTERNS = [ ] + [ # platform-specific headers '\\b{}\\b'.format(p) - for p in ('win', 'win32', 'ia32', 'x64', 'arm', 'arm64', 'mips', 'mips64', - 's390', 'ppc', 'riscv', 'riscv64', 'riscv32', 'loong64') + for p in ('win', 'win32', 'ia32', 'x64', 'arm', 'arm64', 'mips64', 's390', + 'ppc', 'riscv', 'riscv64', 'riscv32', 'loong64') ] args = None diff --git a/tools/profiling/ll_prof.py b/tools/profiling/ll_prof.py index 998965694c..84edd63335 100755 --- a/tools/profiling/ll_prof.py +++ b/tools/profiling/ll_prof.py @@ -337,7 +337,6 @@ class LogReader(object): _ARCH_TO_POINTER_TYPE_MAP = { "ia32": ctypes.c_uint32, "arm": ctypes.c_uint32, - "mips": ctypes.c_uint32, "x64": ctypes.c_uint64, "arm64": ctypes.c_uint64 } diff --git a/tools/run_perf.py b/tools/run_perf.py index 9b4243e0ed..9313d5715c 100644 --- a/tools/run_perf.py +++ b/tools/run_perf.py @@ -142,8 +142,6 @@ from testrunner.objects.output import Output, NULL_OUTPUT SUPPORTED_ARCHS = ['arm', 'ia32', - 'mips', - 'mipsel', 'x64', 'arm64', 'riscv64'] diff --git a/tools/testrunner/build_config.py b/tools/testrunner/build_config.py index 6cb6e1dcac..099f242a24 100644 --- a/tools/testrunner/build_config.py +++ b/tools/testrunner/build_config.py @@ -6,8 +6,8 @@ from testrunner.local import utils # Increase the timeout for these: SLOW_ARCHS = [ - "arm", "arm64", "mips", "mipsel", "mips64", "mips64el", "s390", "s390x", - "riscv32", "riscv64", "loong64" + "arm", "arm64", "mips64", "mips64el", "s390", "s390x", "riscv32", "riscv64", + "loong64" ] @@ -52,7 +52,7 @@ class BuildConfig(object): self.dict_property_const_tracking = build_config[ 'v8_dict_property_const_tracking'] # Export only for MIPS target - if self.arch in ['mips', 'mipsel', 'mips64', 'mips64el']: + if self.arch in ['mips64', 'mips64el']: self._mips_arch_variant = build_config['mips_arch_variant'] self.mips_use_msa = build_config['mips_use_msa'] @@ -68,7 +68,7 @@ class BuildConfig(object): @property def is_mips_arch(self): - return self.arch in ['mipsel', 'mips', 'mips64', 'mips64el'] + return self.arch in ['mips64', 'mips64el'] @property def simd_mips(self): @@ -89,8 +89,7 @@ class BuildConfig(object): ]) # Set no_simd_hardware on architectures without Simd enabled. 
- if self.arch == 'mips64el' or \ - self.arch == 'mipsel': + if self.arch == 'mips64el': no_simd_hardware = not self.simd_mips if self.arch == 'loong64' or \ diff --git a/tools/testrunner/local/statusfile.py b/tools/testrunner/local/statusfile.py index 2053f52526..5f9766e85c 100644 --- a/tools/testrunner/local/statusfile.py +++ b/tools/testrunner/local/statusfile.py @@ -59,9 +59,9 @@ for key in [SKIP, FAIL, PASS, CRASH, HEAVY, SLOW, FAIL_OK, NO_VARIANTS, VARIABLES = {ALWAYS: True} for var in [ "debug", "release", "big", "little", "android", "arm", "arm64", "ia32", - "mips", "mipsel", "mips64", "mips64el", "x64", "ppc", "ppc64", "s390", - "s390x", "macos", "windows", "linux", "aix", "r1", "r2", "r3", "r5", "r6", - "riscv32", "riscv64", "loong64" + "mips64", "mips64el", "x64", "ppc", "ppc64", "s390", "s390x", "macos", + "windows", "linux", "aix", "r1", "r2", "r3", "r5", "r6", "riscv32", + "riscv64", "loong64" ]: VARIABLES[var] = var diff --git a/tools/testrunner/local/utils.py b/tools/testrunner/local/utils.py index a74bfd5cf9..a73941021b 100644 --- a/tools/testrunner/local/utils.py +++ b/tools/testrunner/local/utils.py @@ -158,9 +158,8 @@ def GuessPowerProcessorVersion(): def UseSimulator(arch): machine = platform.machine() - return (machine and - (arch == "mipsel" or arch == "arm" or arch == "arm64") and - not arch.startswith(machine)) + return (machine and (arch == "arm" or arch == "arm64") and + not arch.startswith(machine)) # This will default to building the 32 bit VM even on machines that are diff --git a/tools/toolchain/BUILD.gn b/tools/toolchain/BUILD.gn index b252c5eed5..49c73df848 100644 --- a/tools/toolchain/BUILD.gn +++ b/tools/toolchain/BUILD.gn @@ -4,28 +4,6 @@ import("//build/toolchain/gcc_toolchain.gni") -gcc_toolchain("mips-bundled") { - toolprefix = rebase_path("//tools/mips_toolchain/bin/mips-mti-linux-gnu-", - root_build_dir) - cc = "${toolprefix}gcc" - cxx = "${toolprefix}g++" - - readelf = "${toolprefix}readelf" - nm = "${toolprefix}nm" - ar = "${toolprefix}ar" - ld = cxx - - # Flag that sets endianness - extra_ldflags = "-EB" - extra_cppflags = "-EB" - - toolchain_args = { - current_cpu = "mips" - current_os = "linux" - is_clang = false - } -} - gcc_toolchain("mips64-bundled") { toolprefix = rebase_path("//tools/mips_toolchain/bin/mips-mti-linux-gnu-", root_build_dir) @@ -48,28 +26,6 @@ gcc_toolchain("mips64-bundled") { } } -gcc_toolchain("mipsel-bundled") { - toolprefix = rebase_path("//tools/mips_toolchain/bin/mips-mti-linux-gnu-", - root_build_dir) - cc = "${toolprefix}gcc" - cxx = "${toolprefix}g++" - - readelf = "${toolprefix}readelf" - nm = "${toolprefix}nm" - ar = "${toolprefix}ar" - ld = cxx - - # Flag that sets endianness - extra_ldflags = "-EL" - extra_cppflags = "-EL" - - toolchain_args = { - current_cpu = "mipsel" - current_os = "linux" - is_clang = false - } -} - gcc_toolchain("mips64el-bundled") { toolprefix = rebase_path("//tools/mips_toolchain/bin/mips-mti-linux-gnu-", root_build_dir) From 133e7f8362a0bb799db950f5f57439033991f147 Mon Sep 17 00:00:00 2001 From: Leszek Swirski Date: Mon, 12 Sep 2022 16:20:08 +0200 Subject: [PATCH 0064/1772] [maglev] Optimize monomorphic keyed loads Add a fast path for keyed loads that are: 1. Monomorphic, 2. Fast elements accesses, 3. Not out-of-bounds (deopt on OOB), 4. 
Not holey Bug: v8:7700 Change-Id: I4d46f4d0ce7065c93a9b092833fb16a8c9e9f94e Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3882974 Auto-Submit: Leszek Swirski Reviewed-by: Jakob Linke Commit-Queue: Leszek Swirski Cr-Commit-Position: refs/heads/main@{#83149} --- src/diagnostics/objects-printer.cc | 2 +- src/maglev/maglev-graph-builder.cc | 73 ++++++++++++++++ src/maglev/maglev-graph-builder.h | 8 ++ src/maglev/maglev-graph-verifier.h | 4 + src/maglev/maglev-ir.cc | 131 +++++++++++++++++++++++++++++ src/maglev/maglev-ir.h | 70 +++++++++++++++ 6 files changed, 287 insertions(+), 1 deletion(-) diff --git a/src/diagnostics/objects-printer.cc b/src/diagnostics/objects-printer.cc index 4e8e0f829a..dc050beb1a 100644 --- a/src/diagnostics/objects-printer.cc +++ b/src/diagnostics/objects-printer.cc @@ -1265,7 +1265,6 @@ void FeedbackNexus::Print(std::ostream& os) { case FeedbackSlotKind::kDefineKeyedOwn: case FeedbackSlotKind::kHasKeyed: case FeedbackSlotKind::kInstanceOf: - case FeedbackSlotKind::kLoadKeyed: case FeedbackSlotKind::kDefineKeyedOwnPropertyInLiteral: case FeedbackSlotKind::kStoreGlobalSloppy: case FeedbackSlotKind::kStoreGlobalStrict: @@ -1291,6 +1290,7 @@ void FeedbackNexus::Print(std::ostream& os) { } break; } + case FeedbackSlotKind::kLoadKeyed: case FeedbackSlotKind::kLoadProperty: { os << InlineCacheState2String(ic_state()); if (ic_state() == InlineCacheState::MONOMORPHIC) { diff --git a/src/maglev/maglev-graph-builder.cc b/src/maglev/maglev-graph-builder.cc index 6d64c91bce..6b7998772d 100644 --- a/src/maglev/maglev-graph-builder.cc +++ b/src/maglev/maglev-graph-builder.cc @@ -22,6 +22,7 @@ #include "src/maglev/maglev-compilation-unit.h" #include "src/maglev/maglev-interpreter-frame-state.h" #include "src/maglev/maglev-ir.h" +#include "src/objects/elements-kind.h" #include "src/objects/feedback-vector.h" #include "src/objects/literal-objects-inl.h" #include "src/objects/name-inl.h" @@ -1133,6 +1134,60 @@ bool MaglevGraphBuilder::TryBuildMonomorphicLoadFromLoadHandler( return true; } +bool MaglevGraphBuilder::TryBuildMonomorphicElementLoad( + ValueNode* object, ValueNode* index, const compiler::MapRef& map, + MaybeObjectHandle handler) { + if (handler.is_null()) return false; + + if (handler->IsSmi()) { + return TryBuildMonomorphicElementLoadFromSmiHandler( + object, index, map, handler->ToSmi().value()); + } + return false; +} + +bool MaglevGraphBuilder::TryBuildMonomorphicElementLoadFromSmiHandler( + ValueNode* object, ValueNode* index, const compiler::MapRef& map, + int32_t handler) { + LoadHandler::Kind kind = LoadHandler::KindBits::decode(handler); + + switch (kind) { + case LoadHandler::Kind::kElement: { + if (LoadHandler::AllowOutOfBoundsBits::decode(handler)) { + return false; + } + ElementsKind elements_kind = + LoadHandler::ElementsKindBits::decode(handler); + if (!IsFastElementsKind(elements_kind)) return false; + + // TODO(leszeks): Handle holey elements. 
+ if (IsHoleyElementsKind(elements_kind)) return false; + DCHECK(!LoadHandler::ConvertHoleBits::decode(handler)); + + BuildMapCheck(object, map); + BuildCheckSmi(index); + + if (LoadHandler::IsJsArrayBits::decode(handler)) { + DCHECK(map.IsJSArrayMap()); + AddNewNode<CheckJSArrayBounds>({object, index}); + } else { + DCHECK(!map.IsJSArrayMap()); + DCHECK(map.IsJSObjectMap()); + AddNewNode<CheckJSObjectElementsBounds>({object, index}); + } + if (elements_kind == ElementsKind::PACKED_DOUBLE_ELEMENTS) { + SetAccumulator(AddNewNode<LoadDoubleElement>({object, index})); + } else { + DCHECK(!IsDoubleElementsKind(elements_kind)); + SetAccumulator(AddNewNode<LoadTaggedElement>({object, index})); + } + return true; + } + default: + return false; + } +} + void MaglevGraphBuilder::VisitGetNamedProperty() { // GetNamedProperty ValueNode* object = LoadRegisterTagged(0); @@ -1226,6 +1281,8 @@ void MaglevGraphBuilder::VisitGetNamedPropertyFromSuper() { void MaglevGraphBuilder::VisitGetKeyedProperty() { // GetKeyedProperty ValueNode* object = LoadRegisterTagged(0); + // TODO(leszeks): We don't need to tag the key if it's an Int32 and a simple + // monomorphic element load. ValueNode* key = GetAccumulatorTagged(); FeedbackSlot slot = GetSlotOperand(1); compiler::FeedbackSource feedback_source{feedback(), slot}; @@ -1240,6 +1297,22 @@ void MaglevGraphBuilder::VisitGetKeyedProperty() { DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess); return; + case compiler::ProcessedFeedback::kElementAccess: { + const compiler::ElementAccessFeedback& element_feedback = + processed_feedback.AsElementAccess(); + if (element_feedback.transition_groups().size() != 1) break; + compiler::MapRef map = MakeRefAssumeMemoryFence( + broker(), element_feedback.transition_groups()[0].front()); + + // Monomorphic load, check the handler. + // TODO(leszeks): Make GetFeedbackForPropertyAccess read the handler.
+ MaybeObjectHandle handler = + FeedbackNexusForSlot(slot).FindHandlerForMap(map.object()); + + if (TryBuildMonomorphicElementLoad(object, key, map, handler)) return; + break; + } + default: break; } diff --git a/src/maglev/maglev-graph-builder.h b/src/maglev/maglev-graph-builder.h index c6aab2587c..89c2c207e3 100644 --- a/src/maglev/maglev-graph-builder.h +++ b/src/maglev/maglev-graph-builder.h @@ -969,6 +969,14 @@ class MaglevGraphBuilder { const compiler::MapRef& map, LoadHandler handler); + bool TryBuildMonomorphicElementLoad(ValueNode* object, ValueNode* index, + const compiler::MapRef& map, + MaybeObjectHandle handler); + bool TryBuildMonomorphicElementLoadFromSmiHandler(ValueNode* object, + ValueNode* index, + const compiler::MapRef& map, + int32_t handler); + bool TryBuildMonomorphicStore(ValueNode* object, const compiler::MapRef& map, MaybeObjectHandle handler); bool TryBuildMonomorphicStoreFromSmiHandler(ValueNode* object, diff --git a/src/maglev/maglev-graph-verifier.h b/src/maglev/maglev-graph-verifier.h index 3ac7d270c7..e837e640bc 100644 --- a/src/maglev/maglev-graph-verifier.h +++ b/src/maglev/maglev-graph-verifier.h @@ -175,6 +175,10 @@ class MaglevGraphVerifier { case Opcode::kGenericLessThan: case Opcode::kGenericLessThanOrEqual: case Opcode::kGenericStrictEqual: + case Opcode::kCheckJSArrayBounds: + case Opcode::kCheckJSObjectElementsBounds: + case Opcode::kLoadTaggedElement: + case Opcode::kLoadDoubleElement: case Opcode::kGetIterator: case Opcode::kTaggedEqual: case Opcode::kTaggedNotEqual: diff --git a/src/maglev/maglev-ir.cc b/src/maglev/maglev-ir.cc index a72313dc56..78532c9024 100644 --- a/src/maglev/maglev-ir.cc +++ b/src/maglev/maglev-ir.cc @@ -28,6 +28,7 @@ #include "src/maglev/maglev-interpreter-frame-state.h" #include "src/maglev/maglev-ir-inl.h" #include "src/maglev/maglev-vreg-allocator.h" +#include "src/objects/instance-type.h" namespace v8 { namespace internal { @@ -1351,6 +1352,56 @@ void CheckMapsWithMigration::PrintParams( os << "(" << *map().object() << ")"; } +void CheckJSArrayBounds::AllocateVreg(MaglevVregAllocationState* vreg_state) { + UseRegister(receiver_input()); + UseRegister(index_input()); +} +void CheckJSArrayBounds::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + Register object = ToRegister(receiver_input()); + Register index = ToRegister(index_input()); + __ AssertNotSmi(object); + __ AssertSmi(index); + + if (FLAG_debug_code) { + __ CmpObjectType(object, JS_ARRAY_TYPE, kScratchRegister); + __ Assert(equal, AbortReason::kUnexpectedValue); + } + TaggedRegister length(kScratchRegister); + __ LoadAnyTaggedField(length, FieldOperand(object, JSArray::kLengthOffset)); + __ cmp_tagged(index, length.reg()); + __ EmitEagerDeoptIf(above_equal, DeoptimizeReason::kOutOfBounds, this); +} + +void CheckJSObjectElementsBounds::AllocateVreg( + MaglevVregAllocationState* vreg_state) { + UseRegister(receiver_input()); + UseRegister(index_input()); +} +void CheckJSObjectElementsBounds::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + Register object = ToRegister(receiver_input()); + Register index = ToRegister(index_input()); + __ AssertNotSmi(object); + __ AssertSmi(index); + + if (FLAG_debug_code) { + __ CmpObjectType(object, FIRST_JS_OBJECT_TYPE, kScratchRegister); + __ Assert(greater_equal, AbortReason::kUnexpectedValue); + } + __ LoadAnyTaggedField( + kScratchRegister, + FieldOperand(object, JSReceiver::kPropertiesOrHashOffset)); + if (FLAG_debug_code) { + __ AssertNotSmi(kScratchRegister); + } + 
TaggedRegister length(kScratchRegister); + __ LoadAnyTaggedField( + length, FieldOperand(kScratchRegister, FixedArray::kLengthOffset)); + __ cmp_tagged(index, length.reg()); + __ EmitEagerDeoptIf(above_equal, DeoptimizeReason::kOutOfBounds, this); +} + void CheckedInternalizedString::AllocateVreg( MaglevVregAllocationState* vreg_state) { UseRegister(object_input()); @@ -1440,6 +1491,86 @@ void LoadDoubleField::PrintParams(std::ostream& os, os << "(0x" << std::hex << offset() << std::dec << ")"; } +void LoadTaggedElement::AllocateVreg(MaglevVregAllocationState* vreg_state) { + UseRegister(object_input()); + UseRegister(index_input()); + DefineAsRegister(vreg_state, this); +} +void LoadTaggedElement::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + Register object = ToRegister(object_input()); + Register index = ToRegister(index_input()); + Register result_reg = ToRegister(result()); + __ AssertNotSmi(object); + if (FLAG_debug_code) { + __ CmpObjectType(object, JS_OBJECT_TYPE, kScratchRegister); + __ Assert(above_equal, AbortReason::kUnexpectedValue); + } + __ DecompressAnyTagged(kScratchRegister, + FieldOperand(object, JSObject::kElementsOffset)); + if (FLAG_debug_code) { + __ CmpObjectType(kScratchRegister, FIXED_ARRAY_TYPE, kScratchRegister); + __ Assert(equal, AbortReason::kUnexpectedValue); + // Reload since CmpObjectType clobbered the scratch register. + __ DecompressAnyTagged(kScratchRegister, + FieldOperand(object, JSObject::kElementsOffset)); + } + __ AssertSmi(index); + // Zero out top bits of index reg (these were previously either zero already, + // or the cage base). This technically mutates it, but since it's a Smi, that + // doesn't matter. + __ movl(index, index); + static_assert(kSmiTagSize + kSmiShiftSize < times_tagged_size, + "Folding the Smi shift into the FixedArray entry size shift " + "only works if the shift is small"); + __ DecompressAnyTagged( + result_reg, + FieldOperand(kScratchRegister, index, + static_cast<ScaleFactor>(times_tagged_size - + (kSmiTagSize + kSmiShiftSize)), + FixedArray::kHeaderSize)); +} + +void LoadDoubleElement::AllocateVreg(MaglevVregAllocationState* vreg_state) { + UseRegister(object_input()); + UseRegister(index_input()); + DefineAsRegister(vreg_state, this); +} +void LoadDoubleElement::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + Register object = ToRegister(object_input()); + Register index = ToRegister(index_input()); + DoubleRegister result_reg = ToDoubleRegister(result()); + __ AssertNotSmi(object); + if (FLAG_debug_code) { + __ CmpObjectType(object, JS_OBJECT_TYPE, kScratchRegister); + __ Assert(above_equal, AbortReason::kUnexpectedValue); + } + __ DecompressAnyTagged(kScratchRegister, + FieldOperand(object, JSObject::kElementsOffset)); + if (FLAG_debug_code) { + __ CmpObjectType(kScratchRegister, FIXED_DOUBLE_ARRAY_TYPE, + kScratchRegister); + __ Assert(equal, AbortReason::kUnexpectedValue); + // Reload since CmpObjectType clobbered the scratch register. + __ DecompressAnyTagged(kScratchRegister, + FieldOperand(object, JSObject::kElementsOffset)); + } + __ AssertSmi(index); + // Zero out top bits of index reg (these were previously either zero already, + // or the cage base). This technically mutates it, but since it's a Smi, that + // doesn't matter.
+ __ movl(index, index); + static_assert(kSmiTagSize + kSmiShiftSize < times_8, + "Folding the Smi shift into the FixedArray entry size shift " + "only works if the shift is small"); + __ Movsd(result_reg, + FieldOperand(kScratchRegister, index, + static_cast<ScaleFactor>(times_8 - + (kSmiTagSize + kSmiShiftSize)), + FixedDoubleArray::kHeaderSize)); +} + void StoreTaggedFieldNoWriteBarrier::AllocateVreg( MaglevVregAllocationState* vreg_state) { UseRegister(object_input()); diff --git a/src/maglev/maglev-ir.h b/src/maglev/maglev-ir.h index 8b63df2363..98083a478b 100644 --- a/src/maglev/maglev-ir.h +++ b/src/maglev/maglev-ir.h @@ -144,6 +144,8 @@ class CompactInterpreterFrameState; V(InitialValue) \ V(LoadTaggedField) \ V(LoadDoubleField) \ + V(LoadTaggedElement) \ + V(LoadDoubleElement) \ V(LoadGlobal) \ V(LoadNamedGeneric) \ V(LoadNamedFromSuperGeneric) \ @@ -192,6 +194,8 @@ class CompactInterpreterFrameState; V(CheckSymbol) \ V(CheckString) \ V(CheckMapsWithMigration) \ + V(CheckJSArrayBounds) \ + V(CheckJSObjectElementsBounds) \ V(GeneratorStore) \ V(JumpLoopPrologue) \ V(StoreTaggedFieldNoWriteBarrier) \ @@ -2642,6 +2646,39 @@ class CheckMapsWithMigration const CheckType check_type_; }; +class CheckJSArrayBounds : public FixedInputNodeT<2, CheckJSArrayBounds> { + using Base = FixedInputNodeT<2, CheckJSArrayBounds>; + + public: + explicit CheckJSArrayBounds(uint64_t bitfield) : Base(bitfield) {} + + static constexpr OpProperties kProperties = OpProperties::EagerDeopt(); + + static constexpr int kReceiverIndex = 0; + static constexpr int kIndexIndex = 1; + Input& receiver_input() { return input(kReceiverIndex); } + Input& index_input() { return input(kIndexIndex); } + + DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS() +}; + +class CheckJSObjectElementsBounds + : public FixedInputNodeT<2, CheckJSObjectElementsBounds> { + using Base = FixedInputNodeT<2, CheckJSObjectElementsBounds>; + + public: + explicit CheckJSObjectElementsBounds(uint64_t bitfield) : Base(bitfield) {} + + static constexpr OpProperties kProperties = OpProperties::EagerDeopt(); + + static constexpr int kReceiverIndex = 0; + static constexpr int kIndexIndex = 1; + Input& receiver_input() { return input(kReceiverIndex); } + Input& index_input() { return input(kIndexIndex); } + + DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS() +}; + class CheckedInternalizedString : public FixedInputValueNodeT<1, CheckedInternalizedString> { using Base = FixedInputValueNodeT<1, CheckedInternalizedString>; @@ -2736,6 +2773,39 @@ class LoadDoubleField : public FixedInputValueNodeT<1, LoadDoubleField> { const int offset_; }; +class LoadTaggedElement : public FixedInputValueNodeT<2, LoadTaggedElement> { + using Base = FixedInputValueNodeT<2, LoadTaggedElement>; + + public: + explicit LoadTaggedElement(uint64_t bitfield) : Base(bitfield) {} + + static constexpr OpProperties kProperties = OpProperties::Reading(); + + static constexpr int kObjectIndex = 0; + static constexpr int kIndexIndex = 1; + Input& object_input() { return input(kObjectIndex); } + Input& index_input() { return input(kIndexIndex); } + + DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS() +}; + +class LoadDoubleElement : public FixedInputValueNodeT<2, LoadDoubleElement> { + using Base = FixedInputValueNodeT<2, LoadDoubleElement>; + + public: + explicit LoadDoubleElement(uint64_t bitfield) : Base(bitfield) {} + + static constexpr OpProperties kProperties = + OpProperties::Reading() | OpProperties::Float64(); + + static constexpr int kObjectIndex = 0; + static constexpr int kIndexIndex = 1; + Input&
object_input() { return input(kObjectIndex); } + Input& index_input() { return input(kIndexIndex); } + + DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS() +}; + class StoreTaggedFieldNoWriteBarrier : public FixedInputNodeT<2, StoreTaggedFieldNoWriteBarrier> { using Base = FixedInputNodeT<2, StoreTaggedFieldNoWriteBarrier>; From 933e3d8bdf10512c9d202c52e853ecd3abe5fa90 Mon Sep 17 00:00:00 2001 From: Leszek Swirski Date: Tue, 13 Sep 2022 08:18:34 +0000 Subject: [PATCH 0065/1772] Revert "[maglev] Optimize monomorphic keyed loads" This reverts commit 133e7f8362a0bb799db950f5f57439033991f147. Reason for revert: Breaks compilation for non-pointer-compressed x64 Original change's description: > [maglev] Optimize monomorphic keyed loads > > Add a fast path for keyed loads that are: > > 1. Monomorphic, > 2. Fast elements accesses, > 3. Not out-of-bounds (deopt on OOB), > 4. Not holey > > Bug: v8:7700 > Change-Id: I4d46f4d0ce7065c93a9b092833fb16a8c9e9f94e > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3882974 > Auto-Submit: Leszek Swirski > Reviewed-by: Jakob Linke > Commit-Queue: Leszek Swirski > Cr-Commit-Position: refs/heads/main@{#83149} Bug: v8:7700 Change-Id: I08e7ca3a79b383d19c6baf73a721364b859d6df3 No-Presubmit: true No-Tree-Checks: true No-Try: true Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3890916 Auto-Submit: Leszek Swirski Commit-Queue: Rubber Stamper Bot-Commit: Rubber Stamper Cr-Commit-Position: refs/heads/main@{#83150} --- src/diagnostics/objects-printer.cc | 2 +- src/maglev/maglev-graph-builder.cc | 73 ---------------- src/maglev/maglev-graph-builder.h | 8 -- src/maglev/maglev-graph-verifier.h | 4 - src/maglev/maglev-ir.cc | 131 ----------------------------- src/maglev/maglev-ir.h | 70 --------------- 6 files changed, 1 insertion(+), 287 deletions(-) diff --git a/src/diagnostics/objects-printer.cc b/src/diagnostics/objects-printer.cc index dc050beb1a..4e8e0f829a 100644 --- a/src/diagnostics/objects-printer.cc +++ b/src/diagnostics/objects-printer.cc @@ -1265,6 +1265,7 @@ void FeedbackNexus::Print(std::ostream& os) { case FeedbackSlotKind::kDefineKeyedOwn: case FeedbackSlotKind::kHasKeyed: case FeedbackSlotKind::kInstanceOf: + case FeedbackSlotKind::kLoadKeyed: case FeedbackSlotKind::kDefineKeyedOwnPropertyInLiteral: case FeedbackSlotKind::kStoreGlobalSloppy: case FeedbackSlotKind::kStoreGlobalStrict: @@ -1290,7 +1291,6 @@ void FeedbackNexus::Print(std::ostream& os) { } break; } - case FeedbackSlotKind::kLoadKeyed: case FeedbackSlotKind::kLoadProperty: { os << InlineCacheState2String(ic_state()); if (ic_state() == InlineCacheState::MONOMORPHIC) { diff --git a/src/maglev/maglev-graph-builder.cc b/src/maglev/maglev-graph-builder.cc index 6b7998772d..6d64c91bce 100644 --- a/src/maglev/maglev-graph-builder.cc +++ b/src/maglev/maglev-graph-builder.cc @@ -22,7 +22,6 @@ #include "src/maglev/maglev-compilation-unit.h" #include "src/maglev/maglev-interpreter-frame-state.h" #include "src/maglev/maglev-ir.h" -#include "src/objects/elements-kind.h" #include "src/objects/feedback-vector.h" #include "src/objects/literal-objects-inl.h" #include "src/objects/name-inl.h" @@ -1134,60 +1133,6 @@ bool MaglevGraphBuilder::TryBuildMonomorphicLoadFromLoadHandler( return true; } -bool MaglevGraphBuilder::TryBuildMonomorphicElementLoad( - ValueNode* object, ValueNode* index, const compiler::MapRef& map, - MaybeObjectHandle handler) { - if (handler.is_null()) return false; - - if (handler->IsSmi()) { - return TryBuildMonomorphicElementLoadFromSmiHandler( - object, index, 
map, handler->ToSmi().value()); - } - return false; -} - -bool MaglevGraphBuilder::TryBuildMonomorphicElementLoadFromSmiHandler( - ValueNode* object, ValueNode* index, const compiler::MapRef& map, - int32_t handler) { - LoadHandler::Kind kind = LoadHandler::KindBits::decode(handler); - - switch (kind) { - case LoadHandler::Kind::kElement: { - if (LoadHandler::AllowOutOfBoundsBits::decode(handler)) { - return false; - } - ElementsKind elements_kind = - LoadHandler::ElementsKindBits::decode(handler); - if (!IsFastElementsKind(elements_kind)) return false; - - // TODO(leszeks): Handle holey elements. - if (IsHoleyElementsKind(elements_kind)) return false; - DCHECK(!LoadHandler::ConvertHoleBits::decode(handler)); - - BuildMapCheck(object, map); - BuildCheckSmi(index); - - if (LoadHandler::IsJsArrayBits::decode(handler)) { - DCHECK(map.IsJSArrayMap()); - AddNewNode<CheckJSArrayBounds>({object, index}); - } else { - DCHECK(!map.IsJSArrayMap()); - DCHECK(map.IsJSObjectMap()); - AddNewNode<CheckJSObjectElementsBounds>({object, index}); - } - if (elements_kind == ElementsKind::PACKED_DOUBLE_ELEMENTS) { - SetAccumulator(AddNewNode<LoadDoubleElement>({object, index})); - } else { - DCHECK(!IsDoubleElementsKind(elements_kind)); - SetAccumulator(AddNewNode<LoadTaggedElement>({object, index})); - } - return true; - } - default: - return false; - } -} - void MaglevGraphBuilder::VisitGetNamedProperty() { // GetNamedProperty ValueNode* object = LoadRegisterTagged(0); @@ -1281,8 +1226,6 @@ void MaglevGraphBuilder::VisitGetNamedPropertyFromSuper() { void MaglevGraphBuilder::VisitGetKeyedProperty() { // GetKeyedProperty ValueNode* object = LoadRegisterTagged(0); - // TODO(leszeks): We don't need to tag the key if it's an Int32 and a simple - // monomorphic element load. ValueNode* key = GetAccumulatorTagged(); FeedbackSlot slot = GetSlotOperand(1); compiler::FeedbackSource feedback_source{feedback(), slot}; @@ -1297,22 +1240,6 @@ void MaglevGraphBuilder::VisitGetKeyedProperty() { DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess); return; - case compiler::ProcessedFeedback::kElementAccess: { - const compiler::ElementAccessFeedback& element_feedback = - processed_feedback.AsElementAccess(); - if (element_feedback.transition_groups().size() != 1) break; - compiler::MapRef map = MakeRefAssumeMemoryFence( - broker(), element_feedback.transition_groups()[0].front()); - - // Monomorphic load, check the handler. - // TODO(leszeks): Make GetFeedbackForPropertyAccess read the handler.
- MaybeObjectHandle handler = - FeedbackNexusForSlot(slot).FindHandlerForMap(map.object()); - - if (TryBuildMonomorphicElementLoad(object, key, map, handler)) return; - break; - } - default: break; } diff --git a/src/maglev/maglev-graph-builder.h b/src/maglev/maglev-graph-builder.h index 89c2c207e3..c6aab2587c 100644 --- a/src/maglev/maglev-graph-builder.h +++ b/src/maglev/maglev-graph-builder.h @@ -969,14 +969,6 @@ class MaglevGraphBuilder { const compiler::MapRef& map, LoadHandler handler); - bool TryBuildMonomorphicElementLoad(ValueNode* object, ValueNode* index, - const compiler::MapRef& map, - MaybeObjectHandle handler); - bool TryBuildMonomorphicElementLoadFromSmiHandler(ValueNode* object, - ValueNode* index, - const compiler::MapRef& map, - int32_t handler); - bool TryBuildMonomorphicStore(ValueNode* object, const compiler::MapRef& map, MaybeObjectHandle handler); bool TryBuildMonomorphicStoreFromSmiHandler(ValueNode* object, diff --git a/src/maglev/maglev-graph-verifier.h b/src/maglev/maglev-graph-verifier.h index e837e640bc..3ac7d270c7 100644 --- a/src/maglev/maglev-graph-verifier.h +++ b/src/maglev/maglev-graph-verifier.h @@ -175,10 +175,6 @@ class MaglevGraphVerifier { case Opcode::kGenericLessThan: case Opcode::kGenericLessThanOrEqual: case Opcode::kGenericStrictEqual: - case Opcode::kCheckJSArrayBounds: - case Opcode::kCheckJSObjectElementsBounds: - case Opcode::kLoadTaggedElement: - case Opcode::kLoadDoubleElement: case Opcode::kGetIterator: case Opcode::kTaggedEqual: case Opcode::kTaggedNotEqual: diff --git a/src/maglev/maglev-ir.cc b/src/maglev/maglev-ir.cc index 78532c9024..a72313dc56 100644 --- a/src/maglev/maglev-ir.cc +++ b/src/maglev/maglev-ir.cc @@ -28,7 +28,6 @@ #include "src/maglev/maglev-interpreter-frame-state.h" #include "src/maglev/maglev-ir-inl.h" #include "src/maglev/maglev-vreg-allocator.h" -#include "src/objects/instance-type.h" namespace v8 { namespace internal { @@ -1352,56 +1351,6 @@ void CheckMapsWithMigration::PrintParams( os << "(" << *map().object() << ")"; } -void CheckJSArrayBounds::AllocateVreg(MaglevVregAllocationState* vreg_state) { - UseRegister(receiver_input()); - UseRegister(index_input()); -} -void CheckJSArrayBounds::GenerateCode(MaglevAssembler* masm, - const ProcessingState& state) { - Register object = ToRegister(receiver_input()); - Register index = ToRegister(index_input()); - __ AssertNotSmi(object); - __ AssertSmi(index); - - if (FLAG_debug_code) { - __ CmpObjectType(object, JS_ARRAY_TYPE, kScratchRegister); - __ Assert(equal, AbortReason::kUnexpectedValue); - } - TaggedRegister length(kScratchRegister); - __ LoadAnyTaggedField(length, FieldOperand(object, JSArray::kLengthOffset)); - __ cmp_tagged(index, length.reg()); - __ EmitEagerDeoptIf(above_equal, DeoptimizeReason::kOutOfBounds, this); -} - -void CheckJSObjectElementsBounds::AllocateVreg( - MaglevVregAllocationState* vreg_state) { - UseRegister(receiver_input()); - UseRegister(index_input()); -} -void CheckJSObjectElementsBounds::GenerateCode(MaglevAssembler* masm, - const ProcessingState& state) { - Register object = ToRegister(receiver_input()); - Register index = ToRegister(index_input()); - __ AssertNotSmi(object); - __ AssertSmi(index); - - if (FLAG_debug_code) { - __ CmpObjectType(object, FIRST_JS_OBJECT_TYPE, kScratchRegister); - __ Assert(greater_equal, AbortReason::kUnexpectedValue); - } - __ LoadAnyTaggedField( - kScratchRegister, - FieldOperand(object, JSReceiver::kPropertiesOrHashOffset)); - if (FLAG_debug_code) { - __ AssertNotSmi(kScratchRegister); - } - 
TaggedRegister length(kScratchRegister); - __ LoadAnyTaggedField( - length, FieldOperand(kScratchRegister, FixedArray::kLengthOffset)); - __ cmp_tagged(index, length.reg()); - __ EmitEagerDeoptIf(above_equal, DeoptimizeReason::kOutOfBounds, this); -} - void CheckedInternalizedString::AllocateVreg( MaglevVregAllocationState* vreg_state) { UseRegister(object_input()); @@ -1491,86 +1440,6 @@ void LoadDoubleField::PrintParams(std::ostream& os, os << "(0x" << std::hex << offset() << std::dec << ")"; } -void LoadTaggedElement::AllocateVreg(MaglevVregAllocationState* vreg_state) { - UseRegister(object_input()); - UseRegister(index_input()); - DefineAsRegister(vreg_state, this); -} -void LoadTaggedElement::GenerateCode(MaglevAssembler* masm, - const ProcessingState& state) { - Register object = ToRegister(object_input()); - Register index = ToRegister(index_input()); - Register result_reg = ToRegister(result()); - __ AssertNotSmi(object); - if (FLAG_debug_code) { - __ CmpObjectType(object, JS_OBJECT_TYPE, kScratchRegister); - __ Assert(above_equal, AbortReason::kUnexpectedValue); - } - __ DecompressAnyTagged(kScratchRegister, - FieldOperand(object, JSObject::kElementsOffset)); - if (FLAG_debug_code) { - __ CmpObjectType(kScratchRegister, FIXED_ARRAY_TYPE, kScratchRegister); - __ Assert(equal, AbortReason::kUnexpectedValue); - // Reload since CmpObjectType clobbered the scratch register. - __ DecompressAnyTagged(kScratchRegister, - FieldOperand(object, JSObject::kElementsOffset)); - } - __ AssertSmi(index); - // Zero out top bits of index reg (these were previously either zero already, - // or the cage base). This technically mutates it, but since it's a Smi, that - // doesn't matter. - __ movl(index, index); - static_assert(kSmiTagSize + kSmiShiftSize < times_tagged_size, - "Folding the Smi shift into the FixedArray entry size shift " - "only works if the shift is small"); - __ DecompressAnyTagged( - result_reg, - FieldOperand(kScratchRegister, index, - static_cast<ScaleFactor>(times_tagged_size - - (kSmiTagSize + kSmiShiftSize)), - FixedArray::kHeaderSize)); -} - -void LoadDoubleElement::AllocateVreg(MaglevVregAllocationState* vreg_state) { - UseRegister(object_input()); - UseRegister(index_input()); - DefineAsRegister(vreg_state, this); -} -void LoadDoubleElement::GenerateCode(MaglevAssembler* masm, - const ProcessingState& state) { - Register object = ToRegister(object_input()); - Register index = ToRegister(index_input()); - DoubleRegister result_reg = ToDoubleRegister(result()); - __ AssertNotSmi(object); - if (FLAG_debug_code) { - __ CmpObjectType(object, JS_OBJECT_TYPE, kScratchRegister); - __ Assert(above_equal, AbortReason::kUnexpectedValue); - } - __ DecompressAnyTagged(kScratchRegister, - FieldOperand(object, JSObject::kElementsOffset)); - if (FLAG_debug_code) { - __ CmpObjectType(kScratchRegister, FIXED_DOUBLE_ARRAY_TYPE, - kScratchRegister); - __ Assert(equal, AbortReason::kUnexpectedValue); - // Reload since CmpObjectType clobbered the scratch register. - __ DecompressAnyTagged(kScratchRegister, - FieldOperand(object, JSObject::kElementsOffset)); - } - __ AssertSmi(index); - // Zero out top bits of index reg (these were previously either zero already, - // or the cage base). This technically mutates it, but since it's a Smi, that - // doesn't matter.
- __ movl(index, index); - static_assert(kSmiTagSize + kSmiShiftSize < times_8, - "Folding the Smi shift into the FixedArray entry size shift " - "only works if the shift is small"); - __ Movsd(result_reg, - FieldOperand(kScratchRegister, index, - static_cast<ScaleFactor>(times_8 - - (kSmiTagSize + kSmiShiftSize)), - FixedDoubleArray::kHeaderSize)); -} - void StoreTaggedFieldNoWriteBarrier::AllocateVreg( MaglevVregAllocationState* vreg_state) { UseRegister(object_input()); diff --git a/src/maglev/maglev-ir.h b/src/maglev/maglev-ir.h index 98083a478b..8b63df2363 100644 --- a/src/maglev/maglev-ir.h +++ b/src/maglev/maglev-ir.h @@ -144,8 +144,6 @@ class CompactInterpreterFrameState; V(InitialValue) \ V(LoadTaggedField) \ V(LoadDoubleField) \ - V(LoadTaggedElement) \ - V(LoadDoubleElement) \ V(LoadGlobal) \ V(LoadNamedGeneric) \ V(LoadNamedFromSuperGeneric) \ @@ -192,8 +192,6 @@ class CompactInterpreterFrameState; V(CheckSymbol) \ V(CheckString) \ V(CheckMapsWithMigration) \ - V(CheckJSArrayBounds) \ - V(CheckJSObjectElementsBounds) \ V(GeneratorStore) \ V(JumpLoopPrologue) \ V(StoreTaggedFieldNoWriteBarrier) \ @@ -2646,39 +2642,6 @@ class CheckMapsWithMigration const CheckType check_type_; }; -class CheckJSArrayBounds : public FixedInputNodeT<2, CheckJSArrayBounds> { - using Base = FixedInputNodeT<2, CheckJSArrayBounds>; - - public: - explicit CheckJSArrayBounds(uint64_t bitfield) : Base(bitfield) {} - - static constexpr OpProperties kProperties = OpProperties::EagerDeopt(); - - static constexpr int kReceiverIndex = 0; - static constexpr int kIndexIndex = 1; - Input& receiver_input() { return input(kReceiverIndex); } - Input& index_input() { return input(kIndexIndex); } - - DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS() -}; - -class CheckJSObjectElementsBounds - : public FixedInputNodeT<2, CheckJSObjectElementsBounds> { - using Base = FixedInputNodeT<2, CheckJSObjectElementsBounds>; - - public: - explicit CheckJSObjectElementsBounds(uint64_t bitfield) : Base(bitfield) {} - - static constexpr OpProperties kProperties = OpProperties::EagerDeopt(); - - static constexpr int kReceiverIndex = 0; - static constexpr int kIndexIndex = 1; - Input& receiver_input() { return input(kReceiverIndex); } - Input& index_input() { return input(kIndexIndex); } - - DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS() -}; - class CheckedInternalizedString : public FixedInputValueNodeT<1, CheckedInternalizedString> { using Base = FixedInputValueNodeT<1, CheckedInternalizedString>; @@ -2773,39 +2736,6 @@ class LoadDoubleField : public FixedInputValueNodeT<1, LoadDoubleField> { const int offset_; }; -class LoadTaggedElement : public FixedInputValueNodeT<2, LoadTaggedElement> { - using Base = FixedInputValueNodeT<2, LoadTaggedElement>; - - public: - explicit LoadTaggedElement(uint64_t bitfield) : Base(bitfield) {} - - static constexpr OpProperties kProperties = OpProperties::Reading(); - - static constexpr int kObjectIndex = 0; - static constexpr int kIndexIndex = 1; - Input& object_input() { return input(kObjectIndex); } - Input& index_input() { return input(kIndexIndex); } - - DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS() -}; - -class LoadDoubleElement : public FixedInputValueNodeT<2, LoadDoubleElement> { - using Base = FixedInputValueNodeT<2, LoadDoubleElement>; - - public: - explicit LoadDoubleElement(uint64_t bitfield) : Base(bitfield) {} - - static constexpr OpProperties kProperties = - OpProperties::Reading() | OpProperties::Float64(); - - static constexpr int kObjectIndex = 0; - static constexpr int kIndexIndex = 1; - Input&
object_input() { return input(kObjectIndex); } - Input& index_input() { return input(kIndexIndex); } - - DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS() -}; - class StoreTaggedFieldNoWriteBarrier : public FixedInputNodeT<2, StoreTaggedFieldNoWriteBarrier> { using Base = FixedInputNodeT<2, StoreTaggedFieldNoWriteBarrier>; From d4482c07cd8aa89cbe865843329d29e86aab8e25 Mon Sep 17 00:00:00 2001 From: Jakob Linke Date: Tue, 13 Sep 2022 09:51:15 +0200 Subject: [PATCH 0066/1772] [maglev] Add NodeBase::Print() for GDB .. where we sometimes want to inspect Node contents. With this CL, for a human-readable print in gdb: print node->Print() Note: Since we use an adhoc-created graph labeller, the output can't properly identify input nodes and instead prints them as 'unregistered node'. Bug: v8:7700 Change-Id: Icba458ac1a5c43a09b815e12582443aca4e19380 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3890914 Auto-Submit: Jakob Linke Commit-Queue: Jakob Linke Reviewed-by: Leszek Swirski Commit-Queue: Leszek Swirski Cr-Commit-Position: refs/heads/main@{#83151} --- src/maglev/maglev-graph-labeller.h | 2 +- src/maglev/maglev-ir.cc | 6 ++++++ src/maglev/maglev-ir.h | 3 +++ 3 files changed, 10 insertions(+), 1 deletion(-) diff --git a/src/maglev/maglev-graph-labeller.h b/src/maglev/maglev-graph-labeller.h index 93c81c38e2..076377ed11 100644 --- a/src/maglev/maglev-graph-labeller.h +++ b/src/maglev/maglev-graph-labeller.h @@ -37,7 +37,7 @@ class MaglevGraphLabeller { auto node_id_it = node_ids_.find(node); if (node_id_it == node_ids_.end()) { - os << ""; + os << ""; return; } diff --git a/src/maglev/maglev-ir.cc b/src/maglev/maglev-ir.cc index a72313dc56..36592c4ae1 100644 --- a/src/maglev/maglev-ir.cc +++ b/src/maglev/maglev-ir.cc @@ -345,6 +345,12 @@ void NodeBase::Print(std::ostream& os, MaglevGraphLabeller* graph_labeller, UNREACHABLE(); } +void NodeBase::Print() const { + MaglevGraphLabeller labeller; + Print(std::cout, &labeller); + std::cout << std::endl; +} + namespace { size_t GetInputLocationsArraySize(const MaglevCompilationUnit& compilation_unit, const CheckpointedInterpreterState& state) { diff --git a/src/maglev/maglev-ir.h b/src/maglev/maglev-ir.h index 8b63df2363..5a06e2ace7 100644 --- a/src/maglev/maglev-ir.h +++ b/src/maglev/maglev-ir.h @@ -871,6 +871,9 @@ class NodeBase : public ZoneObject { void Print(std::ostream& os, MaglevGraphLabeller*, bool skip_targets = false) const; + // For GDB: Print any Node with `print node->Print()`. + void Print() const; + EagerDeoptInfo* eager_deopt_info() { DCHECK(properties().can_eager_deopt()); DCHECK(!properties().can_lazy_deopt()); From 06e8df41d5e11363f052fe248f8d1aaba9fb7d34 Mon Sep 17 00:00:00 2001 From: Jakob Linke Date: Tue, 13 Sep 2022 10:21:29 +0200 Subject: [PATCH 0067/1772] [maglev] Conservatively mark nodes with builtins calls as .. Throw|LazyDeopt. Whether a builtin can Throw|LazyDeopt depends on the implementation, so to be safe all builtin calls should be marked as such - UNLESS we know for certain that one or the other doesn't happen. Drive-by: For calls with two result registers, properly consider the second register in a few spots. 
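[Editor's illustration, not part of the CL: a minimal, self-contained C++ sketch of the two-result rule this description refers to. The real check is LazyDeoptInfo::IsResultRegister in the diff below; Register is reduced here to a bare index, standing in for interpreter::Register.]

    #include <cassert>

    // A call that produces two results writes two *consecutive* interpreter
    // registers. When walking the frame state for a lazy deopt, both slots
    // are "result" locations and must be skipped, because the unoptimized
    // frame recreates them after the call returns.
    struct Register {
      int index;
      bool operator==(const Register& other) const {
        return index == other.index;
      }
    };

    bool IsResultRegister(Register reg, Register result_location,
                          int result_size) {
      if (result_size == 1) return reg == result_location;
      assert(result_size == 2);
      // Two results: result_location and the register right after it.
      return reg == result_location ||
             reg == Register{result_location.index + 1};
    }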
Bug: v8:7700 Change-Id: Icbcffb51e9760761a2f4e32d79af33abccb8f1cb Fixed: chromium:1361245 Fixed: chromium:1360800 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3879617 Reviewed-by: Leszek Swirski Auto-Submit: Jakob Linke Commit-Queue: Jakob Linke Cr-Commit-Position: refs/heads/main@{#83152} --- src/maglev/maglev-compiler.cc | 2 +- src/maglev/maglev-graph-builder.cc | 31 ++++++++++++-------------- src/maglev/maglev-graph-builder.h | 20 +++++++++++------ src/maglev/maglev-graph-printer.cc | 2 +- src/maglev/maglev-ir.cc | 17 +++++++++++++++ src/maglev/maglev-ir.h | 35 +++++++++++++++++++++++------- src/maglev/maglev-regalloc.cc | 5 +++-- 7 files changed, 76 insertions(+), 36 deletions(-) diff --git a/src/maglev/maglev-compiler.cc b/src/maglev/maglev-compiler.cc index 7a58211d7d..bcc253b692 100644 --- a/src/maglev/maglev-compiler.cc +++ b/src/maglev/maglev-compiler.cc @@ -222,7 +222,7 @@ class UseMarkingProcessor { register_frame->ForEachValue( deopt_info->unit, [&](ValueNode* node, interpreter::Register reg) { // Skip over the result location. - if (reg == deopt_info->result_location) return; + if (deopt_info->IsResultRegister(reg)) return; MarkUse(node, use_id, &deopt_info->input_locations[index++], loop_used_nodes); }); diff --git a/src/maglev/maglev-graph-builder.cc b/src/maglev/maglev-graph-builder.cc index 6d64c91bce..ace747ead5 100644 --- a/src/maglev/maglev-graph-builder.cc +++ b/src/maglev/maglev-graph-builder.cc @@ -2008,7 +2008,8 @@ void MaglevGraphBuilder::VisitCallRuntimeForPair() { for (int i = 0; i < args.register_count(); ++i) { call_runtime->set_arg(i, GetTaggedValue(args[i])); } - StoreRegisterPair(iterator_.GetRegisterOperand(3), call_runtime); + auto result = iterator_.GetRegisterPairOperand(3); + StoreRegisterPair(result, call_runtime); } void MaglevGraphBuilder::VisitInvokeIntrinsic() { @@ -2324,21 +2325,19 @@ void MaglevGraphBuilder::VisitCreateArrayLiteral() { int bytecode_flags = GetFlag8Operand(2); int literal_flags = interpreter::CreateArrayLiteralFlags::FlagsBits::decode(bytecode_flags); - ValueNode* result; if (interpreter::CreateArrayLiteralFlags::FastCloneSupportedBit::decode( bytecode_flags)) { // TODO(victorgomes): CreateShallowArrayLiteral should not need the // boilerplate descriptor. However the current builtin checks that the // feedback exists and fallsback to CreateArrayLiteral if it doesn't. - result = AddNewNode<CreateShallowArrayLiteral>( + SetAccumulator(AddNewNode<CreateShallowArrayLiteral>( {}, constant_elements, compiler::FeedbackSource{feedback(), slot_index}, - literal_flags); + literal_flags)); } else { - result = AddNewNode<CreateArrayLiteral>( + SetAccumulator(AddNewNode<CreateArrayLiteral>( {}, constant_elements, compiler::FeedbackSource{feedback(), slot_index}, - literal_flags); + literal_flags)); } - SetAccumulator(result); } void MaglevGraphBuilder::VisitCreateArrayFromIterable() { @@ -2361,21 +2360,19 @@ void MaglevGraphBuilder::VisitCreateObjectLiteral() { int bytecode_flags = GetFlag8Operand(2); int literal_flags = interpreter::CreateObjectLiteralFlags::FlagsBits::decode(bytecode_flags); - ValueNode* result; if (interpreter::CreateObjectLiteralFlags::FastCloneSupportedBit::decode( bytecode_flags)) { // TODO(victorgomes): CreateShallowObjectLiteral should not need the // boilerplate descriptor. However the current builtin checks that the // feedback exists and fallsback to CreateObjectLiteral if it doesn't.
- result = AddNewNode<CreateShallowObjectLiteral>( + SetAccumulator(AddNewNode<CreateShallowObjectLiteral>( {}, boilerplate_desc, compiler::FeedbackSource{feedback(), slot_index}, - literal_flags); + literal_flags)); } else { - result = AddNewNode<CreateObjectLiteral>( + SetAccumulator(AddNewNode<CreateObjectLiteral>( {}, boilerplate_desc, compiler::FeedbackSource{feedback(), slot_index}, - literal_flags); + literal_flags)); } - SetAccumulator(result); } void MaglevGraphBuilder::VisitCreateEmptyObjectLiteral() { @@ -2751,10 +2748,10 @@ void MaglevGraphBuilder::VisitForInPrepare() { // |cache_info_triple + 2|, with the registers holding cache_type, // cache_array, and cache_length respectively. interpreter::Register first = iterator_.GetRegisterOperand(0); - interpreter::Register second(first.index() + 1); - interpreter::Register third(first.index() + 2); - StoreRegister(second, result); - StoreRegister(third, GetSecondValue(result)); + auto array_and_length = + std::make_pair(interpreter::Register{first.index() + 1}, + interpreter::Register{first.index() + 2}); + StoreRegisterPair(array_and_length, result); } void MaglevGraphBuilder::VisitForInContinue() { diff --git a/src/maglev/maglev-graph-builder.h b/src/maglev/maglev-graph-builder.h index c6aab2587c..c1c9aad075 100644 --- a/src/maglev/maglev-graph-builder.h +++ b/src/maglev/maglev-graph-builder.h @@ -787,21 +787,27 @@ class MaglevGraphBuilder { if (!IsConstantNode(value->opcode())) { DCHECK_NE(0, new_nodes_.count(value)); } - MarkAsLazyDeoptResult(value, target); + MarkAsLazyDeoptResult(value, target, 1); current_interpreter_frame_.set(target, value); } - void StoreRegisterPair(interpreter::Register target, CallRuntime* value) { + template <typename NodeT> + void StoreRegisterPair( + std::pair<interpreter::Register, interpreter::Register> target, + NodeT* value) { + const interpreter::Register target0 = target.first; + const interpreter::Register target1 = target.second; + + DCHECK_EQ(interpreter::Register(target0.index() + 1), target1); DCHECK_EQ(value->ReturnCount(), 2); DCHECK_NE(0, new_nodes_.count(value)); - MarkAsLazyDeoptResult(value, target, value->ReturnCount()); - current_interpreter_frame_.set(target, value); + MarkAsLazyDeoptResult(value, target0, value->ReturnCount()); + current_interpreter_frame_.set(target0, value); ValueNode* second_value = GetSecondValue(value); DCHECK_NE(0, new_nodes_.count(second_value)); - current_interpreter_frame_.set(interpreter::Register(target.index() + 1), - second_value); + current_interpreter_frame_.set(target1, second_value); } CheckpointedInterpreterState GetLatestCheckpointedState() { @@ -832,7 +838,7 @@ class MaglevGraphBuilder { template <typename NodeT> void MarkAsLazyDeoptResult(NodeT* value, interpreter::Register result_location, - int result_size = 1) { + int result_size) { DCHECK_EQ(NodeT::kProperties.can_lazy_deopt(), value->properties().can_lazy_deopt()); if constexpr (NodeT::kProperties.can_lazy_deopt()) { diff --git a/src/maglev/maglev-graph-printer.cc b/src/maglev/maglev-graph-printer.cc index 6022884399..95f0165a77 100644 --- a/src/maglev/maglev-graph-printer.cc +++ b/src/maglev/maglev-graph-printer.cc @@ -430,7 +430,7 @@ void PrintLazyDeopt(std::ostream& os, std::vector<BasicBlock*> targets, os << ", "; } os << reg.ToString() << ":"; - if (reg == deopt_info->result_location) { + if (deopt_info->IsResultRegister(reg)) { os << "<result>"; } else { os << PrintNodeLabel(graph_labeller, node) << ":" diff --git a/src/maglev/maglev-ir.cc b/src/maglev/maglev-ir.cc index 36592c4ae1..8ac5d7364f 100644 --- a/src/maglev/maglev-ir.cc +++ b/src/maglev/maglev-ir.cc @@ -379,6 +379,15 @@ DeoptInfo::DeoptInfo(Zone* zone, const MaglevCompilationUnit& compilation_unit, } } +bool
LazyDeoptInfo::IsResultRegister(interpreter::Register reg) const { + if (V8_LIKELY(result_size == 1)) { + return reg == result_location; + } + DCHECK_EQ(result_size, 2); + return reg == result_location || + reg == interpreter::Register(result_location.index() + 1); +} + // --- // Nodes // --- @@ -707,6 +716,7 @@ void ForInPrepare::GenerateCode(MaglevAssembler* masm, TaggedIndex::FromIntptr(feedback().index())); __ Move(D::GetRegisterParameter(D::kFeedbackVector), feedback().vector); __ CallBuiltin(Builtin::kForInPrepare); + masm->DefineExceptionHandlerAndLazyDeoptPoint(this); } void ForInNext::AllocateVreg(MaglevVregAllocationState* vreg_state) { @@ -752,6 +762,7 @@ void GetIterator::GenerateCode(MaglevAssembler* masm, TaggedIndex::FromIntptr(call_slot())); __ Move(D::GetRegisterParameter(D::kMaybeFeedbackVector), feedback()); __ CallBuiltin(Builtin::kGetIteratorWithFeedback); + masm->DefineExceptionHandlerAndLazyDeoptPoint(this); } void GetSecondReturnedValue::AllocateVreg( @@ -905,6 +916,7 @@ void CreateEmptyArrayLiteral::GenerateCode(MaglevAssembler* masm, __ Move(D::GetRegisterParameter(D::kSlot), Smi::FromInt(feedback().index())); __ Move(D::GetRegisterParameter(D::kFeedbackVector), feedback().vector); __ CallBuiltin(Builtin::kCreateEmptyArrayLiteral); + masm->DefineExceptionHandlerAndLazyDeoptPoint(this); } void CreateArrayLiteral::AllocateVreg(MaglevVregAllocationState* vreg_state) { @@ -935,6 +947,7 @@ void CreateShallowArrayLiteral::GenerateCode(MaglevAssembler* masm, constant_elements().object()); __ Move(D::GetRegisterParameter(D::kFlags), Smi::FromInt(flags())); __ CallBuiltin(Builtin::kCreateShallowArrayLiteral); + masm->DefineExceptionHandlerAndLazyDeoptPoint(this); } void CreateObjectLiteral::AllocateVreg(MaglevVregAllocationState* vreg_state) { @@ -988,6 +1001,7 @@ void CreateShallowObjectLiteral::GenerateCode(MaglevAssembler* masm, __ Move(D::GetRegisterParameter(D::kDesc), boilerplate_descriptor().object()); __ Move(D::GetRegisterParameter(D::kFlags), Smi::FromInt(flags())); __ CallBuiltin(Builtin::kCreateShallowObjectLiteral); + masm->DefineExceptionHandlerAndLazyDeoptPoint(this); } void CreateFunctionContext::AllocateVreg( @@ -1028,6 +1042,7 @@ void CreateFunctionContext::GenerateCode(MaglevAssembler* masm, __ Move(D::GetRegisterParameter(D::kSlots), Immediate(slot_count())); __ CallBuiltin(Builtin::kFastNewFunctionContextEval); } + masm->DefineExceptionHandlerAndLazyDeoptPoint(this); } void CreateFunctionContext::PrintParams( std::ostream& os, MaglevGraphLabeller* graph_labeller) const { @@ -1049,6 +1064,7 @@ void FastCreateClosure::GenerateCode(MaglevAssembler* masm, shared_function_info().object()); __ Move(D::GetRegisterParameter(D::kFeedbackCell), feedback_cell().object()); __ CallBuiltin(Builtin::kFastNewClosure); + masm->DefineExceptionHandlerAndLazyDeoptPoint(this); } void FastCreateClosure::PrintParams(std::ostream& os, MaglevGraphLabeller* graph_labeller) const { @@ -1107,6 +1123,7 @@ void GetTemplateObject::GenerateCode(MaglevAssembler* masm, __ Move(D::GetRegisterParameter(D::kSlot), feedback().slot.ToInt()); __ Move(D::GetRegisterParameter(D::kShared), shared_function_info_.object()); __ CallBuiltin(Builtin::kGetTemplateObject); + masm->DefineExceptionHandlerAndLazyDeoptPoint(this); } void Abort::AllocateVreg(MaglevVregAllocationState* vreg_state) {} diff --git a/src/maglev/maglev-ir.h b/src/maglev/maglev-ir.h index 5a06e2ace7..37f5a38d1e 100644 --- a/src/maglev/maglev-ir.h +++ b/src/maglev/maglev-ir.h @@ -525,6 +525,14 @@ class OpProperties { static 
constexpr OpProperties NeedsRegisterSnapshot() { return OpProperties(kNeedsRegisterSnapshotBit::encode(true)); } + // Without auditing the call target, we must assume it can cause a lazy deopt + // and throw. Use this when codegen calls runtime or a builtin, unless + // certain that the target either doesn't throw or cannot deopt. + // TODO(jgruber): Go through all nodes marked with this property and decide + // whether to keep it (or remove either the lazy-deopt or throw flag). + static constexpr OpProperties GenericRuntimeOrBuiltinCall() { + return Call() | NonMemorySideEffects() | LazyDeopt() | Throw(); + } static constexpr OpProperties JSCall() { return Call() | NonMemorySideEffects() | LazyDeopt() | Throw(); } @@ -680,6 +688,8 @@ class LazyDeoptInfo : public DeoptInfo { CheckpointedInterpreterState checkpoint) : DeoptInfo(zone, compilation_unit, checkpoint) {} + bool IsResultRegister(interpreter::Register reg) const; + int deopting_call_return_pc = -1; interpreter::Register result_location = interpreter::Register::invalid_value(); @@ -1983,13 +1993,16 @@ class ForInPrepare : public FixedInputValueNodeT<2, ForInPrepare> { explicit ForInPrepare(uint64_t bitfield, compiler::FeedbackSource& feedback) : Base(bitfield), feedback_(feedback) {} - static constexpr OpProperties kProperties = OpProperties::Call(); + static constexpr OpProperties kProperties = + OpProperties::GenericRuntimeOrBuiltinCall(); compiler::FeedbackSource feedback() const { return feedback_; } Input& context() { return Node::input(0); } Input& enumerator() { return Node::input(1); } + int ReturnCount() const { return 2; } + DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS() private: @@ -2221,7 +2234,8 @@ class CreateEmptyArrayLiteral compiler::FeedbackSource feedback() const { return feedback_; } // The implementation currently calls runtime. - static constexpr OpProperties kProperties = OpProperties::Call(); + static constexpr OpProperties kProperties = + OpProperties::GenericRuntimeOrBuiltinCall(); DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS() @@ -2275,7 +2289,8 @@ class CreateShallowArrayLiteral int flags() const { return flags_; } // The implementation currently calls runtime. - static constexpr OpProperties kProperties = OpProperties::Call(); + static constexpr OpProperties kProperties = + OpProperties::GenericRuntimeOrBuiltinCall(); DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS() @@ -2357,7 +2372,8 @@ class CreateShallowObjectLiteral int flags() const { return flags_; } // The implementation currently calls runtime. - static constexpr OpProperties kProperties = OpProperties::Call(); + static constexpr OpProperties kProperties = + OpProperties::GenericRuntimeOrBuiltinCall(); DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS() @@ -2387,7 +2403,8 @@ class CreateFunctionContext Input& context() { return input(0); } // The implementation currently calls runtime. - static constexpr OpProperties kProperties = OpProperties::Call(); + static constexpr OpProperties kProperties = + OpProperties::GenericRuntimeOrBuiltinCall(); DECL_NODE_INTERFACE() @@ -2416,7 +2433,8 @@ class FastCreateClosure : public FixedInputValueNodeT<1, FastCreateClosure> { Input& context() { return input(0); } // The implementation currently calls runtime. 
- static constexpr OpProperties kProperties = OpProperties::Call(); + static constexpr OpProperties kProperties = + OpProperties::GenericRuntimeOrBuiltinCall(); DECL_NODE_INTERFACE() @@ -2682,7 +2700,8 @@ class GetTemplateObject : public FixedInputValueNodeT<1, GetTemplateObject> { feedback_(feedback) {} // The implementation currently calls runtime. - static constexpr OpProperties kProperties = OpProperties::Call(); + static constexpr OpProperties kProperties = + OpProperties::GenericRuntimeOrBuiltinCall(); Input& description() { return input(0); } @@ -3328,7 +3347,7 @@ class CallRuntime : public ValueNodeT { set_input(i + kFixedInputCount, node); } - int ReturnCount() { + int ReturnCount() const { return Runtime::FunctionForId(function_id())->result_size; } diff --git a/src/maglev/maglev-regalloc.cc b/src/maglev/maglev-regalloc.cc index 8a1a2ab15e..63d88529d8 100644 --- a/src/maglev/maglev-regalloc.cc +++ b/src/maglev/maglev-regalloc.cc @@ -521,8 +521,9 @@ void StraightForwardRegisterAllocator::UpdateUse( // See also: UpdateUse(EagerDeoptInfo&). checkpoint_state->ForEachValue( deopt_info.unit, [&](ValueNode* node, interpreter::Register reg) { - // Skip over the result location. - if (reg == deopt_info.result_location) return; + // Skip over the result location since it is irrelevant for lazy deopts + // (unoptimized code will recreate the result). + if (deopt_info.IsResultRegister(reg)) return; if (FLAG_trace_maglev_regalloc) { printing_visitor_->os() << "- using " << PrintNodeLabel(graph_labeller(), node) << "\n"; From e1dbe835d7b24ea4aa52a23e9376bb10d1a8a82b Mon Sep 17 00:00:00 2001 From: Hao Xu Date: Tue, 13 Sep 2022 21:40:05 +0800 Subject: [PATCH 0068/1772] [csa][codegen] Optimize IsStrong/IsWeakOrCleared The way to determine whether a MaybeObject is a strong or weak reference to the heap object is to check its lowest two bits. However, if the MaybeObject is known to not be a smi, that is, the lowest bit is known to be 1, we can check one bit instead. This allows Turbofan to select better instructions: x64: Before: movl r9,r11 andl r9,0x3 cmpb r9l,0x1 After: testb r11,0x2 arm64: Before: and w8, w7, #0x3 cmp w8, #0x1 (1) b.ne #+0x320 After: tbnz w7, #1, #+0x320 Change-Id: I03623183406ad7d920c96a752651e0116a22832e Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3861310 Reviewed-by: Nico Hartmann Commit-Queue: Hao A Xu Reviewed-by: Igor Sheludko Cr-Commit-Position: refs/heads/main@{#83153} --- include/v8-internal.h | 1 + src/codegen/code-stub-assembler.cc | 19 +++++++++++ src/codegen/code-stub-assembler.h | 4 +++ src/compiler/code-assembler.cc | 19 ----------- src/compiler/code-assembler.h | 13 ++------ src/diagnostics/objects-printer.cc | 25 --------------- src/ic/accessor-assembler.cc | 28 ++++++++-------- src/ic/accessor-assembler.h | 10 +++--- src/interpreter/interpreter-assembler.cc | 4 +-- src/objects/object-type.cc | 41 +++++++++++++++++++++--- src/objects/object-type.h | 1 + src/objects/tagged-impl.cc | 30 +++++++++++++++++ 12 files changed, 115 insertions(+), 80 deletions(-) diff --git a/include/v8-internal.h b/include/v8-internal.h index ed6aff1426..2009db9cbb 100644 --- a/include/v8-internal.h +++ b/include/v8-internal.h @@ -52,6 +52,7 @@ const int kHeapObjectTag = 1; const int kWeakHeapObjectTag = 3; const int kHeapObjectTagSize = 2; const intptr_t kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1; +const intptr_t kHeapObjectReferenceTagMask = 1 << (kHeapObjectTagSize - 1); // Tag information for fowarding pointers stored in object headers. 
// 0b00 at the lowest 2 bits in the header indicates that the map word is a diff --git a/src/codegen/code-stub-assembler.cc b/src/codegen/code-stub-assembler.cc index d3cd497885..6501007027 100644 --- a/src/codegen/code-stub-assembler.cc +++ b/src/codegen/code-stub-assembler.cc @@ -2067,12 +2067,24 @@ TNode<BoolT> CodeStubAssembler::IsStrong(TNode<MaybeObject> value) { Int32Constant(kHeapObjectTag)); } +TNode<BoolT> CodeStubAssembler::IsStrong(TNode<HeapObjectReference> value) { + return IsNotSetWord32( + TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(value)), + kHeapObjectReferenceTagMask); +} + TNode<HeapObject> CodeStubAssembler::GetHeapObjectIfStrong( TNode<MaybeObject> value, Label* if_not_strong) { GotoIfNot(IsStrong(value), if_not_strong); return CAST(value); } +TNode<HeapObject> CodeStubAssembler::GetHeapObjectIfStrong( + TNode<HeapObjectReference> value, Label* if_not_strong) { + GotoIfNot(IsStrong(value), if_not_strong); + return ReinterpretCast<HeapObject>(value); +} + TNode<BoolT> CodeStubAssembler::IsWeakOrCleared(TNode<MaybeObject> value) { return Word32Equal(Word32And(TruncateIntPtrToInt32( BitcastTaggedToWordForTagAndSmiBits(value)), @@ -2080,6 +2092,13 @@ TNode<BoolT> CodeStubAssembler::IsWeakOrCleared(TNode<MaybeObject> value) { Int32Constant(kWeakHeapObjectTag)); } +TNode<BoolT> CodeStubAssembler::IsWeakOrCleared( + TNode<HeapObjectReference> value) { + return IsSetWord32( + TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(value)), + kHeapObjectReferenceTagMask); +} + TNode<BoolT> CodeStubAssembler::IsCleared(TNode<MaybeObject> value) { return Word32Equal(TruncateIntPtrToInt32(BitcastMaybeObjectToWord(value)), Int32Constant(kClearedWeakHeapObjectLower32)); diff --git a/src/codegen/code-stub-assembler.h b/src/codegen/code-stub-assembler.h index 6b07a7c55a..b53b8465ba 100644 --- a/src/codegen/code-stub-assembler.h +++ b/src/codegen/code-stub-assembler.h @@ -1473,10 +1473,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TVariable<Object>* extracted); // See MaybeObject for semantics of these functions. TNode<BoolT> IsStrong(TNode<MaybeObject> value); + TNode<BoolT> IsStrong(TNode<HeapObjectReference> value); TNode<HeapObject> GetHeapObjectIfStrong(TNode<MaybeObject> value, Label* if_not_strong); + TNode<HeapObject> GetHeapObjectIfStrong(TNode<HeapObjectReference> value, + Label* if_not_strong); TNode<BoolT> IsWeakOrCleared(TNode<MaybeObject> value); + TNode<BoolT> IsWeakOrCleared(TNode<HeapObjectReference> value); TNode<BoolT> IsCleared(TNode<MaybeObject> value); TNode<BoolT> IsNotCleared(TNode<MaybeObject> value) { return Word32BinaryNot(IsCleared(value)); diff --git a/src/compiler/code-assembler.cc b/src/compiler/code-assembler.cc index 8263b70bac..d07482f24c 100644 --- a/src/compiler/code-assembler.cc +++ b/src/compiler/code-assembler.cc @@ -230,25 +230,6 @@ bool CodeAssembler::IsWord64CtzSupported() const { return raw_assembler()->machine()->Word64Ctz().IsSupported(); } -#ifdef DEBUG -void CodeAssembler::GenerateCheckMaybeObjectIsObject(TNode<MaybeObject> node, - const char* location) { - Label ok(this); - GotoIf(WordNotEqual(WordAnd(BitcastMaybeObjectToWord(node), - IntPtrConstant(kHeapObjectTagMask)), - IntPtrConstant(kWeakHeapObjectTag)), - &ok); - base::EmbeddedVector message; - SNPrintF(message, "no Object: %s", location); - TNode<String> message_node = StringConstant(message.begin()); - // This somewhat misuses the AbortCSADcheck runtime function. This will print - // "abort: CSA_DCHECK failed: <message>", which is good enough.
- AbortCSADcheck(message_node); - Unreachable(); - Bind(&ok); -} -#endif - TNode<Int32T> CodeAssembler::Int32Constant(int32_t value) { return UncheckedCast<Int32T>(jsgraph()->Int32Constant(value)); } diff --git a/src/compiler/code-assembler.h b/src/compiler/code-assembler.h index b58b1754e5..a071b31c60 100644 --- a/src/compiler/code-assembler.h +++ b/src/compiler/code-assembler.h @@ -147,6 +147,7 @@ OBJECT_TYPE_CASE(Object) OBJECT_TYPE_CASE(Smi) OBJECT_TYPE_CASE(TaggedIndex) OBJECT_TYPE_CASE(HeapObject) +OBJECT_TYPE_CASE(HeapObjectReference) OBJECT_TYPE_LIST(OBJECT_TYPE_CASE) HEAP_OBJECT_ORDINARY_TYPE_LIST(OBJECT_TYPE_CASE) STRUCT_LIST(OBJECT_TYPE_STRUCT_CASE) @@ -425,7 +426,8 @@ class V8_EXPORT_PRIVATE CodeAssembler { static_assert(types_have_common_values<A, PreviousType>::value, "Incompatible types: this cast can never succeed."); - static_assert(std::is_convertible<TNode<A>, TNode<Object>>::value, + static_assert(std::is_convertible<TNode<A>, TNode<Object>>::value || + std::is_convertible<TNode<A>, TNode<MaybeObject>>::value, "Coercion to untagged values cannot be " "checked."); static_assert( @@ -434,10 +436,6 @@ class V8_EXPORT_PRIVATE CodeAssembler { "Unnecessary CAST: types are convertible."); #ifdef DEBUG if (FLAG_debug_code) { - if (std::is_same<PreviousType, MaybeObject>::value) { - code_assembler_->GenerateCheckMaybeObjectIsObject( - TNode<MaybeObject>::UncheckedCast(node_), location_); - } TNode<ExternalReference> function = code_assembler_->ExternalConstant( ExternalReference::check_object_type()); code_assembler_->CallCFunction( @@ -502,11 +500,6 @@ #define TORQUE_CAST(x) ca_.Cast(x) #endif -#ifdef DEBUG - void GenerateCheckMaybeObjectIsObject(TNode<MaybeObject> node, - const char* location); -#endif - // Constants. TNode<Int32T> Int32Constant(int32_t value); TNode<Int64T> Int64Constant(int64_t value); diff --git a/src/diagnostics/objects-printer.cc b/src/diagnostics/objects-printer.cc index 4e8e0f829a..021581cd6f 100644 --- a/src/diagnostics/objects-printer.cc +++ b/src/diagnostics/objects-printer.cc @@ -2599,31 +2599,6 @@ void PreparseData::PreparseDataPrint(std::ostream& os) { os << "\n"; } -template <HeapObjectReferenceType kRefType, typename StorageType> -void TaggedImpl<kRefType, StorageType>::Print() { - StdoutStream os; - this->Print(os); - os << std::flush; -} - -template <HeapObjectReferenceType kRefType, typename StorageType> -void TaggedImpl<kRefType, StorageType>::Print(std::ostream& os) { - Smi smi; - HeapObject heap_object; - if (ToSmi(&smi)) { - smi.SmiPrint(os); - } else if (IsCleared()) { - os << "[cleared]"; - } else if (GetHeapObjectIfWeak(&heap_object)) { - os << "[weak] "; - heap_object.HeapObjectPrint(os); - } else if (GetHeapObjectIfStrong(&heap_object)) { - heap_object.HeapObjectPrint(os); - } else { - UNREACHABLE(); - } -} - void HeapNumber::HeapNumberPrint(std::ostream& os) { HeapNumberShortPrint(os); os << "\n"; diff --git a/src/ic/accessor-assembler.cc b/src/ic/accessor-assembler.cc index c723f449c2..f98413fc55 100644 --- a/src/ic/accessor-assembler.cc +++ b/src/ic/accessor-assembler.cc @@ -69,7 +69,7 @@ TNode<MaybeObject> AccessorAssembler::LoadHandlerDataField( return LoadMaybeWeakObjectField(handler, offset); } -TNode<MaybeObject> AccessorAssembler::TryMonomorphicCase( +TNode<HeapObjectReference> AccessorAssembler::TryMonomorphicCase( TNode<TaggedIndex> slot, TNode<FeedbackVector> vector, TNode<Map> lookup_start_object_map, Label* if_handler, TVariable<MaybeObject>* var_handler, Label* if_miss) { @@ -84,9 +84,8 @@ TNode<MaybeObject> AccessorAssembler::TryMonomorphicCase( // into ElementOffsetFromIndex() allows it to be folded into a single // [base, index, offset] indirect memory access on x64.
   TNode<IntPtrT> offset = ElementOffsetFromIndex(slot, HOLEY_ELEMENTS);
-  TNode<MaybeObject> feedback = ReinterpretCast<MaybeObject>(
-      Load(MachineType::AnyTagged(), vector,
-           IntPtrAdd(offset, IntPtrConstant(header_size))));
+  TNode<HeapObjectReference> feedback = CAST(Load<MaybeObject>(
+      vector, IntPtrAdd(offset, IntPtrConstant(header_size))));
 
   // Try to quickly handle the monomorphic case without knowing for sure
   // if we have a weak reference in feedback.
@@ -1383,7 +1382,8 @@ void AccessorAssembler::HandleStoreICHandlerCase(
 
   BIND(&if_nonsmi_handler);
   {
-    GotoIf(IsWeakOrCleared(handler), &store_transition_or_global);
+    TNode<HeapObjectReference> ref_handler = CAST(handler);
+    GotoIf(IsWeakOrCleared(ref_handler), &store_transition_or_global);
     TNode<HeapObject> strong_handler = CAST(handler);
     TNode<Map> handler_map = LoadMap(strong_handler);
     Branch(IsCodeTMap(handler_map), &call_handler, &if_proto_handler);
@@ -3047,7 +3047,7 @@ void AccessorAssembler::LoadIC_BytecodeHandler(const LazyLoadICParameters* p,
   TVARIABLE(MaybeObject, var_handler);
   Label try_polymorphic(this), if_handler(this, &var_handler);
 
-  TNode<MaybeObject> feedback = TryMonomorphicCase(
+  TNode<HeapObjectReference> feedback = TryMonomorphicCase(
       p->slot(), CAST(p->vector()), lookup_start_object_map, &if_handler,
       &var_handler, &try_polymorphic);
@@ -3113,7 +3113,7 @@ void AccessorAssembler::LoadIC(const LoadICParameters* p) {
   GotoIf(IsUndefined(p->vector()), &no_feedback);
 
   // Check monomorphic case.
-  TNode<MaybeObject> feedback =
+  TNode<HeapObjectReference> feedback =
       TryMonomorphicCase(p->slot(), CAST(p->vector()), lookup_start_object_map,
                          &if_handler, &var_handler, &try_polymorphic);
   BIND(&if_handler);
@@ -3169,7 +3169,7 @@ void AccessorAssembler::LoadSuperIC(const LoadICParameters* p) {
   TNode<Map> lookup_start_object_map = LoadMap(CAST(p->lookup_start_object()));
   GotoIf(IsDeprecatedMap(lookup_start_object_map), &miss);
 
-  TNode<MaybeObject> feedback =
+  TNode<HeapObjectReference> feedback =
       TryMonomorphicCase(p->slot(), CAST(p->vector()), lookup_start_object_map,
                          &if_handler, &var_handler, &try_polymorphic);
@@ -3480,7 +3480,7 @@ void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p,
   GotoIf(IsUndefined(p->vector()), &generic);
 
   // Check monomorphic case.
-  TNode<MaybeObject> feedback =
+  TNode<HeapObjectReference> feedback =
       TryMonomorphicCase(p->slot(), CAST(p->vector()), lookup_start_object_map,
                          &if_handler, &var_handler, &try_polymorphic);
   BIND(&if_handler);
@@ -3731,7 +3731,7 @@ void AccessorAssembler::StoreIC(const StoreICParameters* p) {
   GotoIf(IsUndefined(p->vector()), &no_feedback);
 
   // Check monomorphic case.
-  TNode<MaybeObject> feedback =
+  TNode<HeapObjectReference> feedback =
       TryMonomorphicCase(p->slot(), CAST(p->vector()), receiver_map,
                          &if_handler, &var_handler, &try_polymorphic);
   BIND(&if_handler);
@@ -3929,7 +3929,7 @@ void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p) {
   GotoIf(IsUndefined(p->vector()), &no_feedback);
 
   // Check monomorphic case.
-  TNode<MaybeObject> feedback =
+  TNode<HeapObjectReference> feedback =
       TryMonomorphicCase(p->slot(), CAST(p->vector()), receiver_map,
                          &if_handler, &var_handler, &try_polymorphic);
   BIND(&if_handler);
@@ -4003,7 +4003,7 @@ void AccessorAssembler::DefineKeyedOwnIC(const StoreICParameters* p) {
   GotoIf(IsUndefined(p->vector()), &no_feedback);
 
   // Check monomorphic case.
-  TNode<MaybeObject> feedback =
+  TNode<HeapObjectReference> feedback =
       TryMonomorphicCase(p->slot(), CAST(p->vector()), receiver_map,
                          &if_handler, &var_handler, &try_polymorphic);
   BIND(&if_handler);
@@ -4074,7 +4074,7 @@ void AccessorAssembler::StoreInArrayLiteralIC(const StoreICParameters* p) {
 
   GotoIf(IsUndefined(p->vector()), &no_feedback);
 
-  TNode<MaybeObject> feedback =
+  TNode<HeapObjectReference> feedback =
       TryMonomorphicCase(p->slot(), CAST(p->vector()), array_map, &if_handler,
                          &var_handler, &try_polymorphic);
@@ -4879,7 +4879,7 @@ void AccessorAssembler::GenerateCloneObjectIC() {
 
   GotoIf(IsUndefined(maybe_vector), &slow);
 
-  TNode<MaybeObject> feedback =
+  TNode<HeapObjectReference> feedback =
       TryMonomorphicCase(slot, CAST(maybe_vector), source_map, &if_handler,
                          &var_handler, &try_polymorphic);
diff --git a/src/ic/accessor-assembler.h b/src/ic/accessor-assembler.h
index 2f3c44f116..40445355f3 100644
--- a/src/ic/accessor-assembler.h
+++ b/src/ic/accessor-assembler.h
@@ -346,12 +346,10 @@ class V8_EXPORT_PRIVATE AccessorAssembler : public CodeStubAssembler {
   // IC dispatcher behavior.
 
   // Checks monomorphic case. Returns {feedback} entry of the vector.
-  TNode<MaybeObject> TryMonomorphicCase(TNode<TaggedIndex> slot,
-                                        TNode<FeedbackVector> vector,
-                                        TNode<Map> lookup_start_object_map,
-                                        Label* if_handler,
-                                        TVariable<MaybeObject>* var_handler,
-                                        Label* if_miss);
+  TNode<HeapObjectReference> TryMonomorphicCase(
+      TNode<TaggedIndex> slot, TNode<FeedbackVector> vector,
+      TNode<Map> lookup_start_object_map, Label* if_handler,
+      TVariable<MaybeObject>* var_handler, Label* if_miss);
   void HandlePolymorphicCase(TNode<Map> lookup_start_object_map,
                              TNode<WeakFixedArray> feedback, Label* if_handler,
                              TVariable<MaybeObject>* var_handler,
diff --git a/src/interpreter/interpreter-assembler.cc b/src/interpreter/interpreter-assembler.cc
index 0c10c69350..14e06e2d27 100644
--- a/src/interpreter/interpreter-assembler.cc
+++ b/src/interpreter/interpreter-assembler.cc
@@ -868,8 +868,8 @@ TNode<Object> InterpreterAssembler::ConstructWithSpread(
   IncrementCallCount(feedback_vector, slot_id);
 
   // Check if we have monomorphic {new_target} feedback already.
-  TNode<MaybeObject> feedback =
-      LoadFeedbackVectorSlot(feedback_vector, slot_id);
+  TNode<HeapObjectReference> feedback =
+      CAST(LoadFeedbackVectorSlot(feedback_vector, slot_id));
   Branch(IsWeakReferenceToObject(feedback, new_target), &construct,
          &extra_checks);
diff --git a/src/objects/object-type.cc b/src/objects/object-type.cc
index dc48234034..0983ed510a 100644
--- a/src/objects/object-type.cc
+++ b/src/objects/object-type.cc
@@ -14,11 +14,42 @@ namespace internal {
 Address CheckObjectType(Address raw_value, Address raw_type,
                         Address raw_location) {
 #ifdef DEBUG
-  Object value(raw_value);
-  Smi type(raw_type);
+  ObjectType type = static_cast<ObjectType>(Smi(raw_type).value());
   String location = String::cast(Object(raw_location));
   const char* expected;
-  switch (static_cast<ObjectType>(type.value())) {
+
+  if (HAS_WEAK_HEAP_OBJECT_TAG(raw_value)) {
+    if (type == ObjectType::kHeapObjectReference) return Smi::FromInt(0).ptr();
+    // Casts of weak references are not allowed, one should use
+    // GetHeapObjectIfStrong / GetHeapObjectAssumeWeak first.
+    switch (type) {
+#define TYPE_CASE(Name)     \
+  case ObjectType::k##Name: \
+    expected = #Name;       \
+    break;
+#define TYPE_STRUCT_CASE(NAME, Name, name) \
+  case ObjectType::k##Name:                \
+    expected = #Name;                      \
+    break;
+
+      TYPE_CASE(Object)
+      TYPE_CASE(Smi)
+      TYPE_CASE(TaggedIndex)
+      TYPE_CASE(HeapObject)
+      TYPE_CASE(HeapObjectReference)
+      OBJECT_TYPE_LIST(TYPE_CASE)
+      HEAP_OBJECT_TYPE_LIST(TYPE_CASE)
+      STRUCT_LIST(TYPE_STRUCT_CASE)
+#undef TYPE_CASE
+#undef TYPE_STRUCT_CASE
+    }
+  } else {
+    Object value(raw_value);
+    switch (type) {
+      case ObjectType::kHeapObjectReference:
+        if (!value.IsSmi()) return Smi::FromInt(0).ptr();
+        expected = "HeapObjectReference";
+        break;
 #define TYPE_CASE(Name)                                 \
   case ObjectType::k##Name:                             \
     if (value.Is##Name()) return Smi::FromInt(0).ptr(); \
@@ -39,9 +70,11 @@ Address CheckObjectType(Address raw_value, Address raw_type,
     STRUCT_LIST(TYPE_STRUCT_CASE)
 #undef TYPE_CASE
 #undef TYPE_STRUCT_CASE
+  }
   }
+  MaybeObject maybe_value(raw_value);
   std::stringstream value_description;
-  value.Print(value_description);
+  maybe_value.Print(value_description);
   FATAL(
       "Type cast failed in %s\n"
       "  Expected %s but found %s",
diff --git a/src/objects/object-type.h b/src/objects/object-type.h
index d3de80721d..d5410ad13b 100644
--- a/src/objects/object-type.h
+++ b/src/objects/object-type.h
@@ -20,6 +20,7 @@ enum class ObjectType {
   ENUM_ELEMENT(Smi)                    //
   ENUM_ELEMENT(TaggedIndex)            //
   ENUM_ELEMENT(HeapObject)             //
+  ENUM_ELEMENT(HeapObjectReference)    //
   OBJECT_TYPE_LIST(ENUM_ELEMENT)       //
   HEAP_OBJECT_TYPE_LIST(ENUM_ELEMENT)  //
   STRUCT_LIST(ENUM_STRUCT_ELEMENT)     //
diff --git a/src/objects/tagged-impl.cc b/src/objects/tagged-impl.cc
index 9ae89fba19..bc48297aca 100644
--- a/src/objects/tagged-impl.cc
+++ b/src/objects/tagged-impl.cc
@@ -7,6 +7,8 @@
 #include <sstream>
 
 #include "src/objects/objects.h"
+#include "src/objects/smi.h"
+#include "src/objects/tagged-impl-inl.h"
 #include "src/strings/string-stream.h"
 #include "src/utils/ostreams.h"
@@ -52,6 +54,34 @@ void TaggedImpl<kRefType, StorageType>::ShortPrint(std::ostream& os) {
   os << Brief(*this);
 }
 
+#ifdef OBJECT_PRINT
+template <HeapObjectReferenceType kRefType, typename StorageType>
+void TaggedImpl<kRefType, StorageType>::Print() {
+  StdoutStream os;
+  this->Print(os);
+  os << std::flush;
+}
+
+template <HeapObjectReferenceType kRefType, typename StorageType>
+void TaggedImpl<kRefType, StorageType>::Print(std::ostream& os) {
+  Smi smi(0);
+  HeapObject heap_object;
+  if (ToSmi(&smi)) {
+    os << "Smi: " << std::hex << "0x" << smi.value();
+    os << std::dec << " (" << smi.value() << ")\n";
+  } else if (IsCleared()) {
+    os << "[cleared]";
+  } else if (GetHeapObjectIfWeak(&heap_object)) {
+    os << "[weak] ";
+    heap_object.HeapObjectPrint(os);
+  } else if (GetHeapObjectIfStrong(&heap_object)) {
+    heap_object.HeapObjectPrint(os);
+  } else {
+    UNREACHABLE();
+  }
+}
+#endif  // OBJECT_PRINT
+
 // Explicit instantiation declarations.
 template class TaggedImpl<HeapObjectReferenceType::STRONG, Address>;
 template class TaggedImpl<HeapObjectReferenceType::WEAK, Address>;

From 277d37e0af7a57a01bddb6b1609e426b65dae290 Mon Sep 17 00:00:00 2001
From: Leszek Swirski
Date: Tue, 13 Sep 2022 11:29:35 +0200
Subject: [PATCH 0069/1772] [build] Fix build flag deps with v8_multi_arch_build

v8_multi_arch_build toggles v8_enable_pointer_compression, but some other
flags are set depending on v8_enable_pointer_compression. Previously the
v8_multi_arch_build condition was resetting some of these in its branch,
but we can make this simpler by moving the pointer compression toggle
earlier, immediately after the default pointer compression setting.
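
For illustration, the resulting ordering in BUILD.gn is roughly the
following (a simplified GN sketch, not the literal file contents; the
toolchain check in step 2 is elided and its variable name is
hypothetical):

  # 1. Compute the pointer-compression default.
  if (v8_enable_pointer_compression == "") {
    v8_enable_pointer_compression =
        v8_current_cpu == "arm64" || v8_current_cpu == "x64"
  }

  # 2. Toggle it for the clang_x64_pointer_compression toolchain.
  if (v8_multi_arch_build && is_pointer_compression_toolchain) {
    v8_enable_pointer_compression = !v8_enable_pointer_compression
  }

  # 3. Only now derive dependent flags, so nothing needs resetting later.
  if (v8_enable_pointer_compression_shared_cage == "") {
    v8_enable_pointer_compression_shared_cage = v8_enable_pointer_compression
  }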
Change-Id: Ie5f4e73f947b693d4ba2abe4e1cf30009a2bbb2c Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3890918 Reviewed-by: Igor Sheludko Commit-Queue: Leszek Swirski Auto-Submit: Leszek Swirski Cr-Commit-Position: refs/heads/main@{#83154} --- BUILD.gn | 32 +++++++++++++++----------------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/BUILD.gn b/BUILD.gn index bbe5ec3c00..29a4773526 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -418,6 +418,21 @@ if (v8_enable_pointer_compression == "") { v8_enable_pointer_compression = v8_current_cpu == "arm64" || v8_current_cpu == "x64" } + +# Toggle pointer compression for correctness fuzzing when building the +# clang_x64_pointer_compression toolchain. We'll correctness-compare the +# default build with the clang_x64_pointer_compression build. +if (v8_multi_arch_build && + rebase_path(get_label_info(":d8", "root_out_dir"), root_build_dir) == + "clang_x64_pointer_compression") { + v8_enable_pointer_compression = !v8_enable_pointer_compression +} + +# Ensure the sandbox is on/off in the same way as pointer compression for +# correctness fuzzing builds. +if (v8_multi_arch_build) { + v8_enable_sandbox = v8_enable_pointer_compression +} if (v8_enable_pointer_compression_shared_cage == "") { v8_enable_pointer_compression_shared_cage = v8_enable_pointer_compression } @@ -481,23 +496,6 @@ assert(!v8_enable_trace_ignition || v8_enable_trace_unoptimized, assert(!v8_enable_trace_baseline_exec || v8_enable_trace_unoptimized, "Baseline tracing requires unoptimized tracing to be enabled.") -# Toggle pointer compression for correctness fuzzing when building the -# clang_x64_pointer_compression toolchain. We'll correctness-compare the -# default build with the clang_x64_pointer_compression build. -if (v8_multi_arch_build && - rebase_path(get_label_info(":d8", "root_out_dir"), root_build_dir) == - "clang_x64_pointer_compression") { - v8_enable_pointer_compression = !v8_enable_pointer_compression - v8_enable_pointer_compression_shared_cage = v8_enable_pointer_compression - v8_enable_external_code_space = v8_enable_pointer_compression -} - -# Ensure the sandbox is on/off in the same way as pointer compression for -# correctness fuzzing builds. -if (v8_multi_arch_build) { - v8_enable_sandbox = v8_enable_pointer_compression -} - # Check if it is a Chromium build and activate PAC/BTI if needed. # TODO(cavalcantii): have a single point of integration with PAC/BTI flags. if (build_with_chromium && v8_current_cpu == "arm64" && From a9327e9394a41d16e5df1450cb41f89cbf599f2e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Samuel=20Gro=C3=9F?= Date: Mon, 12 Sep 2022 11:40:00 +0000 Subject: [PATCH 0070/1772] [sandbox] Schedule GC when EPT utilization reaches certain thresholds MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit During ExternalPointerTable::Grow, if we cross one of a handful of predefined utilization thresholds, we now request a (major) GC to free up entries that are no longer used in the table. 
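
Each threshold can fire at most once, on the Grow() call whose
utilization change crosses it. A minimal sketch of the pattern (hedged
standalone C++; RequestMajorGC() is a hypothetical stand-in for the
actual isolate->heap()->ReportExternalMemoryPressure() call in the diff
below):

  // Request a GC only when utilization first crosses a threshold.
  for (double threshold : {0.5, 0.75, 0.9, 0.95, 0.99}) {
    if (old_utilization < threshold && new_utilization >= threshold) {
      RequestMajorGC();
      // At most one request per Grow(), even if a single growth step
      // crosses several thresholds at once.
      break;
    }
  }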
Bug: v8:10391
Change-Id: Id2d262f0f1d4dc37aec1e4978a8be2d223fb2b2b
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3890971
Commit-Queue: Samuel Groß
Reviewed-by: Michael Lippautz
Cr-Commit-Position: refs/heads/main@{#83155}
---
 src/sandbox/external-pointer-table.cc | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/src/sandbox/external-pointer-table.cc b/src/sandbox/external-pointer-table.cc
index a72e5cbff0..3f57b87569 100644
--- a/src/sandbox/external-pointer-table.cc
+++ b/src/sandbox/external-pointer-table.cc
@@ -309,6 +309,18 @@ uint32_t ExternalPointerTable::Grow(Isolate* isolate) {
 
   set_capacity(new_capacity);
 
+  // Schedule GC when the table's utilization crosses one of these thresholds.
+  constexpr double kGCThresholds[] = {0.5, 0.75, 0.9, 0.95, 0.99};
+  constexpr double kMaxCapacity = static_cast<double>(kMaxExternalPointers);
+  double old_utilization = static_cast<double>(old_capacity) / kMaxCapacity;
+  double new_utilization = static_cast<double>(new_capacity) / kMaxCapacity;
+  for (double threshold : kGCThresholds) {
+    if (old_utilization < threshold && new_utilization >= threshold) {
+      isolate->heap()->ReportExternalMemoryPressure();
+      break;
+    }
+  }
+
   // Build freelist bottom to top, which might be more cache friendly.
   uint32_t start = std::max<uint32_t>(old_capacity, 1);  // Skip entry zero
   uint32_t last = new_capacity - 1;

From 3501fca7e5af2692db0d924f61f8ca508c9e333d Mon Sep 17 00:00:00 2001
From: Leszek Swirski
Date: Mon, 12 Sep 2022 16:20:08 +0200
Subject: [PATCH 0071/1772] Reland "[maglev] Optimize monomorphic keyed loads"

This is a reland of commit 133e7f8362a0bb799db950f5f57439033991f147

Reland: Rebase onto v8_multi_arch_build fix.

Original change's description:
> [maglev] Optimize monomorphic keyed loads
>
> Add a fast path for keyed loads that are:
>
> 1. Monomorphic,
> 2. Fast elements accesses,
> 3. Not out-of-bounds (deopt on OOB),
> 4.
Not holey > > Bug: v8:7700 > Change-Id: I4d46f4d0ce7065c93a9b092833fb16a8c9e9f94e > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3882974 > Auto-Submit: Leszek Swirski > Reviewed-by: Jakob Linke > Commit-Queue: Leszek Swirski > Cr-Commit-Position: refs/heads/main@{#83149} Bug: v8:7700 Change-Id: Ib48bdc8729757527c19d0b24864f8eab0570c3f3 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3890920 Commit-Queue: Jakob Linke Reviewed-by: Jakob Linke Auto-Submit: Leszek Swirski Cr-Commit-Position: refs/heads/main@{#83156} --- src/diagnostics/objects-printer.cc | 2 +- src/maglev/maglev-graph-builder.cc | 73 ++++++++++++++++ src/maglev/maglev-graph-builder.h | 8 ++ src/maglev/maglev-graph-verifier.h | 4 + src/maglev/maglev-ir.cc | 131 +++++++++++++++++++++++++++++ src/maglev/maglev-ir.h | 70 +++++++++++++++ 6 files changed, 287 insertions(+), 1 deletion(-) diff --git a/src/diagnostics/objects-printer.cc b/src/diagnostics/objects-printer.cc index 021581cd6f..55270dfe2b 100644 --- a/src/diagnostics/objects-printer.cc +++ b/src/diagnostics/objects-printer.cc @@ -1265,7 +1265,6 @@ void FeedbackNexus::Print(std::ostream& os) { case FeedbackSlotKind::kDefineKeyedOwn: case FeedbackSlotKind::kHasKeyed: case FeedbackSlotKind::kInstanceOf: - case FeedbackSlotKind::kLoadKeyed: case FeedbackSlotKind::kDefineKeyedOwnPropertyInLiteral: case FeedbackSlotKind::kStoreGlobalSloppy: case FeedbackSlotKind::kStoreGlobalStrict: @@ -1291,6 +1290,7 @@ void FeedbackNexus::Print(std::ostream& os) { } break; } + case FeedbackSlotKind::kLoadKeyed: case FeedbackSlotKind::kLoadProperty: { os << InlineCacheState2String(ic_state()); if (ic_state() == InlineCacheState::MONOMORPHIC) { diff --git a/src/maglev/maglev-graph-builder.cc b/src/maglev/maglev-graph-builder.cc index ace747ead5..a45609954f 100644 --- a/src/maglev/maglev-graph-builder.cc +++ b/src/maglev/maglev-graph-builder.cc @@ -22,6 +22,7 @@ #include "src/maglev/maglev-compilation-unit.h" #include "src/maglev/maglev-interpreter-frame-state.h" #include "src/maglev/maglev-ir.h" +#include "src/objects/elements-kind.h" #include "src/objects/feedback-vector.h" #include "src/objects/literal-objects-inl.h" #include "src/objects/name-inl.h" @@ -1133,6 +1134,60 @@ bool MaglevGraphBuilder::TryBuildMonomorphicLoadFromLoadHandler( return true; } +bool MaglevGraphBuilder::TryBuildMonomorphicElementLoad( + ValueNode* object, ValueNode* index, const compiler::MapRef& map, + MaybeObjectHandle handler) { + if (handler.is_null()) return false; + + if (handler->IsSmi()) { + return TryBuildMonomorphicElementLoadFromSmiHandler( + object, index, map, handler->ToSmi().value()); + } + return false; +} + +bool MaglevGraphBuilder::TryBuildMonomorphicElementLoadFromSmiHandler( + ValueNode* object, ValueNode* index, const compiler::MapRef& map, + int32_t handler) { + LoadHandler::Kind kind = LoadHandler::KindBits::decode(handler); + + switch (kind) { + case LoadHandler::Kind::kElement: { + if (LoadHandler::AllowOutOfBoundsBits::decode(handler)) { + return false; + } + ElementsKind elements_kind = + LoadHandler::ElementsKindBits::decode(handler); + if (!IsFastElementsKind(elements_kind)) return false; + + // TODO(leszeks): Handle holey elements. 
+      if (IsHoleyElementsKind(elements_kind)) return false;
+      DCHECK(!LoadHandler::ConvertHoleBits::decode(handler));
+
+      BuildMapCheck(object, map);
+      BuildCheckSmi(index);
+
+      if (LoadHandler::IsJsArrayBits::decode(handler)) {
+        DCHECK(map.IsJSArrayMap());
+        AddNewNode<CheckJSArrayBounds>({object, index});
+      } else {
+        DCHECK(!map.IsJSArrayMap());
+        DCHECK(map.IsJSObjectMap());
+        AddNewNode<CheckJSObjectElementsBounds>({object, index});
+      }
+      if (elements_kind == ElementsKind::PACKED_DOUBLE_ELEMENTS) {
+        SetAccumulator(AddNewNode<LoadDoubleElement>({object, index}));
+      } else {
+        DCHECK(!IsDoubleElementsKind(elements_kind));
+        SetAccumulator(AddNewNode<LoadTaggedElement>({object, index}));
+      }
+      return true;
+    }
+    default:
+      return false;
+  }
+}
+
 void MaglevGraphBuilder::VisitGetNamedProperty() {
   // GetNamedProperty <object> <name_index> <slot>
   ValueNode* object = LoadRegisterTagged(0);
@@ -1226,6 +1281,8 @@ void MaglevGraphBuilder::VisitGetNamedPropertyFromSuper() {
 void MaglevGraphBuilder::VisitGetKeyedProperty() {
   // GetKeyedProperty <object> <slot>
   ValueNode* object = LoadRegisterTagged(0);
+  // TODO(leszeks): We don't need to tag the key if it's an Int32 and a simple
+  // monomorphic element load.
   ValueNode* key = GetAccumulatorTagged();
   FeedbackSlot slot = GetSlotOperand(1);
   compiler::FeedbackSource feedback_source{feedback(), slot};
@@ -1240,6 +1297,22 @@ void MaglevGraphBuilder::VisitGetKeyedProperty() {
           DeoptimizeReason::kInsufficientTypeFeedbackForGenericKeyedAccess);
       return;
 
+    case compiler::ProcessedFeedback::kElementAccess: {
+      const compiler::ElementAccessFeedback& element_feedback =
+          processed_feedback.AsElementAccess();
+      if (element_feedback.transition_groups().size() != 1) break;
+      compiler::MapRef map = MakeRefAssumeMemoryFence(
+          broker(), element_feedback.transition_groups()[0].front());
+
+      // Monomorphic load, check the handler.
+      // TODO(leszeks): Make GetFeedbackForPropertyAccess read the handler.
+ MaybeObjectHandle handler = + FeedbackNexusForSlot(slot).FindHandlerForMap(map.object()); + + if (TryBuildMonomorphicElementLoad(object, key, map, handler)) return; + break; + } + default: break; } diff --git a/src/maglev/maglev-graph-builder.h b/src/maglev/maglev-graph-builder.h index c1c9aad075..5f34fc5350 100644 --- a/src/maglev/maglev-graph-builder.h +++ b/src/maglev/maglev-graph-builder.h @@ -975,6 +975,14 @@ class MaglevGraphBuilder { const compiler::MapRef& map, LoadHandler handler); + bool TryBuildMonomorphicElementLoad(ValueNode* object, ValueNode* index, + const compiler::MapRef& map, + MaybeObjectHandle handler); + bool TryBuildMonomorphicElementLoadFromSmiHandler(ValueNode* object, + ValueNode* index, + const compiler::MapRef& map, + int32_t handler); + bool TryBuildMonomorphicStore(ValueNode* object, const compiler::MapRef& map, MaybeObjectHandle handler); bool TryBuildMonomorphicStoreFromSmiHandler(ValueNode* object, diff --git a/src/maglev/maglev-graph-verifier.h b/src/maglev/maglev-graph-verifier.h index 3ac7d270c7..e837e640bc 100644 --- a/src/maglev/maglev-graph-verifier.h +++ b/src/maglev/maglev-graph-verifier.h @@ -175,6 +175,10 @@ class MaglevGraphVerifier { case Opcode::kGenericLessThan: case Opcode::kGenericLessThanOrEqual: case Opcode::kGenericStrictEqual: + case Opcode::kCheckJSArrayBounds: + case Opcode::kCheckJSObjectElementsBounds: + case Opcode::kLoadTaggedElement: + case Opcode::kLoadDoubleElement: case Opcode::kGetIterator: case Opcode::kTaggedEqual: case Opcode::kTaggedNotEqual: diff --git a/src/maglev/maglev-ir.cc b/src/maglev/maglev-ir.cc index 8ac5d7364f..de48bb491b 100644 --- a/src/maglev/maglev-ir.cc +++ b/src/maglev/maglev-ir.cc @@ -28,6 +28,7 @@ #include "src/maglev/maglev-interpreter-frame-state.h" #include "src/maglev/maglev-ir-inl.h" #include "src/maglev/maglev-vreg-allocator.h" +#include "src/objects/instance-type.h" namespace v8 { namespace internal { @@ -1374,6 +1375,56 @@ void CheckMapsWithMigration::PrintParams( os << "(" << *map().object() << ")"; } +void CheckJSArrayBounds::AllocateVreg(MaglevVregAllocationState* vreg_state) { + UseRegister(receiver_input()); + UseRegister(index_input()); +} +void CheckJSArrayBounds::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + Register object = ToRegister(receiver_input()); + Register index = ToRegister(index_input()); + __ AssertNotSmi(object); + __ AssertSmi(index); + + if (FLAG_debug_code) { + __ CmpObjectType(object, JS_ARRAY_TYPE, kScratchRegister); + __ Assert(equal, AbortReason::kUnexpectedValue); + } + TaggedRegister length(kScratchRegister); + __ LoadAnyTaggedField(length, FieldOperand(object, JSArray::kLengthOffset)); + __ cmp_tagged(index, length.reg()); + __ EmitEagerDeoptIf(above_equal, DeoptimizeReason::kOutOfBounds, this); +} + +void CheckJSObjectElementsBounds::AllocateVreg( + MaglevVregAllocationState* vreg_state) { + UseRegister(receiver_input()); + UseRegister(index_input()); +} +void CheckJSObjectElementsBounds::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + Register object = ToRegister(receiver_input()); + Register index = ToRegister(index_input()); + __ AssertNotSmi(object); + __ AssertSmi(index); + + if (FLAG_debug_code) { + __ CmpObjectType(object, FIRST_JS_OBJECT_TYPE, kScratchRegister); + __ Assert(greater_equal, AbortReason::kUnexpectedValue); + } + __ LoadAnyTaggedField( + kScratchRegister, + FieldOperand(object, JSReceiver::kPropertiesOrHashOffset)); + if (FLAG_debug_code) { + __ AssertNotSmi(kScratchRegister); + } + 
TaggedRegister length(kScratchRegister); + __ LoadAnyTaggedField( + length, FieldOperand(kScratchRegister, FixedArray::kLengthOffset)); + __ cmp_tagged(index, length.reg()); + __ EmitEagerDeoptIf(above_equal, DeoptimizeReason::kOutOfBounds, this); +} + void CheckedInternalizedString::AllocateVreg( MaglevVregAllocationState* vreg_state) { UseRegister(object_input()); @@ -1463,6 +1514,86 @@ void LoadDoubleField::PrintParams(std::ostream& os, os << "(0x" << std::hex << offset() << std::dec << ")"; } +void LoadTaggedElement::AllocateVreg(MaglevVregAllocationState* vreg_state) { + UseRegister(object_input()); + UseRegister(index_input()); + DefineAsRegister(vreg_state, this); +} +void LoadTaggedElement::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + Register object = ToRegister(object_input()); + Register index = ToRegister(index_input()); + Register result_reg = ToRegister(result()); + __ AssertNotSmi(object); + if (FLAG_debug_code) { + __ CmpObjectType(object, JS_OBJECT_TYPE, kScratchRegister); + __ Assert(above_equal, AbortReason::kUnexpectedValue); + } + __ DecompressAnyTagged(kScratchRegister, + FieldOperand(object, JSObject::kElementsOffset)); + if (FLAG_debug_code) { + __ CmpObjectType(kScratchRegister, FIXED_ARRAY_TYPE, kScratchRegister); + __ Assert(equal, AbortReason::kUnexpectedValue); + // Reload since CmpObjectType clobbered the scratch register. + __ DecompressAnyTagged(kScratchRegister, + FieldOperand(object, JSObject::kElementsOffset)); + } + __ AssertSmi(index); + // Zero out top bits of index reg (these were previously either zero already, + // or the cage base). This technically mutates it, but since it's a Smi, that + // doesn't matter. + __ movl(index, index); + static_assert(kSmiTagSize + kSmiShiftSize < times_tagged_size, + "Folding the Smi shift into the FixedArray entry size shift " + "only works if the shift is small"); + __ DecompressAnyTagged( + result_reg, + FieldOperand(kScratchRegister, index, + static_cast(times_tagged_size - + (kSmiTagSize + kSmiShiftSize)), + FixedArray::kHeaderSize)); +} + +void LoadDoubleElement::AllocateVreg(MaglevVregAllocationState* vreg_state) { + UseRegister(object_input()); + UseRegister(index_input()); + DefineAsRegister(vreg_state, this); +} +void LoadDoubleElement::GenerateCode(MaglevAssembler* masm, + const ProcessingState& state) { + Register object = ToRegister(object_input()); + Register index = ToRegister(index_input()); + DoubleRegister result_reg = ToDoubleRegister(result()); + __ AssertNotSmi(object); + if (FLAG_debug_code) { + __ CmpObjectType(object, JS_OBJECT_TYPE, kScratchRegister); + __ Assert(above_equal, AbortReason::kUnexpectedValue); + } + __ DecompressAnyTagged(kScratchRegister, + FieldOperand(object, JSObject::kElementsOffset)); + if (FLAG_debug_code) { + __ CmpObjectType(kScratchRegister, FIXED_DOUBLE_ARRAY_TYPE, + kScratchRegister); + __ Assert(equal, AbortReason::kUnexpectedValue); + // Reload since CmpObjectType clobbered the scratch register. + __ DecompressAnyTagged(kScratchRegister, + FieldOperand(object, JSObject::kElementsOffset)); + } + __ AssertSmi(index); + // Zero out top bits of index reg (these were previously either zero already, + // or the cage base). This technically mutates it, but since it's a Smi, that + // doesn't matter. 
+ __ movl(index, index); + static_assert(kSmiTagSize + kSmiShiftSize < times_8, + "Folding the Smi shift into the FixedArray entry size shift " + "only works if the shift is small"); + __ Movsd(result_reg, + FieldOperand(kScratchRegister, index, + static_cast(times_8 - + (kSmiTagSize + kSmiShiftSize)), + FixedDoubleArray::kHeaderSize)); +} + void StoreTaggedFieldNoWriteBarrier::AllocateVreg( MaglevVregAllocationState* vreg_state) { UseRegister(object_input()); diff --git a/src/maglev/maglev-ir.h b/src/maglev/maglev-ir.h index 37f5a38d1e..a1766807f9 100644 --- a/src/maglev/maglev-ir.h +++ b/src/maglev/maglev-ir.h @@ -144,6 +144,8 @@ class CompactInterpreterFrameState; V(InitialValue) \ V(LoadTaggedField) \ V(LoadDoubleField) \ + V(LoadTaggedElement) \ + V(LoadDoubleElement) \ V(LoadGlobal) \ V(LoadNamedGeneric) \ V(LoadNamedFromSuperGeneric) \ @@ -192,6 +194,8 @@ class CompactInterpreterFrameState; V(CheckSymbol) \ V(CheckString) \ V(CheckMapsWithMigration) \ + V(CheckJSArrayBounds) \ + V(CheckJSObjectElementsBounds) \ V(GeneratorStore) \ V(JumpLoopPrologue) \ V(StoreTaggedFieldNoWriteBarrier) \ @@ -2663,6 +2667,39 @@ class CheckMapsWithMigration const CheckType check_type_; }; +class CheckJSArrayBounds : public FixedInputNodeT<2, CheckJSArrayBounds> { + using Base = FixedInputNodeT<2, CheckJSArrayBounds>; + + public: + explicit CheckJSArrayBounds(uint64_t bitfield) : Base(bitfield) {} + + static constexpr OpProperties kProperties = OpProperties::EagerDeopt(); + + static constexpr int kReceiverIndex = 0; + static constexpr int kIndexIndex = 1; + Input& receiver_input() { return input(kReceiverIndex); } + Input& index_input() { return input(kIndexIndex); } + + DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS() +}; + +class CheckJSObjectElementsBounds + : public FixedInputNodeT<2, CheckJSObjectElementsBounds> { + using Base = FixedInputNodeT<2, CheckJSObjectElementsBounds>; + + public: + explicit CheckJSObjectElementsBounds(uint64_t bitfield) : Base(bitfield) {} + + static constexpr OpProperties kProperties = OpProperties::EagerDeopt(); + + static constexpr int kReceiverIndex = 0; + static constexpr int kIndexIndex = 1; + Input& receiver_input() { return input(kReceiverIndex); } + Input& index_input() { return input(kIndexIndex); } + + DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS() +}; + class CheckedInternalizedString : public FixedInputValueNodeT<1, CheckedInternalizedString> { using Base = FixedInputValueNodeT<1, CheckedInternalizedString>; @@ -2758,6 +2795,39 @@ class LoadDoubleField : public FixedInputValueNodeT<1, LoadDoubleField> { const int offset_; }; +class LoadTaggedElement : public FixedInputValueNodeT<2, LoadTaggedElement> { + using Base = FixedInputValueNodeT<2, LoadTaggedElement>; + + public: + explicit LoadTaggedElement(uint64_t bitfield) : Base(bitfield) {} + + static constexpr OpProperties kProperties = OpProperties::Reading(); + + static constexpr int kObjectIndex = 0; + static constexpr int kIndexIndex = 1; + Input& object_input() { return input(kObjectIndex); } + Input& index_input() { return input(kIndexIndex); } + + DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS() +}; + +class LoadDoubleElement : public FixedInputValueNodeT<2, LoadDoubleElement> { + using Base = FixedInputValueNodeT<2, LoadDoubleElement>; + + public: + explicit LoadDoubleElement(uint64_t bitfield) : Base(bitfield) {} + + static constexpr OpProperties kProperties = + OpProperties::Reading() | OpProperties::Float64(); + + static constexpr int kObjectIndex = 0; + static constexpr int kIndexIndex = 1; + Input& 
object_input() { return input(kObjectIndex); }
+  Input& index_input() { return input(kIndexIndex); }
+
+  DECL_NODE_INTERFACE_WITH_EMPTY_PRINT_PARAMS()
+};
+
 class StoreTaggedFieldNoWriteBarrier
     : public FixedInputNodeT<2, StoreTaggedFieldNoWriteBarrier> {
   using Base = FixedInputNodeT<2, StoreTaggedFieldNoWriteBarrier>;

From e03af96c3d7b371974e26d3d704a2876d0b3764c Mon Sep 17 00:00:00 2001
From: Al Muthanna Athamina
Date: Tue, 13 Sep 2022 11:59:11 +0200
Subject: [PATCH 0072/1772] [infra] Remove old predictable Linux bots

Bug: v8:13052
Change-Id: Ida65f95547006e6fa2542362c59f20c60a63a9af
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3893852
Reviewed-by: Michael Achenbach
Commit-Queue: Almothana Athamneh
Cr-Commit-Position: refs/heads/main@{#83157}
---
 infra/mb/mb_config.pyl     |  3 ---
 infra/testing/builders.pyl | 10 ----------
 2 files changed, 13 deletions(-)

diff --git a/infra/mb/mb_config.pyl b/infra/mb/mb_config.pyl
index e1c268a0c1..5488996a64 100644
--- a/infra/mb/mb_config.pyl
+++ b/infra/mb/mb_config.pyl
@@ -126,7 +126,6 @@
     'V8 Linux - vtunejit': 'debug_x86_vtunejit',
     'V8 Linux64 - gcov coverage': 'release_x64_gcc_coverage',
     'V8 Linux64 - Fuzzilli - builder': 'release_x64_fuzzilli',
-    'V8 Linux - predictable - builder': 'release_x86_predictable',
     'V8 Linux64 - predictable - builder': 'release_x64_predictable',
     'V8 Linux - full debug builder': 'full_debug_x86',
     'V8 Mac64 - full debug builder': 'full_debug_x64',
@@ -645,8 +644,6 @@
       'release_trybot', 'x86', 'v8_no_i18n'],
     'release_x64_predictable': [
       'release_bot', 'x64', 'v8_enable_verify_predictable'],
-    'release_x86_predictable': [
-      'release_bot', 'x86', 'v8_enable_verify_predictable'],
     'release_x86_shared_verify_heap': [
       'release', 'x86', 'goma', 'shared', 'v8_verify_heap'],
     'release_x86_trybot': [
diff --git a/infra/testing/builders.pyl b/infra/testing/builders.pyl
index 50343ec320..dffb14dfa8 100644
--- a/infra/testing/builders.pyl
+++ b/infra/testing/builders.pyl
@@ -1205,16 +1205,6 @@
       {'name': 'v8testing', 'variant': 'default'},
     ],
   },
-  'V8 Linux - predictable': {
-    'swarming_dimensions': {
-      'os': 'Ubuntu-18.04',
-    },
-    'tests': [
-      {'name': 'benchmarks'},
-      {'name': 'd8testing'},
-      {'name': 'mozilla'},
-    ],
-  },
   'V8 Linux64 - predictable': {
     'swarming_dimensions': {
      'cpu': 'x86-64-avx2',

From e28c7178eec5b06490a5dcacc49908a905dbd3ba Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Mon, 12 Sep 2022 12:17:53 +0200
Subject: [PATCH 0073/1772] [heap] Fix FillCurrentPage for PagedNewSpace.

FillCurrentPage assumed that everything after top is empty, which
doesn't work with MinorMC and sweeping.
Revise FillCurrentPage based on SimulateFullSpace for MinorMC.
A similar implementation is provided in both unittests and cctest.
Migrating the affected cctests to unittests is left as future work.
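
Typical usage in a test then looks like this (a short sketch derived
from the test-shared-strings.cc change below; the handles vector keeps
the filler arrays alive until the GC):

  std::vector<Handle<FixedArray>> handles;
  // Fill the active new-space page so page promotion can kick in.
  heap::FillCurrentPage(heap->new_space(), &handles);
  heap->CollectGarbage(OLD_SPACE, GarbageCollectionReason::kTesting);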
Bug: v8:12612 Change-Id: Ie29be2fc7aaee25e1fd5f66b1c0959c2a45f007f Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3885888 Reviewed-by: Michael Lippautz Commit-Queue: Omer Katz Cr-Commit-Position: refs/heads/main@{#83158} --- src/heap/free-list.h | 10 ++ src/heap/paged-spaces-inl.h | 21 --- src/heap/paged-spaces.cc | 21 +++ src/heap/paged-spaces.h | 4 +- src/heap/spaces.h | 2 +- test/cctest/heap/heap-utils.cc | 98 +++++++++++- test/cctest/heap/heap-utils.h | 4 +- test/cctest/heap/test-heap.cc | 3 +- test/cctest/test-shared-strings.cc | 4 +- test/unittests/heap/heap-utils.cc | 239 ++++++++++++++++------------- test/unittests/heap/heap-utils.h | 14 +- 11 files changed, 271 insertions(+), 149 deletions(-) diff --git a/src/heap/free-list.h b/src/heap/free-list.h index c8fb58a36a..b3ed8375c3 100644 --- a/src/heap/free-list.h +++ b/src/heap/free-list.h @@ -78,6 +78,14 @@ class FreeListCategory { size_t SumFreeList(); int FreeListLength(); + template + void IterateNodesForTesting(Callback callback) { + for (FreeSpace cur_node = top(); !cur_node.is_null(); + cur_node = cur_node.next()) { + callback(cur_node); + } + } + private: // For debug builds we accurately compute free lists lengths up until // {kVeryLongFreeList} by manually walking the list. @@ -182,6 +190,8 @@ class FreeList { size_t wasted_bytes() { return wasted_bytes_; } + size_t min_block_size() const { return min_block_size_; } + template void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) { FreeListCategory* current = categories_[type]; diff --git a/src/heap/paged-spaces-inl.h b/src/heap/paged-spaces-inl.h index d7b1e1aa99..341cc40569 100644 --- a/src/heap/paged-spaces-inl.h +++ b/src/heap/paged-spaces-inl.h @@ -57,27 +57,6 @@ bool PagedSpaceBase::Contains(Object o) const { return Page::FromAddress(o.ptr())->owner() == this; } -void PagedSpaceBase::UnlinkFreeListCategories(Page* page) { - DCHECK_EQ(this, page->owner()); - page->ForAllFreeListCategories([this](FreeListCategory* category) { - free_list()->RemoveCategory(category); - }); -} - -size_t PagedSpaceBase::RelinkFreeListCategories(Page* page) { - DCHECK_EQ(this, page->owner()); - size_t added = 0; - page->ForAllFreeListCategories([this, &added](FreeListCategory* category) { - added += category->available(); - category->Relink(free_list()); - }); - - DCHECK_IMPLIES(!page->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE), - page->AvailableInFreeList() == - page->AvailableInFreeListFromAllocatedBytes()); - return added; -} - bool PagedSpaceBase::TryFreeLast(Address object_address, int object_size) { if (allocation_info_.top() != kNullAddress) { return allocation_info_.DecrementTopIfAdjacent(object_address, object_size); diff --git a/src/heap/paged-spaces.cc b/src/heap/paged-spaces.cc index f1954bded9..c2c6265824 100644 --- a/src/heap/paged-spaces.cc +++ b/src/heap/paged-spaces.cc @@ -1086,6 +1086,27 @@ void PagedSpaceBase::ReduceActiveSystemPages( MemoryAllocator::GetCommitPageSize()); } +void PagedSpaceBase::UnlinkFreeListCategories(Page* page) { + DCHECK_EQ(this, page->owner()); + page->ForAllFreeListCategories([this](FreeListCategory* category) { + free_list()->RemoveCategory(category); + }); +} + +size_t PagedSpaceBase::RelinkFreeListCategories(Page* page) { + DCHECK_EQ(this, page->owner()); + size_t added = 0; + page->ForAllFreeListCategories([this, &added](FreeListCategory* category) { + added += category->available(); + category->Relink(free_list()); + }); + + DCHECK_IMPLIES(!page->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE), + 
page->AvailableInFreeList() == + page->AvailableInFreeListFromAllocatedBytes()); + return added; +} + // ----------------------------------------------------------------------------- // MapSpace implementation diff --git a/src/heap/paged-spaces.h b/src/heap/paged-spaces.h index 20775a36be..7241a29b0e 100644 --- a/src/heap/paged-spaces.h +++ b/src/heap/paged-spaces.h @@ -284,8 +284,8 @@ class V8_EXPORT_PRIVATE PagedSpaceBase base::Mutex* mutex() { return &space_mutex_; } - inline void UnlinkFreeListCategories(Page* page); - inline size_t RelinkFreeListCategories(Page* page); + void UnlinkFreeListCategories(Page* page); + size_t RelinkFreeListCategories(Page* page); Page* first_page() override { return reinterpret_cast(memory_chunk_list_.front()); diff --git a/src/heap/spaces.h b/src/heap/spaces.h index 7ba97850a2..1d60095de3 100644 --- a/src/heap/spaces.h +++ b/src/heap/spaces.h @@ -288,7 +288,7 @@ class Page : public MemoryChunk { } } - size_t AvailableInFreeList(); + V8_EXPORT_PRIVATE size_t AvailableInFreeList(); size_t AvailableInFreeListFromAllocatedBytes() { DCHECK_GE(area_size(), wasted_memory() + allocated_bytes()); diff --git a/test/cctest/heap/heap-utils.cc b/test/cctest/heap/heap-utils.cc index e0046030dc..3ff794a809 100644 --- a/test/cctest/heap/heap-utils.cc +++ b/test/cctest/heap/heap-utils.cc @@ -5,15 +5,19 @@ #include "test/cctest/heap/heap-utils.h" #include "src/base/platform/mutex.h" +#include "src/common/assert-scope.h" #include "src/common/globals.h" #include "src/execution/isolate.h" #include "src/heap/factory.h" +#include "src/heap/free-list.h" #include "src/heap/heap-inl.h" #include "src/heap/incremental-marking.h" #include "src/heap/mark-compact.h" #include "src/heap/marking-barrier.h" #include "src/heap/memory-chunk.h" #include "src/heap/safepoint.h" +#include "src/heap/spaces.h" +#include "src/objects/free-space-inl.h" #include "test/cctest/cctest.h" namespace v8 { @@ -130,9 +134,94 @@ std::vector> CreatePadding(Heap* heap, int padding_size, return handles; } -bool FillCurrentPage(v8::internal::NewSpace* space, +namespace { +void FillPageInPagedSpace(Page* page, + std::vector>* out_handles) { + DCHECK(page->SweepingDone()); + PagedSpaceBase* paged_space = static_cast(page->owner()); + DCHECK_EQ(kNullAddress, paged_space->top()); + DCHECK(!page->Contains(paged_space->top())); + + for (Page* p : *paged_space) { + if (p != page) paged_space->UnlinkFreeListCategories(p); + } + + // If min_block_size is larger than FixedArray::kHeaderSize, all blocks in the + // free list can be used to allocate a fixed array. This guarantees that we + // can fill the whole page. + DCHECK_LT(FixedArray::kHeaderSize, + paged_space->free_list()->min_block_size()); + + std::vector available_sizes; + // Collect all free list block sizes + page->ForAllFreeListCategories( + [&available_sizes](FreeListCategory* category) { + category->IterateNodesForTesting([&available_sizes](FreeSpace node) { + int node_size = node.Size(); + DCHECK_LT(0, FixedArrayLenFromSize(node_size)); + available_sizes.push_back(node_size); + }); + }); + + Isolate* isolate = page->heap()->isolate(); + + // Allocate as many max size arrays as possible, while making sure not to + // leave behind a block too small to fit a FixedArray. 
+ const int max_array_length = FixedArrayLenFromSize(kMaxRegularHeapObjectSize); + for (size_t i = 0; i < available_sizes.size(); ++i) { + int available_size = available_sizes[i]; + while (available_size > + kMaxRegularHeapObjectSize + FixedArray::kHeaderSize) { + Handle fixed_array = isolate->factory()->NewFixedArray( + max_array_length, AllocationType::kYoung); + if (out_handles) out_handles->push_back(fixed_array); + available_size -= kMaxRegularHeapObjectSize; + } + if (available_size > kMaxRegularHeapObjectSize) { + // Allocate less than kMaxRegularHeapObjectSize to ensure remaining space + // can be used to allcoate another FixedArray. + int array_size = kMaxRegularHeapObjectSize - FixedArray::kHeaderSize; + Handle fixed_array = isolate->factory()->NewFixedArray( + FixedArrayLenFromSize(array_size), AllocationType::kYoung); + if (out_handles) out_handles->push_back(fixed_array); + available_size -= array_size; + } + DCHECK_LE(available_size, kMaxRegularHeapObjectSize); + DCHECK_LT(0, FixedArrayLenFromSize(available_size)); + available_sizes[i] = available_size; + } + + // Allocate FixedArrays in remaining free list blocks, from largest to + // smallest. + std::sort(available_sizes.begin(), available_sizes.end(), + [](size_t a, size_t b) { return a > b; }); + for (size_t i = 0; i < available_sizes.size(); ++i) { + int available_size = available_sizes[i]; + DCHECK_LE(available_size, kMaxRegularHeapObjectSize); + int array_length = FixedArrayLenFromSize(available_size); + DCHECK_LT(0, array_length); + Handle fixed_array = + isolate->factory()->NewFixedArray(array_length, AllocationType::kYoung); + if (out_handles) out_handles->push_back(fixed_array); + } + + for (Page* p : *paged_space) { + if (p != page) paged_space->RelinkFreeListCategories(p); + } +} +} // namespace + +void FillCurrentPage(v8::internal::NewSpace* space, std::vector>* out_handles) { - return heap::FillCurrentPageButNBytes(space, 0, out_handles); + if (v8_flags.minor_mc) { + PauseAllocationObserversScope pause_observers(space->heap()); + if (space->top() == kNullAddress) return; + Page* page = Page::FromAllocationAreaAddress(space->top()); + space->FreeLinearAllocationArea(); + FillPageInPagedSpace(page, out_handles); + } else { + FillCurrentPageButNBytes(space, 0, out_handles); + } } namespace { @@ -147,7 +236,7 @@ int GetSpaceRemainingOnCurrentPage(v8::internal::NewSpace* space) { } } // namespace -bool FillCurrentPageButNBytes(v8::internal::NewSpace* space, int extra_bytes, +void FillCurrentPageButNBytes(v8::internal::NewSpace* space, int extra_bytes, std::vector>* out_handles) { PauseAllocationObserversScope pause_observers(space->heap()); // We cannot rely on `space->limit()` to point to the end of the current page @@ -158,13 +247,12 @@ bool FillCurrentPageButNBytes(v8::internal::NewSpace* space, int extra_bytes, int space_remaining = GetSpaceRemainingOnCurrentPage(space); CHECK(space_remaining >= extra_bytes); int new_linear_size = space_remaining - extra_bytes; - if (new_linear_size == 0) return false; + if (new_linear_size == 0) return; std::vector> handles = heap::CreatePadding( space->heap(), space_remaining, i::AllocationType::kYoung); if (out_handles != nullptr) { out_handles->insert(out_handles->end(), handles.begin(), handles.end()); } - return true; } void SimulateIncrementalMarking(i::Heap* heap, bool force_completion) { diff --git a/test/cctest/heap/heap-utils.h b/test/cctest/heap/heap-utils.h index f235ca1823..d1dffd3fe2 100644 --- a/test/cctest/heap/heap-utils.h +++ b/test/cctest/heap/heap-utils.h @@ 
-46,10 +46,10 @@ std::vector> CreatePadding( Heap* heap, int padding_size, AllocationType allocation, int object_size = kMaxRegularHeapObjectSize); -bool FillCurrentPage(v8::internal::NewSpace* space, +void FillCurrentPage(v8::internal::NewSpace* space, std::vector>* out_handles = nullptr); -bool FillCurrentPageButNBytes( +void FillCurrentPageButNBytes( v8::internal::NewSpace* space, int extra_bytes, std::vector>* out_handles = nullptr); diff --git a/test/cctest/heap/test-heap.cc b/test/cctest/heap/test-heap.cc index 0f2fe010db..90b655fdc7 100644 --- a/test/cctest/heap/test-heap.cc +++ b/test/cctest/heap/test-heap.cc @@ -3758,7 +3758,8 @@ TEST(Regress169928) { // Some flags turn Scavenge collections into Mark-sweep collections // and hence are incompatible with this test case. if (v8_flags.gc_global || v8_flags.stress_compaction || - v8_flags.stress_incremental_marking || v8_flags.single_generation) + v8_flags.stress_incremental_marking || v8_flags.single_generation || + v8_flags.minor_mc) return; // Prepare the environment diff --git a/test/cctest/test-shared-strings.cc b/test/cctest/test-shared-strings.cc index 5d02ea7c1f..29a9818fa4 100644 --- a/test/cctest/test-shared-strings.cc +++ b/test/cctest/test-shared-strings.cc @@ -966,9 +966,11 @@ UNINITIALIZED_TEST(PromotionMarkCompactOldToShared) { raw_one_byte, AllocationType::kYoung); CHECK(String::IsInPlaceInternalizable(*one_byte_seq)); CHECK(MemoryChunk::FromHeapObject(*one_byte_seq)->InYoungGeneration()); + + std::vector> handles; // Fill the page and do a full GC. Page promotion should kick in and promote // the page as is to old space. - heap::FillCurrentPage(heap->new_space()); + heap::FillCurrentPage(heap->new_space(), &handles); heap->CollectGarbage(OLD_SPACE, GarbageCollectionReason::kTesting); // Make sure 'one_byte_seq' is in old space. CHECK(!MemoryChunk::FromHeapObject(*one_byte_seq)->InYoungGeneration()); diff --git a/test/unittests/heap/heap-utils.cc b/test/unittests/heap/heap-utils.cc index 308ca434a2..b528f2826a 100644 --- a/test/unittests/heap/heap-utils.cc +++ b/test/unittests/heap/heap-utils.cc @@ -4,9 +4,16 @@ #include "test/unittests/heap/heap-utils.h" +#include + +#include "src/common/globals.h" +#include "src/flags/flags.h" #include "src/heap/incremental-marking.h" #include "src/heap/mark-compact.h" +#include "src/heap/new-spaces.h" #include "src/heap/safepoint.h" +#include "src/objects/free-space-inl.h" +#include "v8-internal.h" namespace v8 { namespace internal { @@ -36,71 +43,90 @@ void HeapInternalsBase::SimulateIncrementalMarking(Heap* heap, } } -void HeapInternalsBase::SimulateFullSpace( - v8::internal::PagedNewSpace* space, - std::vector>* out_handles) { - // If you see this check failing, disable the flag at the start of your test: - // v8_flags.stress_concurrent_allocation = false; - // Background thread allocating concurrently interferes with this function. - CHECK(!v8_flags.stress_concurrent_allocation); - Heap* heap = space->heap(); - if (heap->mark_compact_collector()->sweeping_in_progress()) { - heap->mark_compact_collector()->EnsureSweepingCompleted( - MarkCompactCollector::SweepingForcedFinalizationMode::kV8Only); - } - // MinorMC is atomic so need to ensure it is completed. 
+namespace { - Map unchecked_fixed_array_map = - ReadOnlyRoots(heap).unchecked_fixed_array_map(); - PagedSpaceBase* paged_space = space->paged_space(); - paged_space->FreeLinearAllocationArea(); - FreeList* free_list = paged_space->free_list(); - free_list->ForAllFreeListCategories( - [heap, paged_space, free_list, unchecked_fixed_array_map, - out_handles](FreeListCategory* category) { - // Remove category from the free list to remove it from the available - // bytes count. - free_list->RemoveCategory(category); - // Create FixedArray objects in all free list entries. - while (!category->is_empty()) { - size_t node_size; - FreeSpace node = category->PickNodeFromList(0, &node_size); - DCHECK_LT(0, node_size); - DCHECK_LE(node_size, std::numeric_limits::max()); - // Zero the memory to "initialize" it for the FixedArray. - memset(reinterpret_cast(node.address()), 0, node_size); - Address address = node.address(); - Page* page = Page::FromAddress(address); - // Fixedarray requires at least 2*kTaggedSize memory. - while (node_size >= 2 * kTaggedSize) { - // Don't create FixedArrays bigger than max normal object size. - int array_size = std::min(static_cast(node_size), - kMaxRegularHeapObjectSize); - // Convert the free space to a FixedArray - HeapObject heap_object(HeapObject::FromAddress(address)); - heap_object.set_map_after_allocation(unchecked_fixed_array_map, - SKIP_WRITE_BARRIER); - FixedArray arr(FixedArray::cast(heap_object)); - arr.set_length((array_size - FixedArray::SizeFor(0)) / kTaggedSize); - DCHECK_EQ(array_size, arr.AllocatedSize()); - if (out_handles) - out_handles->push_back(handle(arr, heap->isolate())); - // Update allocated bytes statistics for the page and the space. - page->IncreaseAllocatedBytes(array_size); - paged_space->IncreaseAllocatedBytes(array_size, page); - node_size -= array_size; - address += array_size; - } - if (node_size > 0) { - // Create a filler in any remaining memory. - DCHECK_GT(2 * kTaggedSize, node_size); - heap->CreateFillerObjectAt(address, static_cast(node_size)); - } - } - }); - paged_space->ResetFreeList(); +int FixedArrayLenFromSize(int size) { + return std::min({(size - FixedArray::kHeaderSize) / kTaggedSize, + FixedArray::kMaxRegularLength}); } +void FillPageInPagedSpace(Page* page, + std::vector>* out_handles) { + DCHECK(page->SweepingDone()); + PagedSpaceBase* paged_space = static_cast(page->owner()); + DCHECK_EQ(kNullAddress, paged_space->top()); + DCHECK(!page->Contains(paged_space->top())); + + for (Page* p : *paged_space) { + if (p != page) paged_space->UnlinkFreeListCategories(p); + } + + // If min_block_size is larger than FixedArray::kHeaderSize, all blocks in the + // free list can be used to allocate a fixed array. This guarantees that we + // can fill the whole page. + DCHECK_LT(FixedArray::kHeaderSize, + paged_space->free_list()->min_block_size()); + + std::vector available_sizes; + // Collect all free list block sizes + page->ForAllFreeListCategories( + [&available_sizes](FreeListCategory* category) { + category->IterateNodesForTesting([&available_sizes](FreeSpace node) { + int node_size = node.Size(); + DCHECK_LT(0, FixedArrayLenFromSize(node_size)); + available_sizes.push_back(node_size); + }); + }); + + Isolate* isolate = page->heap()->isolate(); + + // Allocate as many max size arrays as possible, while making sure not to + // leave behind a block too small to fit a FixedArray. 
+ const int max_array_length = FixedArrayLenFromSize(kMaxRegularHeapObjectSize); + for (size_t i = 0; i < available_sizes.size(); ++i) { + int available_size = available_sizes[i]; + while (available_size > + kMaxRegularHeapObjectSize + FixedArray::kHeaderSize) { + Handle fixed_array = isolate->factory()->NewFixedArray( + max_array_length, AllocationType::kYoung); + if (out_handles) out_handles->push_back(fixed_array); + available_size -= kMaxRegularHeapObjectSize; + } + if (available_size > kMaxRegularHeapObjectSize) { + // Allocate less than kMaxRegularHeapObjectSize to ensure remaining space + // can be used to allcoate another FixedArray. + int array_size = kMaxRegularHeapObjectSize - FixedArray::kHeaderSize; + Handle fixed_array = isolate->factory()->NewFixedArray( + FixedArrayLenFromSize(array_size), AllocationType::kYoung); + if (out_handles) out_handles->push_back(fixed_array); + available_size -= array_size; + } + DCHECK_LE(available_size, kMaxRegularHeapObjectSize); + DCHECK_LT(0, FixedArrayLenFromSize(available_size)); + available_sizes[i] = available_size; + } + + // Allocate FixedArrays in remaining free list blocks, from largest to + // smallest. + std::sort(available_sizes.begin(), available_sizes.end(), + [](size_t a, size_t b) { return a > b; }); + for (size_t i = 0; i < available_sizes.size(); ++i) { + int available_size = available_sizes[i]; + DCHECK_LE(available_size, kMaxRegularHeapObjectSize); + int array_length = FixedArrayLenFromSize(available_size); + DCHECK_LT(0, array_length); + Handle fixed_array = + isolate->factory()->NewFixedArray(array_length, AllocationType::kYoung); + if (out_handles) out_handles->push_back(fixed_array); + } + + for (Page* p : *paged_space) { + if (p != page) paged_space->RelinkFreeListCategories(p); + } +} + +} // namespace + void HeapInternalsBase::SimulateFullSpace( v8::internal::NewSpace* space, std::vector>* out_handles) { @@ -109,10 +135,14 @@ void HeapInternalsBase::SimulateFullSpace( // Background thread allocating concurrently interferes with this function. 
CHECK(!v8_flags.stress_concurrent_allocation); if (v8_flags.minor_mc) { - SimulateFullSpace(PagedNewSpace::From(space), out_handles); - } else { - while (FillCurrentPage(space, out_handles) || space->AddFreshPage()) { + for (Page* page : *space) { + FillPageInPagedSpace(page, out_handles); } + DCHECK_EQ(0, space->free_list()->Available()); + } else { + do { + FillCurrentPage(space, out_handles); + } while (space->AddFreshPage()); } } @@ -131,14 +161,8 @@ void HeapInternalsBase::SimulateFullSpace(v8::internal::PagedSpace* space) { space->ResetFreeList(); } -bool HeapInternalsBase::FillCurrentPage( - v8::internal::NewSpace* space, - std::vector>* out_handles) { - return FillCurrentPageButNBytes(space, 0, out_handles); -} - namespace { -int GetSpaceRemainingOnCurrentPage(v8::internal::NewSpace* space) { +int GetSpaceRemainingOnCurrentSemiSpacePage(v8::internal::NewSpace* space) { Address top = space->top(); if ((top & kPageAlignmentMask) == 0) { // `top` points to the start of a page signifies that there is not room in @@ -147,36 +171,9 @@ int GetSpaceRemainingOnCurrentPage(v8::internal::NewSpace* space) { } return static_cast(Page::FromAddress(space->top())->area_end() - top); } -} // namespace -bool HeapInternalsBase::FillCurrentPageButNBytes( - v8::internal::NewSpace* space, int extra_bytes, - std::vector>* out_handles) { - PauseAllocationObserversScope pause_observers(space->heap()); - // We cannot rely on `space->limit()` to point to the end of the current page - // in the case where inline allocations are disabled, it actually points to - // the current allocation pointer. - DCHECK_IMPLIES(!space->IsInlineAllocationEnabled(), - space->limit() == space->top()); - int space_remaining = GetSpaceRemainingOnCurrentPage(space); - CHECK(space_remaining >= extra_bytes); - int new_linear_size = space_remaining - extra_bytes; - if (new_linear_size == 0) return false; - std::vector> handles = - CreatePadding(space->heap(), space_remaining, i::AllocationType::kYoung); - if (out_handles != nullptr) { - out_handles->insert(out_handles->end(), handles.begin(), handles.end()); - } - return true; -} - -int HeapInternalsBase::FixedArrayLenFromSize(int size) { - return std::min({(size - FixedArray::kHeaderSize) / kTaggedSize, - FixedArray::kMaxRegularLength}); -} - -std::vector> HeapInternalsBase::CreatePadding( - Heap* heap, int padding_size, AllocationType allocation, int object_size) { +std::vector> CreatePadding(Heap* heap, int padding_size, + AllocationType allocation) { std::vector> handles; Isolate* isolate = heap->isolate(); int allocate_memory; @@ -191,8 +188,8 @@ std::vector> HeapInternalsBase::CreatePadding( CHECK(padding_size <= overall_free_memory || overall_free_memory == 0); } while (free_memory > 0) { - if (free_memory > object_size) { - allocate_memory = object_size; + if (free_memory > kMaxRegularHeapObjectSize) { + allocate_memory = kMaxRegularHeapObjectSize; length = FixedArrayLenFromSize(allocate_memory); } else { allocate_memory = free_memory; @@ -220,6 +217,42 @@ std::vector> HeapInternalsBase::CreatePadding( return handles; } +void FillCurrentSemiSpacePage(v8::internal::NewSpace* space, + std::vector>* out_handles) { + // We cannot rely on `space->limit()` to point to the end of the current page + // in the case where inline allocations are disabled, it actually points to + // the current allocation pointer. 
+  DCHECK_IMPLIES(!space->IsInlineAllocationEnabled(),
+                 space->limit() == space->top());
+  int space_remaining = GetSpaceRemainingOnCurrentSemiSpacePage(space);
+  if (space_remaining == 0) return;
+  std::vector<Handle<FixedArray>> handles =
+      CreatePadding(space->heap(), space_remaining, i::AllocationType::kYoung);
+  if (out_handles != nullptr) {
+    out_handles->insert(out_handles->end(), handles.begin(), handles.end());
+  }
+}
+
+void FillCurrentPagedSpacePage(v8::internal::NewSpace* space,
+                               std::vector<Handle<FixedArray>>* out_handles) {
+  if (space->top() == kNullAddress) return;
+  Page* page = Page::FromAllocationAreaAddress(space->top());
+  space->FreeLinearAllocationArea();
+  FillPageInPagedSpace(page, out_handles);
+}
+
+}  // namespace
+
+void HeapInternalsBase::FillCurrentPage(
+    v8::internal::NewSpace* space,
+    std::vector<Handle<FixedArray>>* out_handles) {
+  PauseAllocationObserversScope pause_observers(space->heap());
+  if (v8_flags.minor_mc)
+    FillCurrentPagedSpacePage(space, out_handles);
+  else
+    FillCurrentSemiSpacePage(space, out_handles);
+}
+
 bool IsNewObjectInCorrectGeneration(HeapObject object) {
   return v8_flags.single_generation ? !i::Heap::InYoungGeneration(object)
                                     : i::Heap::InYoungGeneration(object);
diff --git a/test/unittests/heap/heap-utils.h b/test/unittests/heap/heap-utils.h
index f902c2fea0..f4bd609d01 100644
--- a/test/unittests/heap/heap-utils.h
+++ b/test/unittests/heap/heap-utils.h
@@ -21,20 +21,8 @@ class HeapInternalsBase {
       v8::internal::NewSpace* space,
       std::vector<Handle<FixedArray>>* out_handles = nullptr);
   void SimulateFullSpace(v8::internal::PagedSpace* space);
-  bool FillCurrentPageButNBytes(
-      v8::internal::NewSpace* space, int extra_bytes,
-      std::vector<Handle<FixedArray>>* out_handles = nullptr);
-  bool FillCurrentPage(v8::internal::NewSpace* space,
+  void FillCurrentPage(v8::internal::NewSpace* space,
                        std::vector<Handle<FixedArray>>* out_handles = nullptr);
-  std::vector<Handle<FixedArray>> CreatePadding(
-      Heap* heap, int padding_size, AllocationType allocation,
-      int object_size = kMaxRegularHeapObjectSize);
-  int FixedArrayLenFromSize(int size);
-
- private:
-  void SimulateFullSpace(
-      v8::internal::PagedNewSpace* space,
-      std::vector<Handle<FixedArray>>* out_handles = nullptr);
 };

 template <typename TMixin>

From bdac0ff0593938b9cdb6d3104f222be368afbc14 Mon Sep 17 00:00:00 2001
From: Matthias Liedtke
Date: Mon, 12 Sep 2022 17:38:24 +0200
Subject: [PATCH 0074/1772] [turbofan] Avoid deopt loop for GetIterator of
 null / undefined

GetIterator on an object o consists of two steps:
1) iter = load o[Symbol.iterator]
2) call iter
For null / undefined, step (1) throws an exception, meaning that step (2)
is never reached. Prior to this change, TurboFan deopted if either of the
two steps lacked sufficient feedback, which resulted in a deopt loop for
null and undefined.
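
For reference, the two steps roughly desugar to the following JavaScript
(a simplified sketch, not the actual generated bytecode):

  let method = obj[Symbol.iterator];  // step (1): throws for null/undefined
  let iterator = method.call(obj);    // step (2): unreachable in that case

Because step (2) never executes for null / undefined, its call feedback
can never become sufficient, so deopting on insufficient call feedback
re-triggers on every execution.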
Change-Id: Ie0eaf8e231a149313e10af9e95fd80bc77dc0beb Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3890980 Reviewed-by: Tobias Tebbi Commit-Queue: Tobias Tebbi Auto-Submit: Matthias Liedtke Cr-Commit-Position: refs/heads/main@{#83159} --- src/compiler/js-type-hint-lowering.cc | 5 --- test/mjsunit/compiler/get-iterator-deopt.js | 42 +++++++++++++++++++++ 2 files changed, 42 insertions(+), 5 deletions(-) create mode 100644 test/mjsunit/compiler/get-iterator-deopt.js diff --git a/src/compiler/js-type-hint-lowering.cc b/src/compiler/js-type-hint-lowering.cc index 94f3a36d96..0f45010680 100644 --- a/src/compiler/js-type-hint-lowering.cc +++ b/src/compiler/js-type-hint-lowering.cc @@ -500,11 +500,6 @@ JSTypeHintLowering::ReduceGetIteratorOperation(const Operator* op, DeoptimizeReason::kInsufficientTypeFeedbackForGenericNamedAccess)) { return LoweringResult::Exit(node); } - if (Node* node = BuildDeoptIfFeedbackIsInsufficient( - call_slot, effect, control, - DeoptimizeReason::kInsufficientTypeFeedbackForCall)) { - return LoweringResult::Exit(node); - } return LoweringResult::NoChange(); } diff --git a/test/mjsunit/compiler/get-iterator-deopt.js b/test/mjsunit/compiler/get-iterator-deopt.js new file mode 100644 index 0000000000..4d7c081363 --- /dev/null +++ b/test/mjsunit/compiler/get-iterator-deopt.js @@ -0,0 +1,42 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Flags: --allow-natives-syntax + +function throwsRepeated(fn, ErrorType) { + // Collect type feedback. + %PrepareFunctionForOptimization(fn); + for (let i = 0; i < 5; i++) assertThrows(fn, ErrorType); + // Force compilation and run. + %OptimizeFunctionOnNextCall(fn); + assertThrows(fn, ErrorType); + // If the function isn't optimized / turbofan tier not available, + // a deopt happened on the call above. + assertEquals(%IsTurbofanEnabled(), %ActiveTierIsTurbofan(fn)); +} + +function repeated(fn) { + // Collect type feedback. + %PrepareFunctionForOptimization(fn); + for (let i = 0; i < 5; i++) fn(); + // Force compilation and run. + %OptimizeFunctionOnNextCall(fn); + fn(); + // If the function isn't optimized / turbofan tier not available, + // a deopt happened on the call above. 
+ assertEquals(%IsTurbofanEnabled(), %ActiveTierIsTurbofan(fn)); +} + +repeated(() => { for (let p of "abc") { } }); +repeated(() => { for (let p of [1, 2, 3]) { } }); +throwsRepeated(() => { for (let p of {a: 1, b: 2}) { } }, TypeError); +let objWithIterator = { [Symbol.iterator]: function* () { yield 1; } }; +repeated(() => { for (let p of objWithIterator) { } }); +throwsRepeated(() => { for (let p of 5) { } }, TypeError); +throwsRepeated(() => { for (let p of new Number(5)) { } }, TypeError); +throwsRepeated(() => { for (let p of true) { } }, TypeError); +throwsRepeated(() => { for (let p of new BigInt(123)) { } }, TypeError); +throwsRepeated(() => { for (let p of new Symbol("symbol")) { } }, TypeError); +throwsRepeated(function testUndef() { for (let p of undefined) { } }, TypeError); +throwsRepeated(() => { for (let p of null) { } }, TypeError); From ef1b19d6263243a0c382987e7c6869dad854a00f Mon Sep 17 00:00:00 2001 From: Liu Yu Date: Tue, 13 Sep 2022 16:40:05 +0800 Subject: [PATCH 0075/1772] [loong64][mips64][wasm][liftoff] Fix and cleanup tracing of return value Port commit 6f9e71fa74eb589a48c0f5065ac961a64cb515a3 Change-Id: I8aaf45c82b3787acd55de595cebe6b4b3c99efc2 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3893596 Auto-Submit: Liu Yu Reviewed-by: Zhao Jiazhong Commit-Queue: Zhao Jiazhong Cr-Commit-Position: refs/heads/main@{#83160} --- src/wasm/baseline/loong64/liftoff-assembler-loong64.h | 4 ++++ src/wasm/baseline/mips64/liftoff-assembler-mips64.h | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/src/wasm/baseline/loong64/liftoff-assembler-loong64.h b/src/wasm/baseline/loong64/liftoff-assembler-loong64.h index 55a5459912..5c4ac0f0a1 100644 --- a/src/wasm/baseline/loong64/liftoff-assembler-loong64.h +++ b/src/wasm/baseline/loong64/liftoff-assembler-loong64.h @@ -1012,6 +1012,10 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) { } } +void LiftoffAssembler::LoadSpillAddress(Register dst, int offset) { + Sub_d(dst, fp, Operand(offset)); +} + void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) { TurboAssembler::Clz_d(dst.gp(), src.gp()); } diff --git a/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/src/wasm/baseline/mips64/liftoff-assembler-mips64.h index b7d47506af..462783eb3e 100644 --- a/src/wasm/baseline/mips64/liftoff-assembler-mips64.h +++ b/src/wasm/baseline/mips64/liftoff-assembler-mips64.h @@ -1109,6 +1109,10 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) { } } +void LiftoffAssembler::LoadSpillAddress(Register dst, int offset) { + Dsub(dst, fp, Operand(offset)); +} + void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) { TurboAssembler::Dclz(dst.gp(), src.gp()); } From f650bdc95ca5fa5776af824d90d087f73cc666c7 Mon Sep 17 00:00:00 2001 From: Camillo Date: Tue, 13 Sep 2022 13:20:05 +0200 Subject: [PATCH 0076/1772] [tools][profiling] Use absolute paths as command inputs Drive-by-fixes: - Auto-create the --perf-data-dir Change-Id: I6801452f9c4c6b9069a29aa3ab1e25909adffb19 No-Presubmit: true No-Tree-Checks: true No-Try: true Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3893858 Auto-Submit: Camillo Bruni Commit-Queue: Jakob Kummerow Reviewed-by: Jakob Kummerow Cr-Commit-Position: refs/heads/main@{#83161} --- tools/profiling/linux-perf-chrome.py | 20 ++++++++++---------- tools/profiling/linux-perf-d8.py | 11 +++++++---- 2 files changed, 17 insertions(+), 14 deletions(-) diff --git a/tools/profiling/linux-perf-chrome.py 
b/tools/profiling/linux-perf-chrome.py index 2ed86c535c..0f3a7ec1cc 100755 --- a/tools/profiling/linux-perf-chrome.py +++ b/tools/profiling/linux-perf-chrome.py @@ -100,7 +100,7 @@ if options.perf_data_dir is None: options.perf_data_dir = Path.cwd() else: options.perf_data_dir = Path(options.perf_data_dir).absolute() - +options.perf_data_dir.mkdir(parents=True, exist_ok=True) if not options.perf_data_dir.is_dir(): parser.error(f"--perf-data-dir={options.perf_data_dir} " "is not an directory or does not exist.") @@ -203,12 +203,12 @@ log("PARALLEL POST PROCESSING: Injecting JS symbols") def inject_v8_symbols(perf_dat_file): output_file = perf_dat_file.with_suffix(".data.jitted") cmd = [ - "perf", "inject", "--jit", f"--input={perf_dat_file}", - f"--output={output_file}" + "perf", "inject", "--jit", f"--input={perf_dat_file.absolute()}", + f"--output={output_file.absolute()}" ] try: subprocess.check_call(cmd) - print(f"Processed: {output_file}") + print(f"Processed: {output_file.name}") except: print(shlex.join(cmd)) return None @@ -236,14 +236,14 @@ for output_file in reversed(results): ) # ============================================================================== -path_strings = [str(path.relative_to(old_cwd)) for path in results] -largest_result = path_strings[-1] -results_str = ' '.join(path_strings) +rel_path_strings = [str(path.relative_to(old_cwd)) for path in results] +abs_path_strings = [str(path.absolute()) for path in results] +largest_result = abs_path_strings[-1] if not shutil.which('gcertstatus'): log("ANALYSIS") print(f"perf report --input='{largest_result}'") - print(f"pprof {path_strings}") + print(f"pprof {rel_path_strings}") exit(0) log("PPROF") @@ -260,13 +260,13 @@ try: print(url) print("# Processing and uploading combined pprof result") - url = subprocess.check_output(cmd + path_strings).decode('utf-8').strip() + url = subprocess.check_output(cmd + abs_path_strings).decode('utf-8').strip() print("# PPROF RESULT") print(url) except subprocess.CalledProcessError as e: if has_gcert: raise Exception("Could not generate pprof results") from e print("# Please run `gcert` for generating pprof results") - print(f"pprof -flame {' '.join(path_strings)}") + print(f"pprof -flame {' '.join(rel_path_strings)}") except KeyboardInterrupt: exit(1) diff --git a/tools/profiling/linux-perf-d8.py b/tools/profiling/linux-perf-d8.py index 3371493b0c..ba2561d15e 100755 --- a/tools/profiling/linux-perf-d8.py +++ b/tools/profiling/linux-perf-d8.py @@ -110,7 +110,7 @@ if options.perf_data_dir is None: options.perf_data_dir = Path.cwd() else: options.perf_data_dir = Path(options.perf_data_dir).absolute() - +options.perf_data_dir.mkdir(parents=True, exist_ok=True) if not options.perf_data_dir.is_dir(): parser.error(f"--perf-data-dir={options.perf_data_dir} " "is not an directory or does not exist.") @@ -218,8 +218,8 @@ log("POST PROCESSING: Injecting JS symbols") def inject_v8_symbols(perf_dat_file): output_file = perf_dat_file.with_suffix(".data.jitted") cmd = [ - "perf", "inject", "--jit", f"--input={perf_dat_file}", - f"--output={output_file}" + "perf", "inject", "--jit", f"--input={perf_dat_file.absolute()}", + f"--output={output_file.absolute()}" ] try: subprocess.check_call(cmd) @@ -252,7 +252,10 @@ try: subprocess.check_call("gcertstatus >&/dev/null || gcert", shell=True) has_gcert = True - cmd = ["pprof", "-flame", f"-add_comment={shlex.join(sys.argv)}", str(result)] + cmd = [ + "pprof", "-flame", f"-add_comment={shlex.join(sys.argv)}", + str(result.absolute()) + ] print("# Processing 
and uploading to pprofresult") url = subprocess.check_output(cmd).decode('utf-8').strip() print(url) From b568d4dcd02d47606804efc0ba1cd0fd5fa88665 Mon Sep 17 00:00:00 2001 From: Matthias Liedtke Date: Tue, 13 Sep 2022 11:22:09 +0200 Subject: [PATCH 0077/1772] [wasm][test] Replace busy loops with explicit wasm tierup Fixed: v8:12463 Change-Id: I7ca2d3db803ca6ac50c1340d747f98d03c3985a4 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3890982 Reviewed-by: Jakob Kummerow Commit-Queue: Matthias Liedtke Cr-Commit-Position: refs/heads/main@{#83162} --- test/mjsunit/mjsunit.status | 4 -- test/mjsunit/regress/wasm/regress-1179065.js | 10 ++-- test/mjsunit/wasm/speculative-inlining.js | 56 +++++++++----------- 3 files changed, 30 insertions(+), 40 deletions(-) diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status index 1240b8da63..7c70ab8184 100644 --- a/test/mjsunit/mjsunit.status +++ b/test/mjsunit/mjsunit.status @@ -1128,10 +1128,6 @@ # BUG(v8:12605): flaky test. 'wasm/grow-shared-memory': [SKIP], - # BUG(v8:12463) - 'regress/wasm/regress-1179065': [SKIP], - 'wasm/speculative-inlining': [SKIP], - # BUG(v8:13234) 'wasm/shared-memory-worker-gc-stress': [SKIP], 'object-literal': [SKIP], diff --git a/test/mjsunit/regress/wasm/regress-1179065.js b/test/mjsunit/regress/wasm/regress-1179065.js index ca843f3d8d..71f96d3d08 100644 --- a/test/mjsunit/regress/wasm/regress-1179065.js +++ b/test/mjsunit/regress/wasm/regress-1179065.js @@ -8,14 +8,14 @@ d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js'); const builder = new WasmModuleBuilder(); builder.addMemory(1, 10); -builder.addFunction('load', kSig_i_i).addBody([ +let loadFct = builder.addFunction('load', kSig_i_i).addBody([ // signature: i_i // body: kExprLocalGet, 0, // local.get kExprI32LoadMem, 0, 0, // i32.load_mem ]).exportFunc(); const instance = builder.instantiate(); -// Call multiple times to trigger dynamic tiering. -while (%IsLiftoffFunction(instance.exports.load)) { - instance.exports.load(1); -} +for (let i = 0; i < 20; i++) instance.exports.load(1); +%WasmTierUpFunction(instance, loadFct.index); +assertFalse(%IsLiftoffFunction(instance.exports.load)); +instance.exports.load(1); diff --git a/test/mjsunit/wasm/speculative-inlining.js b/test/mjsunit/wasm/speculative-inlining.js index 2f882e933f..eff9fd62de 100644 --- a/test/mjsunit/wasm/speculative-inlining.js +++ b/test/mjsunit/wasm/speculative-inlining.js @@ -4,8 +4,7 @@ // Flags: --wasm-speculative-inlining --experimental-wasm-return-call // Flags: --experimental-wasm-typed-funcref --experimental-wasm-type-reflection -// Flags: --no-wasm-tier-up --wasm-dynamic-tiering --wasm-tiering-budget=100 -// Flags: --allow-natives-syntax +// Flags: --no-wasm-tier-up --wasm-dynamic-tiering --allow-natives-syntax // These tests check if functions are speculatively inlined as expected. We do // not check automatically which functions are inlined. To get more insight, run @@ -26,17 +25,15 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js"); [kExprRefFunc, callee.index]); // g(x) = f(5) + x - builder.addFunction("main", kSig_i_i) + let main = builder.addFunction("main", kSig_i_i) .addBody([kExprI32Const, 5, kExprGlobalGet, global.index, kExprCallRef, callee.type_index, kExprLocalGet, 0, kExprI32Add]) .exportAs("main"); let instance = builder.instantiate(); - // Run 'main' until it is tiered-up. 
- while (%IsLiftoffFunction(instance.exports.main)) { - assertEquals(14, instance.exports.main(10)); - } + for (let i = 0; i < 20; i++) assertEquals(14, instance.exports.main(10)); + %WasmTierUpFunction(instance, main.index); // The tiered-up function should have {callee} speculatively inlined. assertEquals(14, instance.exports.main(10)); })(); @@ -61,7 +58,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js"); [kExprRefFunc, callee1.index]); // g(x, y) = if (y) { h(5) + x } else { f(7) + x } - builder.addFunction("main", kSig_i_ii) + let main = builder.addFunction("main", kSig_i_ii) .addBody([ kExprLocalGet, 1, kExprIf, kWasmI32, @@ -77,12 +74,10 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js"); let instance = builder.instantiate(); - // Run 'main' until it is tiered-up. - while (%IsLiftoffFunction(instance.exports.main)) { - assertEquals(14, instance.exports.main(10, 1)); - } + for (let i = 0; i < 20; i++) assertEquals(14, instance.exports.main(10, 1)); + %WasmTierUpFunction(instance, main.index); // Tier-up is done, and {callee0} should be inlined in the trace. - assertEquals(14, instance.exports.main(10, 1)) + assertEquals(14, instance.exports.main(10, 1)); // Now, run main with {callee1} instead. The correct reference should still be // called after inlining. @@ -101,17 +96,16 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js"); [kExprRefFunc, callee.index]); // g(x) = f(5 + x) - builder.addFunction("main", kSig_i_i) + let main = builder.addFunction("main", kSig_i_i) .addBody([kExprI32Const, 5, kExprLocalGet, 0, kExprI32Add, kExprGlobalGet, global.index, kExprReturnCallRef, callee.type_index]) .exportAs("main"); let instance = builder.instantiate(); - // Run 'main' until it is tiered-up. - while (%IsLiftoffFunction(instance.exports.main)) { - assertEquals(14, instance.exports.main(10)); - } + + for (let i = 0; i < 20; i++) assertEquals(14, instance.exports.main(10)); + %WasmTierUpFunction(instance, main.index); // After tier-up, the tail call should be speculatively inlined. assertEquals(14, instance.exports.main(10)); })(); @@ -136,7 +130,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js"); [kExprRefFunc, callee1.index]); // g(x, y) = if (y) { h(x) } else { f(x) } - builder.addFunction("main", kSig_i_ii) + let main = builder.addFunction("main", kSig_i_ii) .addBody([ kExprLocalGet, 1, kExprIf, kWasmI32, @@ -149,10 +143,9 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js"); .exportAs("main"); let instance = builder.instantiate(); - // Run 'main' until it is tiered-up. - while (%IsLiftoffFunction(instance.exports.main)) { - assertEquals(9, instance.exports.main(10, 1)); - } + + assertEquals(9, instance.exports.main(10, 1)); + %WasmTierUpFunction(instance, main.index); // After tier-up, {callee0} should be inlined in the trace. 
assertEquals(9, instance.exports.main(10, 1)) @@ -176,6 +169,7 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js"); return builder.instantiate({m : { i_f1 : x => x + 1, i_f2 : x => x + 2}}); }(); + let main = null; let instance2 = function() { let builder = new WasmModuleBuilder(); @@ -186,8 +180,8 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js"); .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32Add]) .exportFunc(); - builder.addFunction("main", makeSig([kWasmI32, - wasmRefType(sig1)], [kWasmI32])) + main = builder.addFunction("main", + makeSig([kWasmI32, wasmRefType(sig1)], [kWasmI32])) .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprCallRef, sig1]) .exportFunc(); @@ -195,9 +189,8 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js"); }(); // Run 'main' until it is tiered-up. - while (%IsLiftoffFunction(instance2.exports.main)) { - assertEquals(1, instance2.exports.main(0, instance1.exports.f1)); - } + assertEquals(1, instance2.exports.main(0, instance1.exports.f1)); + %WasmTierUpFunction(instance2, main.index); // The function f1 defined in another module should not be inlined. assertEquals(1, instance2.exports.main(0, instance1.exports.f1)); })(); @@ -212,12 +205,13 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js"); let f2 = new WebAssembly.Function({parameters: ["i32"], results: ["i32"]}, x => x * 2); + let main = null; let instance2 = function() { let builder = new WasmModuleBuilder(); let sig = builder.addType(kSig_i_i); - builder.addFunction("main", makeSig( + main = builder.addFunction("main", makeSig( [kWasmI32, wasmRefType(sig), wasmRefType(sig)], [kWasmI32])) .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprCallRef, sig, kExprLocalGet, 0, kExprLocalGet, 2, kExprCallRef, sig, @@ -231,14 +225,14 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js"); // Run 'main' until it is tiered-up. The first argument should try to be // spec-inlined monomorphically. We pass f2 to the second argument 80% of the // time, so it should try to be spec-inlined polymorphically. - while (%IsLiftoffFunction(instance2.exports.main)) { + for (let i = 0; i < 20; i++) { if (i % 5 == 0) { assertEquals(12, instance2.exports.main(5, f1, f1)); } else { assertEquals(16, instance2.exports.main(5, f1, f2)); } - i++; } + %WasmTierUpFunction(instance2, main.index); // WebAssembly.Function objects should not be inlined. assertEquals(16, instance2.exports.main(5, f1, f2)); assertEquals(12, instance2.exports.main(5, f1, f1)); From ac0cedf1615db8d38a68de29210c5fff83a6f327 Mon Sep 17 00:00:00 2001 From: Milad Fa Date: Mon, 12 Sep 2022 11:21:42 -0400 Subject: [PATCH 0078/1772] Fix LoadSpillAddress on big endian BE machines use a 4 byte bias to spill/fill 32-bit values on the stack. This is done so because TF always fills 64-bit values even if the spilled value was 32-bits. To make sure this holds between LO and TF we have added a 4 byte bias in this CL: crrev.com/c/2756712 LoadSpillAddress needs to also take this into account and add a bias if the spilled value was 4 bytes. 
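Why the bias is 4 bytes, as a JavaScript sketch (illustrative, not part
of this CL): a 32-bit value written into an 8-byte slot via a 64-bit
big-endian store lands in the upper half of the slot.

    // An 8-byte "stack slot" holding the 32-bit value 0x11223344,
    // written with a 64-bit store the way TF fills spill slots.
    const slot = new DataView(new ArrayBuffer(8));
    slot.setBigUint64(0, 0x11223344n, /*littleEndian=*/ false);
    slot.getUint32(0, false);  // 0x00000000 -- the wrong half on BE
    slot.getUint32(4, false);  // 0x11223344 -- hence the +4 stack_bias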
Change-Id: Ibd2b2071ce1fb11a9c5884611ae8edd1f17cb0c9 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3891196 Commit-Queue: Milad Farazmand Reviewed-by: Thibaud Michaud Cr-Commit-Position: refs/heads/main@{#83163} --- src/wasm/baseline/arm/liftoff-assembler-arm.h | 3 ++- src/wasm/baseline/arm64/liftoff-assembler-arm64.h | 3 ++- src/wasm/baseline/ia32/liftoff-assembler-ia32.h | 3 ++- src/wasm/baseline/liftoff-assembler.h | 2 +- src/wasm/baseline/liftoff-compiler.cc | 2 +- src/wasm/baseline/ppc/liftoff-assembler-ppc.h | 4 +++- src/wasm/baseline/riscv/liftoff-assembler-riscv.h | 3 ++- src/wasm/baseline/s390/liftoff-assembler-s390.h | 4 +++- src/wasm/baseline/x64/liftoff-assembler-x64.h | 3 ++- 9 files changed, 18 insertions(+), 9 deletions(-) diff --git a/src/wasm/baseline/arm/liftoff-assembler-arm.h b/src/wasm/baseline/arm/liftoff-assembler-arm.h index 74438b7a35..324b4860c8 100644 --- a/src/wasm/baseline/arm/liftoff-assembler-arm.h +++ b/src/wasm/baseline/arm/liftoff-assembler-arm.h @@ -1550,7 +1550,8 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) { pop(r0); } -void LiftoffAssembler::LoadSpillAddress(Register dst, int offset) { +void LiftoffAssembler::LoadSpillAddress(Register dst, int offset, + ValueKind /* kind */) { sub(dst, fp, Operand(offset)); } diff --git a/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/src/wasm/baseline/arm64/liftoff-assembler-arm64.h index 403dd61687..b2b3c3ff00 100644 --- a/src/wasm/baseline/arm64/liftoff-assembler-arm64.h +++ b/src/wasm/baseline/arm64/liftoff-assembler-arm64.h @@ -1033,7 +1033,8 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) { } } -void LiftoffAssembler::LoadSpillAddress(Register dst, int offset) { +void LiftoffAssembler::LoadSpillAddress(Register dst, int offset, + ValueKind /* kind */) { Sub(dst, fp, offset); } diff --git a/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/src/wasm/baseline/ia32/liftoff-assembler-ia32.h index 6c5e3a0788..fad96ab52e 100644 --- a/src/wasm/baseline/ia32/liftoff-assembler-ia32.h +++ b/src/wasm/baseline/ia32/liftoff-assembler-ia32.h @@ -1286,7 +1286,8 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) { } } -void LiftoffAssembler::LoadSpillAddress(Register dst, int offset) { +void LiftoffAssembler::LoadSpillAddress(Register dst, int offset, + ValueKind /* kind */) { lea(dst, liftoff::GetStackSlot(offset)); } diff --git a/src/wasm/baseline/liftoff-assembler.h b/src/wasm/baseline/liftoff-assembler.h index 91e957c31a..189509e724 100644 --- a/src/wasm/baseline/liftoff-assembler.h +++ b/src/wasm/baseline/liftoff-assembler.h @@ -658,7 +658,7 @@ class LiftoffAssembler : public TurboAssembler { void Spill(VarState* slot); void SpillLocals(); void SpillAllRegisters(); - inline void LoadSpillAddress(Register dst, int offset); + inline void LoadSpillAddress(Register dst, int offset, ValueKind kind); // Clear any uses of {reg} in both the cache and in {possible_uses}. // Any use in the stack is spilled. 
If any register in {possible_uses} matches
diff --git a/src/wasm/baseline/liftoff-compiler.cc b/src/wasm/baseline/liftoff-compiler.cc
index f3748f7dd6..deacfaf943 100644
--- a/src/wasm/baseline/liftoff-compiler.cc
+++ b/src/wasm/baseline/liftoff-compiler.cc
@@ -2360,7 +2360,7 @@ class LiftoffCompiler {
         __ Spill(&return_slot);
       }
       DCHECK(return_slot.is_stack());
-      __ LoadSpillAddress(param_reg, return_slot.offset());
+      __ LoadSpillAddress(param_reg, return_slot.offset(), return_slot.kind());
     }
 
     source_position_table_builder_.AddPosition(
diff --git a/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index 7b8b5837fd..25d1411cd3 100644
--- a/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -1093,7 +1093,9 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
   }
 }
 
-void LiftoffAssembler::LoadSpillAddress(Register dst, int offset) {
+void LiftoffAssembler::LoadSpillAddress(Register dst, int offset,
+                                        ValueKind kind) {
+  if (kind == kI32) offset = offset + stack_bias;
   SubS64(dst, fp, Operand(offset));
 }
 
diff --git a/src/wasm/baseline/riscv/liftoff-assembler-riscv.h b/src/wasm/baseline/riscv/liftoff-assembler-riscv.h
index fb2dcf62cc..e5838031ab 100644
--- a/src/wasm/baseline/riscv/liftoff-assembler-riscv.h
+++ b/src/wasm/baseline/riscv/liftoff-assembler-riscv.h
@@ -157,7 +157,8 @@ void LiftoffAssembler::PatchPrepareStackFrame(
   GenPCRelativeJump(kScratchReg, imm32);
 }
 
-void LiftoffAssembler::LoadSpillAddress(Register dst, int offset) {
+void LiftoffAssembler::LoadSpillAddress(Register dst, int offset,
+                                        ValueKind /* kind */) {
   SubWord(dst, fp, offset);
 }
 
diff --git a/src/wasm/baseline/s390/liftoff-assembler-s390.h b/src/wasm/baseline/s390/liftoff-assembler-s390.h
index 0476818b2f..8a45f09bf6 100644
--- a/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -1552,7 +1552,9 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
   pop(r0);
 }
 
-void LiftoffAssembler::LoadSpillAddress(Register dst, int offset) {
+void LiftoffAssembler::LoadSpillAddress(Register dst, int offset,
+                                        ValueKind kind) {
+  if (kind == kI32) offset = offset + stack_bias;
   SubS64(dst, fp, Operand(offset));
 }
 
diff --git a/src/wasm/baseline/x64/liftoff-assembler-x64.h b/src/wasm/baseline/x64/liftoff-assembler-x64.h
index fe3e897c6b..3d3c16b187 100644
--- a/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -1018,7 +1018,8 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
   }
 }
 
-void LiftoffAssembler::LoadSpillAddress(Register dst, int offset) {
+void LiftoffAssembler::LoadSpillAddress(Register dst, int offset,
+                                        ValueKind /* kind */) {
   leaq(dst, liftoff::GetStackSlot(offset));
 }
 

From de18a05e7b9893a946feeae93a8bfd00543cc835 Mon Sep 17 00:00:00 2001
From: Jakob Linke
Date: Tue, 13 Sep 2022 13:48:55 +0200
Subject: [PATCH 0079/1772] [maglev] Keep receiver in a stack slot for
 OptimizedFrame::Summarize

For frame inspection (i.e. not deoptimization), no RegisterValues are
available to TranslatedState and thus any register-allocated value is
unavailable.

Stack trace collection requires `function` and `receiver` values to be
available and thus stack-allocated. Both are immutable and have fixed
stack slots, so this is not a problem; we just lost track of the
receiver inside Maglev when function parameters were wrapped inside
exception Phi nodes.
We solve this for now by special-casing the `receiver` to reuse the InitialValue node instead of creating a new Phi. Bug: v8:7700 Change-Id: I4f4de9a643b98e2fcbc7ee7a53688cc97a8d6f1d Fixed: chromium:1359428 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3893856 Reviewed-by: Leszek Swirski Auto-Submit: Jakob Linke Commit-Queue: Jakob Linke Cr-Commit-Position: refs/heads/main@{#83164} --- BUILD.gn | 1 + src/interpreter/bytecode-register.h | 3 + src/maglev/maglev-graph-builder.cc | 63 ++++++++++--------- src/maglev/maglev-graph-builder.h | 10 ++- src/maglev/maglev-graph.h | 3 + src/maglev/maglev-interpreter-frame-state.cc | 64 ++++++++++++++++++++ src/maglev/maglev-interpreter-frame-state.h | 33 +--------- src/maglev/maglev-ir.cc | 12 +++- 8 files changed, 124 insertions(+), 65 deletions(-) create mode 100644 src/maglev/maglev-interpreter-frame-state.cc diff --git a/BUILD.gn b/BUILD.gn index 29a4773526..3f78b1773f 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -4731,6 +4731,7 @@ v8_source_set("v8_base_without_compiler") { "src/maglev/maglev-concurrent-dispatcher.cc", "src/maglev/maglev-graph-builder.cc", "src/maglev/maglev-graph-printer.cc", + "src/maglev/maglev-interpreter-frame-state.cc", "src/maglev/maglev-ir.cc", "src/maglev/maglev-regalloc.cc", "src/maglev/maglev.cc", diff --git a/src/interpreter/bytecode-register.h b/src/interpreter/bytecode-register.h index 7fd47b681c..91a7944019 100644 --- a/src/interpreter/bytecode-register.h +++ b/src/interpreter/bytecode-register.h @@ -29,6 +29,9 @@ class V8_EXPORT_PRIVATE Register final { static Register FromParameterIndex(int index); int ToParameterIndex() const; + static Register receiver() { return FromParameterIndex(0); } + bool is_receiver() const { return ToParameterIndex() == 0; } + // Returns an invalid register. 
static Register invalid_value() { return Register(); } diff --git a/src/maglev/maglev-graph-builder.cc b/src/maglev/maglev-graph-builder.cc index a45609954f..88a6473b7c 100644 --- a/src/maglev/maglev-graph-builder.cc +++ b/src/maglev/maglev-graph-builder.cc @@ -72,36 +72,6 @@ MaglevGraphBuilder::MaglevGraphBuilder(LocalIsolate* local_isolate, } CalculatePredecessorCounts(); - - for (auto& offset_and_info : bytecode_analysis().GetLoopInfos()) { - int offset = offset_and_info.first; - const compiler::LoopInfo& loop_info = offset_and_info.second; - const compiler::BytecodeLivenessState* liveness = GetInLivenessFor(offset); - DCHECK_NULL(merge_states_[offset]); - if (FLAG_trace_maglev_graph_building) { - std::cout << "- Creating loop merge state at @" << offset << std::endl; - } - merge_states_[offset] = MergePointInterpreterFrameState::NewForLoop( - *compilation_unit_, offset, NumPredecessors(offset), liveness, - &loop_info); - } - - if (bytecode().handler_table_size() > 0) { - HandlerTable table(*bytecode().object()); - for (int i = 0; i < table.NumberOfRangeEntries(); i++) { - int offset = table.GetRangeHandler(i); - const compiler::BytecodeLivenessState* liveness = - GetInLivenessFor(offset); - DCHECK_EQ(NumPredecessors(offset), 0); - DCHECK_NULL(merge_states_[offset]); - if (FLAG_trace_maglev_graph_building) { - std::cout << "- Creating exception merge state at @" << offset - << std::endl; - } - merge_states_[offset] = MergePointInterpreterFrameState::NewForCatchBlock( - *compilation_unit_, liveness, offset); - } - } } void MaglevGraphBuilder::StartPrologue() { @@ -157,6 +127,38 @@ void MaglevGraphBuilder::BuildRegisterFrameInitialization() { } } +void MaglevGraphBuilder::BuildMergeStates() { + for (auto& offset_and_info : bytecode_analysis().GetLoopInfos()) { + int offset = offset_and_info.first; + const compiler::LoopInfo& loop_info = offset_and_info.second; + const compiler::BytecodeLivenessState* liveness = GetInLivenessFor(offset); + DCHECK_NULL(merge_states_[offset]); + if (FLAG_trace_maglev_graph_building) { + std::cout << "- Creating loop merge state at @" << offset << std::endl; + } + merge_states_[offset] = MergePointInterpreterFrameState::NewForLoop( + *compilation_unit_, offset, NumPredecessors(offset), liveness, + &loop_info); + } + + if (bytecode().handler_table_size() > 0) { + HandlerTable table(*bytecode().object()); + for (int i = 0; i < table.NumberOfRangeEntries(); i++) { + int offset = table.GetRangeHandler(i); + const compiler::BytecodeLivenessState* liveness = + GetInLivenessFor(offset); + DCHECK_EQ(NumPredecessors(offset), 0); + DCHECK_NULL(merge_states_[offset]); + if (FLAG_trace_maglev_graph_building) { + std::cout << "- Creating exception merge state at @" << offset + << std::endl; + } + merge_states_[offset] = MergePointInterpreterFrameState::NewForCatchBlock( + *compilation_unit_, liveness, offset, graph_, is_inline()); + } + } +} + namespace { template struct NodeForOperationHelper; @@ -1849,6 +1851,7 @@ void MaglevGraphBuilder::InlineCallFromRegisters( // TODO(leszeks): Also correctly set up the closure and context slots, instead // of using InitialValue. inner_graph_builder.BuildRegisterFrameInitialization(); + inner_graph_builder.BuildMergeStates(); BasicBlock* inlined_prologue = inner_graph_builder.EndPrologue(); // Set the entry JumpToInlined to jump to the prologue block. 
diff --git a/src/maglev/maglev-graph-builder.h b/src/maglev/maglev-graph-builder.h index 5f34fc5350..93634d79c3 100644 --- a/src/maglev/maglev-graph-builder.h +++ b/src/maglev/maglev-graph-builder.h @@ -44,10 +44,15 @@ class MaglevGraphBuilder { StartPrologue(); for (int i = 0; i < parameter_count(); i++) { - SetArgument(i, AddNewNode( - {}, interpreter::Register::FromParameterIndex(i))); + // TODO(v8:7700): Consider creating InitialValue nodes lazily. + InitialValue* v = AddNewNode( + {}, interpreter::Register::FromParameterIndex(i)); + DCHECK_EQ(graph()->parameters().size(), static_cast(i)); + graph()->parameters().push_back(v); + SetArgument(i, v); } BuildRegisterFrameInitialization(); + BuildMergeStates(); EndPrologue(); BuildBody(); } @@ -56,6 +61,7 @@ class MaglevGraphBuilder { void SetArgument(int i, ValueNode* value); ValueNode* GetTaggedArgument(int i); void BuildRegisterFrameInitialization(); + void BuildMergeStates(); BasicBlock* EndPrologue(); void BuildBody() { diff --git a/src/maglev/maglev-graph.h b/src/maglev/maglev-graph.h index e6da9d4900..cc084e8350 100644 --- a/src/maglev/maglev-graph.h +++ b/src/maglev/maglev-graph.h @@ -32,6 +32,7 @@ class Graph final : public ZoneObject { smi_(zone), int_(zone), float_(zone), + parameters_(zone), constants_(zone) {} BasicBlock* operator[](int i) { return blocks_[i]; } @@ -65,6 +66,7 @@ class Graph final : public ZoneObject { ZoneMap& smi() { return smi_; } ZoneMap& int32() { return int_; } ZoneMap& float64() { return float_; } + ZoneVector& parameters() { return parameters_; } compiler::ZoneRefMap& constants() { return constants_; } @@ -82,6 +84,7 @@ class Graph final : public ZoneObject { ZoneMap smi_; ZoneMap int_; ZoneMap float_; + ZoneVector parameters_; compiler::ZoneRefMap constants_; Float64Constant* nan_ = nullptr; }; diff --git a/src/maglev/maglev-interpreter-frame-state.cc b/src/maglev/maglev-interpreter-frame-state.cc new file mode 100644 index 0000000000..ec6caabbfe --- /dev/null +++ b/src/maglev/maglev-interpreter-frame-state.cc @@ -0,0 +1,64 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/maglev/maglev-interpreter-frame-state.h" + +#include "src/maglev/maglev-compilation-info.h" +#include "src/maglev/maglev-graph.h" + +namespace v8 { +namespace internal { +namespace maglev { + +// static +MergePointInterpreterFrameState* +MergePointInterpreterFrameState::NewForCatchBlock( + const MaglevCompilationUnit& unit, + const compiler::BytecodeLivenessState* liveness, int handler_offset, + Graph* graph, bool is_inline) { + Zone* const zone = unit.zone(); + MergePointInterpreterFrameState* state = + zone->New( + unit, 0, 0, nullptr, BasicBlockType::kExceptionHandlerStart, + liveness); + auto& frame_state = state->frame_state_; + // If the accumulator is live, the ExceptionPhi associated to it is the + // first one in the block. That ensures it gets kReturnValue0 in the + // register allocator. See + // StraightForwardRegisterAllocator::AllocateRegisters. 
+  if (frame_state.liveness()->AccumulatorIsLive()) {
+    frame_state.accumulator(unit) = state->NewExceptionPhi(
+        zone, interpreter::Register::virtual_accumulator(), handler_offset);
+  }
+  frame_state.ForEachParameter(
+      unit, [&](ValueNode*& entry, interpreter::Register reg) {
+        if (!is_inline && reg.is_receiver()) {
+          // The receiver is a special case for a fairly silly reason:
+          // OptimizedFrame::Summarize requires the receiver (and the function)
+          // to be in a stack slot, since its value must be available even
+          // though we're not deoptimizing (and thus register states are not
+          // available). Exception phis could be allocated in a register.
+          // Since the receiver is immutable, simply reuse its InitialValue
+          // node.
+          // For inlined functions / nested graph generation, this a) doesn't
+          // work (there's no receiver stack slot); and b) isn't necessary
+          // (Summarize only looks at noninlined functions).
+          entry = graph->parameters()[0];
+        } else {
+          entry = state->NewExceptionPhi(zone, reg, handler_offset);
+        }
+      });
+  frame_state.context(unit) = state->NewExceptionPhi(
+      zone, interpreter::Register::current_context(), handler_offset);
+  frame_state.ForEachLocal(
+      unit, [&](ValueNode*& entry, interpreter::Register reg) {
+        entry = state->NewExceptionPhi(zone, reg, handler_offset);
+      });
+  state->known_node_aspects_ = zone->New<KnownNodeAspects>(zone);
+  return state;
+}
+
+}  // namespace maglev
+}  // namespace internal
+}  // namespace v8
diff --git a/src/maglev/maglev-interpreter-frame-state.h b/src/maglev/maglev-interpreter-frame-state.h
index 1e8d00499c..31f2bbe959 100644
--- a/src/maglev/maglev-interpreter-frame-state.h
+++ b/src/maglev/maglev-interpreter-frame-state.h
@@ -481,36 +481,9 @@ class MergePointInterpreterFrameState {
   }
 
   static MergePointInterpreterFrameState* NewForCatchBlock(
-      const MaglevCompilationUnit& info,
-      const compiler::BytecodeLivenessState* liveness, int handler_offset) {
-    MergePointInterpreterFrameState* state =
-        info.zone()->New<MergePointInterpreterFrameState>(
-            info, 0, 0, nullptr, BasicBlockType::kExceptionHandlerStart,
-            liveness);
-    auto& frame_state = state->frame_state_;
-    // If the accumulator is live, the ExceptionPhi associated to it is the
-    // first one in the block. That ensures it gets kReturnValue0 in the
-    // register allocator. See
-    // StraightForwardRegisterAllocator::AllocateRegisters.
-    if (frame_state.liveness()->AccumulatorIsLive()) {
-      frame_state.accumulator(info) = state->NewExceptionPhi(
-          info.zone(), interpreter::Register::virtual_accumulator(),
-          handler_offset);
-    }
-    frame_state.ForEachParameter(
-        info, [&](ValueNode*& entry, interpreter::Register reg) {
-          entry = state->NewExceptionPhi(info.zone(), reg, handler_offset);
-        });
-    frame_state.context(info) = state->NewExceptionPhi(
-        info.zone(), interpreter::Register::current_context(), handler_offset);
-    frame_state.ForEachLocal(
-        info, [&](ValueNode*& entry, interpreter::Register reg) {
-          entry = state->NewExceptionPhi(info.zone(), reg, handler_offset);
-        });
-    state->known_node_aspects_ =
-        info.zone()->New<KnownNodeAspects>(info.zone());
-    return state;
-  }
+      const MaglevCompilationUnit& unit,
+      const compiler::BytecodeLivenessState* liveness, int handler_offset,
+      Graph* graph, bool is_inline);
 
   // Merges an unmerged framestate with a possibly merged framestate into |this|
   // framestate.
diff --git a/src/maglev/maglev-ir.cc b/src/maglev/maglev-ir.cc
index de48bb491b..31280f4d85 100644
--- a/src/maglev/maglev-ir.cc
+++ b/src/maglev/maglev-ir.cc
@@ -2827,9 +2827,15 @@ void ChangeInt32ToFloat64::GenerateCode(MaglevAssembler* masm,
 void Phi::AllocateVreg(MaglevVregAllocationState* vreg_state) {
   // Phi inputs are processed in the post-process, once loop phis' inputs'
   // v-regs are allocated.
-  result().SetUnallocated(
-      compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT,
-      vreg_state->AllocateVirtualRegister());
+
+  // We have to pass a policy, but it is later ignored during register
+  // allocation. See StraightForwardRegisterAllocator::AllocateRegisters
+  // which has special handling for Phis.
+  static const compiler::UnallocatedOperand::ExtendedPolicy kIgnoredPolicy =
+      compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT;
+
+  result().SetUnallocated(kIgnoredPolicy,
+                          vreg_state->AllocateVirtualRegister());
 }
 // TODO(verwaest): Remove after switching the register allocator.
 void Phi::AllocateVregInPostProcess(MaglevVregAllocationState* vreg_state) {

From 6a69a24cb138bf99f5fd7d0991a413c67a4c338b Mon Sep 17 00:00:00 2001
From: Leszek Swirski
Date: Tue, 13 Sep 2022 14:02:36 +0200
Subject: [PATCH 0080/1772] [maglev] Only support fully monomorphic
 GetKeyedProperty

ElementAccessFeedback can contain multiple maps in a single transition
group when the feedback is polymorphic on the elements kind but not
otherwise on the map. Maglev should treat this case as polymorphic.
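For example (an illustrative JavaScript sketch, not from the original CL
description), one transition group can hold two maps that differ only in
elements kind:

    function load(a) { return a[0]; }
    load([1, 2, 3]);    // map with PACKED_SMI_ELEMENTS
    load([1.5, 2, 3]);  // same shape, map with PACKED_DOUBLE_ELEMENTS
    // The keyed-load feedback now has a single transition group that
    // contains both maps, so Maglev must treat the access as
    // polymorphic rather than monomorphic.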
Bug: v8:7700 Change-Id: Iff3c80d396274d14034e010dbe98f5640c9e4495 Fixed: chromium:1358872 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3892692 Commit-Queue: Jakob Linke Commit-Queue: Leszek Swirski Auto-Submit: Leszek Swirski Reviewed-by: Jakob Linke Cr-Commit-Position: refs/heads/main@{#83166} --- src/maglev/maglev-graph-builder.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/maglev/maglev-graph-builder.cc b/src/maglev/maglev-graph-builder.cc index edd6f1db1d..daef5c09e1 100644 --- a/src/maglev/maglev-graph-builder.cc +++ b/src/maglev/maglev-graph-builder.cc @@ -1040,7 +1040,8 @@ bool MaglevGraphBuilder::TryBuildMonomorphicLoadFromSmiHandler( InternalIndex index = descriptors.Search(field.property_index(), *map.object()); DCHECK(index.is_found()); - DCHECK(descriptors.GetDetails(index).representation().IsDouble()); + DCHECK(Representation::Double().CanBeInPlaceChangedTo( + descriptors.GetDetails(index).representation())); const compiler::CompilationDependency* dep = broker()->dependencies()->FieldRepresentationDependencyOffTheRecord( map, index, Representation::Double()); From 4084014d9855a8303fa9d53a06eda96b26ed4c08 Mon Sep 17 00:00:00 2001 From: Shu-yu Guo Date: Mon, 12 Sep 2022 08:04:04 -0700 Subject: [PATCH 0082/1772] Skip shared object GetConstructorName test if cannot create shared heap Bug: v8:12547 Change-Id: I89dbaea6b8559ada651b6ed986c842c1dc2b6df9 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3892129 Reviewed-by: Milad Farazmand Commit-Queue: Milad Farazmand Cr-Commit-Position: refs/heads/main@{#83167} --- test/cctest/test-api.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc index 5c3a041d75..759851f699 100644 --- a/test/cctest/test-api.cc +++ b/test/cctest/test-api.cc @@ -12937,6 +12937,8 @@ THREADED_TEST(SubclassGetConstructorName) { } UNINITIALIZED_TEST(SharedObjectGetConstructorName) { + if (!V8_CAN_CREATE_SHARED_HEAP_BOOL) return; + i::FLAG_shared_string_table = true; i::FLAG_harmony_struct = true; From 602e566e4c7b7511dd92a9584dfe43e37927f883 Mon Sep 17 00:00:00 2001 From: Omer Katz Date: Tue, 13 Sep 2022 16:08:05 +0200 Subject: [PATCH 0083/1772] [heap] Minor fixes for MinorMC MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 1) Cast to PagedSpaceBase instead of PagedSpace in sweeper.cc 2) Free LAB before filling space in heap-utils.cc Bug: v8:12612 Change-Id: I5820c2d2f4ab832a4b5a829fc55973d93296ec10 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3892690 Commit-Queue: Omer Katz Reviewed-by: Dominik Inführ Commit-Queue: Dominik Inführ Auto-Submit: Omer Katz Cr-Commit-Position: refs/heads/main@{#83168} --- src/heap/new-spaces.cc | 1 + src/heap/sweeper.cc | 2 +- test/unittests/heap/heap-utils.cc | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/src/heap/new-spaces.cc b/src/heap/new-spaces.cc index 9aa413c95b..4ba1f78891 100644 --- a/src/heap/new-spaces.cc +++ b/src/heap/new-spaces.cc @@ -743,6 +743,7 @@ bool SemiSpaceNewSpace::AddParkedAllocationBuffer( } void SemiSpaceNewSpace::FreeLinearAllocationArea() { + AdvanceAllocationObservers(); MakeLinearAllocationAreaIterable(); UpdateInlineAllocationLimit(0); } diff --git a/src/heap/sweeper.cc b/src/heap/sweeper.cc index 22f616c370..a910dddcbe 100644 --- a/src/heap/sweeper.cc +++ b/src/heap/sweeper.cc @@ -472,7 +472,7 @@ int Sweeper::RawSweep( if (active_system_pages_after_sweeping) { // Decrement accounted memory for discarded memory. 
-    PagedSpace* paged_space = static_cast<PagedSpace*>(p->owner());
+    PagedSpaceBase* paged_space = static_cast<PagedSpaceBase*>(p->owner());
     paged_space->ReduceActiveSystemPages(p,
                                          *active_system_pages_after_sweeping);
   }
diff --git a/test/unittests/heap/heap-utils.cc b/test/unittests/heap/heap-utils.cc
index b528f2826a..27194ea6b9 100644
--- a/test/unittests/heap/heap-utils.cc
+++ b/test/unittests/heap/heap-utils.cc
@@ -134,6 +134,7 @@ void HeapInternalsBase::SimulateFullSpace(
   // v8_flags.stress_concurrent_allocation = false;
   // Background thread allocating concurrently interferes with this function.
   CHECK(!v8_flags.stress_concurrent_allocation);
+  space->FreeLinearAllocationArea();
   if (v8_flags.minor_mc) {
     for (Page* page : *space) {
       FillPageInPagedSpace(page, out_handles);

From 6946d1dedb4ea6dec3a85526eedbb540c8214ea6 Mon Sep 17 00:00:00 2001
From: Teodor Dutu
Date: Tue, 13 Sep 2022 12:23:40 +0000
Subject: [PATCH 0084/1772] [csa] Enable allocation folding for builtins

This also allows allocation folding to be tested in cctests.

Bug: v8:13070
Change-Id: I7b6991461dd7ad4423539b33f59a05d6b247c3e7
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3891257
Auto-Submit: Teo Dutu
Commit-Queue: Teo Dutu
Reviewed-by: Igor Sheludko
Cr-Commit-Position: refs/heads/main@{#83169}
---
 src/codegen/optimized-compilation-info.cc |  1 +
 test/cctest/test-code-stub-assembler.cc   | 57 +++++++++++++++++++++++
 2 files changed, 58 insertions(+)

diff --git a/src/codegen/optimized-compilation-info.cc b/src/codegen/optimized-compilation-info.cc
index 38f9887fcd..85fea1c902 100644
--- a/src/codegen/optimized-compilation-info.cc
+++ b/src/codegen/optimized-compilation-info.cc
@@ -79,6 +79,7 @@ void OptimizedCompilationInfo::ConfigureFlags() {
     case CodeKind::BUILTIN:
     case CodeKind::FOR_TESTING:
       if (v8_flags.turbo_splitting) set_splitting();
+      if (v8_flags.enable_allocation_folding) set_allocation_folding();
 #if ENABLE_GDB_JIT_INTERFACE && DEBUG
       set_source_positions();
 #endif  // ENABLE_GDB_JIT_INTERFACE && DEBUG
diff --git a/test/cctest/test-code-stub-assembler.cc b/test/cctest/test-code-stub-assembler.cc
index 56387a44bd..e2f26c9809 100644
--- a/test/cctest/test-code-stub-assembler.cc
+++ b/test/cctest/test-code-stub-assembler.cc
@@ -1912,6 +1912,63 @@ TEST(AllocateJSObjectFromMap) {
   }
 }
 
+TEST(AllocationFoldingCSA) {
+  Isolate* isolate(CcTest::InitIsolateOnce());
+
+  const int kNumParams = 1;
+  const int kNumArrays = 7;
+  CodeAssemblerTester asm_tester(isolate, kNumParams + 1,
+                                 CodeKind::FOR_TESTING);  // Include receiver.
+  CodeStubAssembler m(asm_tester.state());
+
+  {
+    TNode<IntPtrT> length = m.SmiUntag(m.Parameter<Smi>(1));
+    TNode<FixedArray> result = m.UncheckedCast<FixedArray>(m.AllocateFixedArray(
+        PACKED_ELEMENTS, length, CodeStubAssembler::AllocationFlag::kNone));
+    for (int i = 1; i <= kNumArrays; ++i) {
+      int array_length = i * kTaggedSize;
+      TNode<ByteArray> array =
+          m.AllocateByteArray(m.UintPtrConstant(array_length));
+      m.StoreFixedArrayElement(result, i - 1, array);
+    }
+    m.Return(result);
+  }
+
+  FunctionTester ft(asm_tester.GenerateCode(), kNumParams);
+
+  {
+    auto fixed_array_length = Handle<Smi>(Smi::FromInt(kNumArrays), isolate);
+    Handle<FixedArray> result = Handle<FixedArray>::cast(
+        ft.Call(fixed_array_length).ToHandleChecked());
+    CHECK_EQ(result->length(), kNumArrays);
+    if (V8_COMPRESS_POINTERS_8GB_BOOL) {
+      CHECK(IsAligned(result->address(), kObjectAlignment8GbHeap));
+    } else {
+      CHECK(IsAligned(result->address(), kTaggedSize));
+    }
+    ByteArray prev_array;
+    for (int i = 1; i <= kNumArrays; ++i) {
+      ByteArray current_array = ByteArray::cast(result->get(i - 1));
+      if (V8_COMPRESS_POINTERS_8GB_BOOL) {
+        CHECK(IsAligned(current_array.address(), kObjectAlignment8GbHeap));
+      } else {
+        CHECK(IsAligned(current_array.address(), kTaggedSize));
+      }
+      CHECK_EQ(current_array.length(), i * kTaggedSize);
+      if (i != 1) {
+        // TODO(v8:13070): Align prev_array.AllocatedSize() to the allocation
+        // size.
+        CHECK_EQ(prev_array.address() + prev_array.AllocatedSize(),
+                 current_array.address());
+      }
+      prev_array = current_array;
+    }
+#ifdef VERIFY_HEAP
+    HeapVerifier::VerifyHeap(isolate->heap());
+#endif
+  }
+}
+
 namespace {
 
 template

From 8366df73c38950d17e9519adcc6067c391d88f1d Mon Sep 17 00:00:00 2001
From: Andy Wingo
Date: Tue, 13 Sep 2022 16:52:44 +0200
Subject: [PATCH 0085/1772] [stringrefs] Fold wtf8 policy into instruction set

Instead of having e.g. `string.new_wtf8` that takes an immediate
specifying the particular UTF-8 flavor to parse, make one instruction
per flavor. See https://github.com/WebAssembly/stringref/pull/46.
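The resulting opcode split, as a JavaScript sketch (the instruction
names follow the stringref proposal and the variant mapping mirrors the
Utf8VariantFromWtf8Policy helper removed below; treat the names as
illustrative rather than normative):

    // Old: one opcode plus a policy immediate. New: one opcode per flavor.
    const foldedOpcodes = {
      reject:  'string.new_utf8',        // unibrow::Utf8Variant::kUtf8
      accept:  'string.new_wtf8',        // unibrow::Utf8Variant::kWtf8
      replace: 'string.new_lossy_utf8',  // unibrow::Utf8Variant::kLossyUtf8
    };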
Bug: v8:12868 Change-Id: I2e9f2735c557b2352b6e75314037e473710d87a9 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3892695 Reviewed-by: Jakob Kummerow Commit-Queue: Andy Wingo Cr-Commit-Position: refs/heads/main@{#83170} --- src/builtins/wasm.tq | 20 +- src/compiler/wasm-compiler.cc | 29 +- src/compiler/wasm-compiler.h | 10 +- src/runtime/runtime-wasm.cc | 81 ++--- src/strings/unicode.h | 9 +- src/wasm/baseline/liftoff-compiler.cc | 75 ++-- src/wasm/function-body-decoder-impl.h | 334 ++++++++++-------- src/wasm/graph-builder-interface.cc | 52 +-- src/wasm/wasm-constants.h | 10 - src/wasm/wasm-disassembler.cc | 7 - src/wasm/wasm-opcodes.h | 24 +- test/mjsunit/wasm/stringrefs-exec-gc.js | 25 +- test/mjsunit/wasm/stringrefs-exec.js | 49 +-- test/mjsunit/wasm/stringrefs-valid.js | 129 +++---- test/mjsunit/wasm/wasm-module-builder.js | 26 +- .../wasm/function-body-decoder-unittest.cc | 4 +- 16 files changed, 433 insertions(+), 451 deletions(-) diff --git a/src/builtins/wasm.tq b/src/builtins/wasm.tq index 640bab596f..dbf80befe2 100644 --- a/src/builtins/wasm.tq +++ b/src/builtins/wasm.tq @@ -808,20 +808,20 @@ transitioning javascript builtin ExperimentalWasmConvertStringToArray( } builtin WasmStringNewWtf8( - offset: uint32, size: uint32, memory: Smi, policy: Smi): String { + offset: uint32, size: uint32, memory: Smi, utf8Variant: Smi): String { const instance = LoadInstanceFromFrame(); tail runtime::WasmStringNewWtf8( - LoadContextFromInstance(instance), instance, memory, policy, + LoadContextFromInstance(instance), instance, memory, utf8Variant, WasmUint32ToNumber(offset), WasmUint32ToNumber(size)); } builtin WasmStringNewWtf8Array( - start: uint32, end: uint32, array: WasmArray, policy: Smi): String { + start: uint32, end: uint32, array: WasmArray, utf8Variant: Smi): String { const context = LoadContextFromFrame(); try { if (array.length < end) goto OffsetOutOfRange; if (end < start) goto OffsetOutOfRange; tail runtime::WasmStringNewWtf8Array( - context, policy, array, SmiFromUint32(start), SmiFromUint32(end)); + context, utf8Variant, array, SmiFromUint32(start), SmiFromUint32(end)); } label OffsetOutOfRange deferred { const error = MessageTemplate::kWasmTrapArrayOutOfBounds; runtime::ThrowWasmError(context, SmiConstant(error)); @@ -863,18 +863,18 @@ builtin WasmStringMeasureWtf8(string: String): int32 { return Signed(ChangeNumberToUint32(result)); } builtin WasmStringEncodeWtf8( - string: String, offset: uint32, memory: Smi, policy: Smi): uint32 { + string: String, offset: uint32, memory: Smi, utf8Variant: Smi): uint32 { const instance = LoadInstanceFromFrame(); const result = runtime::WasmStringEncodeWtf8( - LoadContextFromInstance(instance), instance, memory, policy, string, + LoadContextFromInstance(instance), instance, memory, utf8Variant, string, WasmUint32ToNumber(offset)); return ChangeNumberToUint32(result); } builtin WasmStringEncodeWtf8Array( - string: String, array: WasmArray, start: uint32, policy: Smi): uint32 { + string: String, array: WasmArray, start: uint32, utf8Variant: Smi): uint32 { const instance = LoadInstanceFromFrame(); const result = runtime::WasmStringEncodeWtf8Array( - LoadContextFromInstance(instance), policy, string, array, + LoadContextFromInstance(instance), utf8Variant, string, array, WasmUint32ToNumber(start)); return ChangeNumberToUint32(result); } @@ -985,7 +985,7 @@ struct NewPositionAndBytesWritten { } builtin WasmStringViewWtf8Encode( addr: uint32, pos: uint32, bytes: uint32, view: ByteArray, memory: Smi, - policy: Smi): 
NewPositionAndBytesWritten { + utf8Variant: Smi): NewPositionAndBytesWritten { const start = WasmStringViewWtf8Advance(view, pos, 0); const end = WasmStringViewWtf8Advance(view, start, bytes); const instance = LoadInstanceFromFrame(); @@ -999,7 +999,7 @@ builtin WasmStringViewWtf8Encode( // Always call out to run-time, to catch invalid addr. runtime::WasmStringViewWtf8Encode( - context, instance, policy, view, WasmUint32ToNumber(addr), + context, instance, utf8Variant, view, WasmUint32ToNumber(addr), WasmUint32ToNumber(start), WasmUint32ToNumber(end)); return NewPositionAndBytesWritten{ diff --git a/src/compiler/wasm-compiler.cc b/src/compiler/wasm-compiler.cc index 532e29f483..04766b2e9d 100644 --- a/src/compiler/wasm-compiler.cc +++ b/src/compiler/wasm-compiler.cc @@ -5732,19 +5732,19 @@ void WasmGraphBuilder::ArrayCopy(Node* dst_array, Node* dst_index, } Node* WasmGraphBuilder::StringNewWtf8(uint32_t memory, - wasm::StringRefWtf8Policy policy, + unibrow::Utf8Variant variant, Node* offset, Node* size) { return gasm_->CallBuiltin(Builtin::kWasmStringNewWtf8, Operator::kNoDeopt, offset, size, gasm_->SmiConstant(memory), - gasm_->SmiConstant(static_cast(policy))); + gasm_->SmiConstant(static_cast(variant))); } -Node* WasmGraphBuilder::StringNewWtf8Array(wasm::StringRefWtf8Policy policy, +Node* WasmGraphBuilder::StringNewWtf8Array(unibrow::Utf8Variant variant, Node* array, Node* start, Node* end) { return gasm_->CallBuiltin(Builtin::kWasmStringNewWtf8Array, Operator::kNoDeopt, start, end, array, - gasm_->SmiConstant(static_cast(policy))); + gasm_->SmiConstant(static_cast(variant))); } Node* WasmGraphBuilder::StringNewWtf16(uint32_t memory, Node* offset, @@ -5794,7 +5794,7 @@ Node* WasmGraphBuilder::StringMeasureWtf16(Node* string, } Node* WasmGraphBuilder::StringEncodeWtf8(uint32_t memory, - wasm::StringRefWtf8Policy policy, + unibrow::Utf8Variant variant, Node* string, CheckForNull null_check, Node* offset, wasm::WasmCodePosition position) { @@ -5803,13 +5803,13 @@ Node* WasmGraphBuilder::StringEncodeWtf8(uint32_t memory, } return gasm_->CallBuiltin(Builtin::kWasmStringEncodeWtf8, Operator::kNoDeopt, string, offset, gasm_->SmiConstant(memory), - gasm_->SmiConstant(policy)); + gasm_->SmiConstant(static_cast(variant))); } Node* WasmGraphBuilder::StringEncodeWtf8Array( - wasm::StringRefWtf8Policy policy, Node* string, - CheckForNull string_null_check, Node* array, CheckForNull array_null_check, - Node* start, wasm::WasmCodePosition position) { + unibrow::Utf8Variant variant, Node* string, CheckForNull string_null_check, + Node* array, CheckForNull array_null_check, Node* start, + wasm::WasmCodePosition position) { if (string_null_check == kWithNullCheck) { string = AssertNotNull(string, position); } @@ -5818,7 +5818,7 @@ Node* WasmGraphBuilder::StringEncodeWtf8Array( } return gasm_->CallBuiltin(Builtin::kWasmStringEncodeWtf8Array, Operator::kNoDeopt, string, array, start, - gasm_->SmiConstant(policy)); + gasm_->SmiConstant(static_cast(variant))); } Node* WasmGraphBuilder::StringEncodeWtf16(uint32_t memory, Node* string, @@ -5900,15 +5900,16 @@ Node* WasmGraphBuilder::StringViewWtf8Advance(Node* view, } void WasmGraphBuilder::StringViewWtf8Encode( - uint32_t memory, wasm::StringRefWtf8Policy policy, Node* view, + uint32_t memory, unibrow::Utf8Variant variant, Node* view, CheckForNull null_check, Node* addr, Node* pos, Node* bytes, Node** next_pos, Node** bytes_written, wasm::WasmCodePosition position) { if (null_check == kWithNullCheck) { view = AssertNotNull(view, position); } - Node* pair = 
gasm_->CallBuiltin( - Builtin::kWasmStringViewWtf8Encode, Operator::kNoDeopt, addr, pos, bytes, - view, gasm_->SmiConstant(memory), gasm_->SmiConstant(policy)); + Node* pair = + gasm_->CallBuiltin(Builtin::kWasmStringViewWtf8Encode, Operator::kNoDeopt, + addr, pos, bytes, view, gasm_->SmiConstant(memory), + gasm_->SmiConstant(static_cast(variant))); *next_pos = gasm_->Projection(0, pair); *bytes_written = gasm_->Projection(1, pair); } diff --git a/src/compiler/wasm-compiler.h b/src/compiler/wasm-compiler.h index c8359b63fe..f562f7e5fe 100644 --- a/src/compiler/wasm-compiler.h +++ b/src/compiler/wasm-compiler.h @@ -516,9 +516,9 @@ class WasmGraphBuilder { void BrOnI31(Node* object, Node* rtt, WasmTypeCheckConfig config, Node** match_control, Node** match_effect, Node** no_match_control, Node** no_match_effect); - Node* StringNewWtf8(uint32_t memory, wasm::StringRefWtf8Policy policy, + Node* StringNewWtf8(uint32_t memory, unibrow::Utf8Variant variant, Node* offset, Node* size); - Node* StringNewWtf8Array(wasm::StringRefWtf8Policy policy, Node* array, + Node* StringNewWtf8Array(unibrow::Utf8Variant variant, Node* array, Node* start, Node* end); Node* StringNewWtf16(uint32_t memory, Node* offset, Node* size); Node* StringNewWtf16Array(Node* array, Node* start, Node* end); @@ -529,10 +529,10 @@ class WasmGraphBuilder { wasm::WasmCodePosition position); Node* StringMeasureWtf16(Node* string, CheckForNull null_check, wasm::WasmCodePosition position); - Node* StringEncodeWtf8(uint32_t memory, wasm::StringRefWtf8Policy policy, + Node* StringEncodeWtf8(uint32_t memory, unibrow::Utf8Variant variant, Node* string, CheckForNull null_check, Node* offset, wasm::WasmCodePosition position); - Node* StringEncodeWtf8Array(wasm::StringRefWtf8Policy policy, Node* string, + Node* StringEncodeWtf8Array(unibrow::Utf8Variant variant, Node* string, CheckForNull string_null_check, Node* array, CheckForNull array_null_check, Node* start, wasm::WasmCodePosition position); @@ -553,7 +553,7 @@ class WasmGraphBuilder { wasm::WasmCodePosition position); Node* StringViewWtf8Advance(Node* view, CheckForNull null_check, Node* pos, Node* bytes, wasm::WasmCodePosition position); - void StringViewWtf8Encode(uint32_t memory, wasm::StringRefWtf8Policy policy, + void StringViewWtf8Encode(uint32_t memory, unibrow::Utf8Variant variant, Node* view, CheckForNull null_check, Node* addr, Node* pos, Node* bytes, Node** next_pos, Node** bytes_written, diff --git a/src/runtime/runtime-wasm.cc b/src/runtime/runtime-wasm.cc index 5223e639d7..221fce892f 100644 --- a/src/runtime/runtime-wasm.cc +++ b/src/runtime/runtime-wasm.cc @@ -854,20 +854,6 @@ RUNTIME_FUNCTION(Runtime_WasmCreateResumePromise) { return *result; } -namespace { -unibrow::Utf8Variant Utf8VariantFromWtf8Policy( - wasm::StringRefWtf8Policy policy) { - switch (policy) { - case wasm::kWtf8PolicyReject: - return unibrow::Utf8Variant::kUtf8; - case wasm::kWtf8PolicyAccept: - return unibrow::Utf8Variant::kWtf8; - case wasm::kWtf8PolicyReplace: - return unibrow::Utf8Variant::kLossyUtf8; - } -} -} // namespace - // Returns the new string if the operation succeeds. Otherwise throws an // exception and returns an empty result. 
 RUNTIME_FUNCTION(Runtime_WasmStringNewWtf8) {
@@ -876,16 +862,16 @@ RUNTIME_FUNCTION(Runtime_WasmStringNewWtf8) {
   HandleScope scope(isolate);
   WasmInstanceObject instance = WasmInstanceObject::cast(args[0]);
   uint32_t memory = args.positive_smi_value_at(1);
-  uint32_t policy_value = args.positive_smi_value_at(2);
+  uint32_t utf8_variant_value = args.positive_smi_value_at(2);
   uint32_t offset = NumberToUint32(args[3]);
   uint32_t size = NumberToUint32(args[4]);
 
   DCHECK_EQ(memory, 0);
   USE(memory);
-  DCHECK(policy_value <= wasm::kLastWtf8Policy);
+  DCHECK(utf8_variant_value <=
+         static_cast<uint32_t>(unibrow::Utf8Variant::kLastUtf8Variant));
 
-  auto policy = static_cast<wasm::StringRefWtf8Policy>(policy_value);
-  auto utf8_variant = Utf8VariantFromWtf8Policy(policy);
+  auto utf8_variant = static_cast<unibrow::Utf8Variant>(utf8_variant_value);
 
   uint64_t mem_size = instance.memory_size();
   if (!base::IsInBounds<uint64_t>(offset, size, mem_size)) {
@@ -902,14 +888,14 @@ RUNTIME_FUNCTION(Runtime_WasmStringNewWtf8Array) {
   ClearThreadInWasmScope flag_scope(isolate);
   DCHECK_EQ(4, args.length());
   HandleScope scope(isolate);
-  uint32_t policy_value = args.positive_smi_value_at(0);
+  uint32_t utf8_variant_value = args.positive_smi_value_at(0);
   Handle<WasmArray> array(WasmArray::cast(args[1]), isolate);
   uint32_t start = NumberToUint32(args[2]);
   uint32_t end = NumberToUint32(args[3]);
 
-  DCHECK(policy_value <= wasm::kLastWtf8Policy);
-  auto policy = static_cast<wasm::StringRefWtf8Policy>(policy_value);
-  auto utf8_variant = Utf8VariantFromWtf8Policy(policy);
+  DCHECK(utf8_variant_value <=
+         static_cast<uint32_t>(unibrow::Utf8Variant::kLastUtf8Variant));
+  auto utf8_variant = static_cast<unibrow::Utf8Variant>(utf8_variant_value);
 
   RETURN_RESULT_OR_FAILURE(isolate, isolate->factory()->NewStringFromUtf8(
                                         array, start, end, utf8_variant));
@@ -1023,7 +1009,7 @@ bool HasUnpairedSurrogate(base::Vector<const base::uc16> wtf16) {
 
 // TODO(12868): Consider unifying with api.cc:String::WriteUtf8.
 template <typename T>
 int EncodeWtf8(base::Vector<char> bytes, size_t offset,
-               base::Vector<const T> wtf16, wasm::StringRefWtf8Policy policy,
+               base::Vector<const T> wtf16, unibrow::Utf8Variant variant,
                MessageTemplate* message, MessageTemplate out_of_bounds) {
   // The first check is a quick estimate to decide whether the second check
   // is worth the computation.
@@ -1034,16 +1020,16 @@ int EncodeWtf8(base::Vector<char> bytes, size_t offset,
   }
 
   bool replace_invalid = false;
-  switch (policy) {
-    case wasm::kWtf8PolicyAccept:
+  switch (variant) {
+    case unibrow::Utf8Variant::kWtf8:
       break;
-    case wasm::kWtf8PolicyReject:
+    case unibrow::Utf8Variant::kUtf8:
       if (HasUnpairedSurrogate(wtf16)) {
         *message = MessageTemplate::kWasmTrapStringIsolatedSurrogate;
         return -1;
       }
       break;
-    case wasm::kWtf8PolicyReplace:
+    case unibrow::Utf8Variant::kLossyUtf8:
       replace_invalid = true;
       break;
     default:
@@ -1061,7 +1047,7 @@ int EncodeWtf8(base::Vector<char> bytes, size_t offset,
   return static_cast<int>(dst - dst_start);
 }
 
 template <typename GetWritableBytes>
-Object EncodeWtf8(Isolate* isolate, wasm::StringRefWtf8Policy policy,
+Object EncodeWtf8(Isolate* isolate, unibrow::Utf8Variant variant,
                   Handle<String> string, GetWritableBytes get_writable_bytes,
                   size_t offset, MessageTemplate out_of_bounds_message) {
   string = String::Flatten(isolate, string);
@@ -1072,9 +1058,9 @@ Object EncodeWtf8(Isolate* isolate, wasm::StringRefWtf8Policy policy,
     String::FlatContent content = string->GetFlatContent(no_gc);
     base::Vector<char> dst = get_writable_bytes(no_gc);
     written = content.IsOneByte()
-                  ? EncodeWtf8(dst, offset, content.ToOneByteVector(), policy,
+                  ? EncodeWtf8(dst, offset, content.ToOneByteVector(), variant,
                                &message, out_of_bounds_message)
-                  : EncodeWtf8(dst, offset, content.ToUC16Vector(), policy,
+                  : EncodeWtf8(dst, offset, content.ToUC16Vector(), variant,
                                &message, out_of_bounds_message);
   }
   if (written < 0) {
@@ -1128,21 +1114,22 @@ RUNTIME_FUNCTION(Runtime_WasmStringEncodeWtf8) {
   HandleScope scope(isolate);
   WasmInstanceObject instance = WasmInstanceObject::cast(args[0]);
   uint32_t memory = args.positive_smi_value_at(1);
-  uint32_t policy_value = args.positive_smi_value_at(2);
+  uint32_t utf8_variant_value = args.positive_smi_value_at(2);
   Handle<String> string(String::cast(args[3]), isolate);
   uint32_t offset = NumberToUint32(args[4]);
 
   DCHECK_EQ(memory, 0);
   USE(memory);
-  DCHECK(policy_value <= wasm::kLastWtf8Policy);
+  DCHECK(utf8_variant_value <=
+         static_cast<uint32_t>(unibrow::Utf8Variant::kLastUtf8Variant));
 
   char* memory_start = reinterpret_cast<char*>(instance.memory_start());
-  auto policy = static_cast<wasm::StringRefWtf8Policy>(policy_value);
+  auto utf8_variant = static_cast<unibrow::Utf8Variant>(utf8_variant_value);
   auto get_writable_bytes =
       [&](const DisallowGarbageCollection&) -> base::Vector<char> {
     return {memory_start, instance.memory_size()};
   };
-  return EncodeWtf8(isolate, policy, string, get_writable_bytes, offset,
+  return EncodeWtf8(isolate, utf8_variant, string, get_writable_bytes, offset,
                     MessageTemplate::kWasmTrapMemOutOfBounds);
 }
 
@@ -1150,18 +1137,19 @@ RUNTIME_FUNCTION(Runtime_WasmStringEncodeWtf8Array) {
   ClearThreadInWasmScope flag_scope(isolate);
   DCHECK_EQ(4, args.length());
   HandleScope scope(isolate);
-  uint32_t policy_value = args.positive_smi_value_at(0);
+  uint32_t utf8_variant_value = args.positive_smi_value_at(0);
   Handle<String> string(String::cast(args[1]), isolate);
   Handle<WasmArray> array(WasmArray::cast(args[2]), isolate);
   uint32_t start = NumberToUint32(args[3]);
 
-  DCHECK(policy_value <= wasm::kLastWtf8Policy);
-  auto policy = static_cast<wasm::StringRefWtf8Policy>(policy_value);
+  DCHECK(utf8_variant_value <=
+         static_cast<uint32_t>(unibrow::Utf8Variant::kLastUtf8Variant));
+  auto utf8_variant = static_cast<unibrow::Utf8Variant>(utf8_variant_value);
   auto get_writable_bytes =
       [&](const DisallowGarbageCollection&) -> base::Vector<char> {
     return {reinterpret_cast<char*>(array->ElementAddress(0)),
            array->length()};
   };
-  return EncodeWtf8(isolate, policy, string, get_writable_bytes, start,
+  return EncodeWtf8(isolate, utf8_variant, string, get_writable_bytes, start,
                     MessageTemplate::kWasmTrapArrayOutOfBounds);
 }
 
@@ -1215,13 +1203,13 @@ RUNTIME_FUNCTION(Runtime_WasmStringAsWtf8) {
   int wtf8_length = MeasureWtf8(isolate, string);
   Handle<ByteArray> array = isolate->factory()->NewByteArray(wtf8_length);
 
-  wasm::StringRefWtf8Policy policy = wasm::kWtf8PolicyAccept;
+  auto utf8_variant = unibrow::Utf8Variant::kWtf8;
   auto get_writable_bytes =
       [&](const DisallowGarbageCollection&) -> base::Vector<char> {
     return {reinterpret_cast<char*>(array->GetDataStartAddress()),
            static_cast<size_t>(wtf8_length)};
   };
-  EncodeWtf8(isolate, policy, string, get_writable_bytes, 0,
+  EncodeWtf8(isolate, utf8_variant, string, get_writable_bytes, 0,
              MessageTemplate::kWasmTrapArrayOutOfBounds);
   return *array;
 }
@@ -1231,17 +1219,18 @@ RUNTIME_FUNCTION(Runtime_WasmStringViewWtf8Encode) {
   DCHECK_EQ(6, args.length());
   HandleScope scope(isolate);
   WasmInstanceObject instance = WasmInstanceObject::cast(args[0]);
-  uint32_t policy_value = args.positive_smi_value_at(1);
+  uint32_t utf8_variant_value = args.positive_smi_value_at(1);
   Handle<ByteArray> array(ByteArray::cast(args[2]), isolate);
   uint32_t addr = NumberToUint32(args[3]);
   uint32_t start = NumberToUint32(args[4]);
   uint32_t end = NumberToUint32(args[5]);
 
-  DCHECK(policy_value <= wasm::kLastWtf8Policy);
+  DCHECK(utf8_variant_value <=
+         static_cast<uint32_t>(unibrow::Utf8Variant::kLastUtf8Variant));
   DCHECK_LE(start, end);
   DCHECK(base::IsInBounds<size_t>(start, end - start, array->length()));
 
-  auto policy = static_cast<wasm::StringRefWtf8Policy>(policy_value);
+  auto utf8_variant = static_cast<unibrow::Utf8Variant>(utf8_variant_value);
 
   size_t length = end - start;
 
   if (!base::IsInBounds<size_t>(addr, length, instance.memory_size())) {
@@ -1254,9 +1243,9 @@ RUNTIME_FUNCTION(Runtime_WasmStringViewWtf8Encode) {
   byte* dst = memory_start + addr;
 
   std::vector<size_t> surrogates;
-  if (policy != wasm::kWtf8PolicyAccept) {
+  if (utf8_variant != unibrow::Utf8Variant::kWtf8) {
     unibrow::Wtf8::ScanForSurrogates({src, length}, &surrogates);
-    if (policy == wasm::kWtf8PolicyReject && !surrogates.empty()) {
+    if (utf8_variant == unibrow::Utf8Variant::kUtf8 && !surrogates.empty()) {
       return ThrowWasmError(isolate,
                             MessageTemplate::kWasmTrapStringIsolatedSurrogate);
     }
@@ -1266,7 +1255,7 @@ RUNTIME_FUNCTION(Runtime_WasmStringViewWtf8Encode) {
 
   for (size_t surrogate : surrogates) {
     DCHECK_LT(surrogate, length);
-    DCHECK_EQ(policy, wasm::kWtf8PolicyReplace);
+    DCHECK_EQ(utf8_variant, unibrow::Utf8Variant::kLossyUtf8);
     unibrow::Utf8::Encode(reinterpret_cast<char*>(dst + surrogate),
                           unibrow::Utf8::kBadChar, 0, false);
   }
diff --git a/src/strings/unicode.h b/src/strings/unicode.h
index 979c36d2c0..4b73bd4c3c 100644
--- a/src/strings/unicode.h
+++ b/src/strings/unicode.h
@@ -154,16 +154,17 @@ class Latin1 {
 };
 
 enum class Utf8Variant : uint8_t {
-  kLossyUtf8,  // Lossy UTF-8: Any byte sequence can be decoded without
-               // error, replacing invalid UTF-8 with the replacement
-               // character (U+FFFD). Any sequence of codepoints can be
-               // encoded without error, replacing surrogates with U+FFFD.
 #if V8_ENABLE_WEBASSEMBLY
   kUtf8,  // UTF-8.  Decoding an invalid byte sequence or encoding a
           // surrogate codepoint signals an error.
   kWtf8,  // WTF-8: like UTF-8, but allows isolated (but not paired)
           // surrogate codepoints to be encoded and decoded.
 #endif
+  kLossyUtf8,  // Lossy UTF-8: Any byte sequence can be decoded without
+               // error, replacing invalid UTF-8 with the replacement
+               // character (U+FFFD). Any sequence of codepoints can be
+               // encoded without error, replacing surrogates with U+FFFD.
+ kLastUtf8Variant = kLossyUtf8 }; class V8_EXPORT_PRIVATE Utf8 { diff --git a/src/wasm/baseline/liftoff-compiler.cc b/src/wasm/baseline/liftoff-compiler.cc index deacfaf943..a9466a4605 100644 --- a/src/wasm/baseline/liftoff-compiler.cc +++ b/src/wasm/baseline/liftoff-compiler.cc @@ -6262,19 +6262,20 @@ class LiftoffCompiler { } void StringNewWtf8(FullDecoder* decoder, - const EncodeWtf8Immediate& imm, - const Value& offset, const Value& size, Value* result) { + const MemoryIndexImmediate& imm, + const unibrow::Utf8Variant variant, const Value& offset, + const Value& size, Value* result) { LiftoffRegList pinned; LiftoffRegister memory_reg = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); - LoadSmi(memory_reg, imm.memory.index); + LoadSmi(memory_reg, imm.index); LiftoffAssembler::VarState memory_var(kSmiKind, memory_reg, 0); - LiftoffRegister policy_reg = + LiftoffRegister variant_reg = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); - LoadSmi(policy_reg, static_cast(imm.policy.value)); - LiftoffAssembler::VarState policy_var(kSmiKind, policy_reg, 0); + LoadSmi(variant_reg, static_cast(variant)); + LiftoffAssembler::VarState variant_var(kSmiKind, variant_reg, 0); CallRuntimeStub( WasmCode::kWasmStringNewWtf8, @@ -6283,7 +6284,7 @@ class LiftoffCompiler { __ cache_state()->stack_state.end()[-2], // offset __ cache_state()->stack_state.end()[-1], // size memory_var, - policy_var, + variant_var, }, decoder->position()); __ cache_state()->stack_state.pop_back(2); @@ -6294,7 +6295,7 @@ class LiftoffCompiler { } void StringNewWtf8Array(FullDecoder* decoder, - const Wtf8PolicyImmediate& imm, + const unibrow::Utf8Variant variant, const Value& array, const Value& start, const Value& end, Value* result) { LiftoffRegList pinned; @@ -6304,10 +6305,10 @@ class LiftoffCompiler { MaybeEmitNullCheck(decoder, array_reg.gp(), pinned, array.type); LiftoffAssembler::VarState array_var(kRef, array_reg, 0); - LiftoffRegister policy_reg = + LiftoffRegister variant_reg = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); - LoadSmi(policy_reg, static_cast(imm.value)); - LiftoffAssembler::VarState policy_var(kSmiKind, policy_reg, 0); + LoadSmi(variant_reg, static_cast(variant)); + LiftoffAssembler::VarState variant_var(kSmiKind, variant_reg, 0); CallRuntimeStub(WasmCode::kWasmStringNewWtf8Array, MakeSig::Returns(kRef).Params(kI32, kI32, kRef, kSmiKind), @@ -6315,7 +6316,7 @@ class LiftoffCompiler { __ cache_state()->stack_state.end()[-2], // start __ cache_state()->stack_state.end()[-1], // end array_var, - policy_var, + variant_var, }, decoder->position()); __ cache_state()->stack_state.pop_back(3); @@ -6395,20 +6396,20 @@ class LiftoffCompiler { } void StringMeasureWtf8(FullDecoder* decoder, - const Wtf8PolicyImmediate& imm, - const Value& str, Value* result) { + const unibrow::Utf8Variant variant, const Value& str, + Value* result) { LiftoffRegList pinned; LiftoffRegister string_reg = pinned.set(__ PopToRegister(pinned)); MaybeEmitNullCheck(decoder, string_reg.gp(), pinned, str.type); LiftoffAssembler::VarState string_var(kRef, string_reg, 0); WasmCode::RuntimeStubId stub_id; - switch (imm.value) { - case kWtf8PolicyReject: + switch (variant) { + case unibrow::Utf8Variant::kUtf8: stub_id = WasmCode::kWasmStringMeasureUtf8; break; - case kWtf8PolicyAccept: - case kWtf8PolicyReplace: + case unibrow::Utf8Variant::kLossyUtf8: + case unibrow::Utf8Variant::kWtf8: stub_id = WasmCode::kWasmStringMeasureWtf8; break; } @@ -6436,8 +6437,9 @@ class LiftoffCompiler { } void StringEncodeWtf8(FullDecoder* decoder, - const 
EncodeWtf8Immediate& imm, - const Value& str, const Value& offset, Value* result) { + const MemoryIndexImmediate& imm, + const unibrow::Utf8Variant variant, const Value& str, + const Value& offset, Value* result) { LiftoffRegList pinned; LiftoffAssembler::VarState& offset_var = @@ -6450,13 +6452,13 @@ class LiftoffCompiler { LiftoffRegister memory_reg = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); - LoadSmi(memory_reg, imm.memory.index); + LoadSmi(memory_reg, imm.index); LiftoffAssembler::VarState memory_var(kSmiKind, memory_reg, 0); - LiftoffRegister policy_reg = + LiftoffRegister variant_reg = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); - LoadSmi(policy_reg, static_cast(imm.policy.value)); - LiftoffAssembler::VarState policy_var(kSmiKind, policy_reg, 0); + LoadSmi(variant_reg, static_cast(variant)); + LiftoffAssembler::VarState variant_var(kSmiKind, variant_reg, 0); CallRuntimeStub( WasmCode::kWasmStringEncodeWtf8, @@ -6465,7 +6467,7 @@ class LiftoffCompiler { string_var, offset_var, memory_var, - policy_var, + variant_var, }, decoder->position()); __ DropValues(2); @@ -6476,7 +6478,7 @@ class LiftoffCompiler { } void StringEncodeWtf8Array(FullDecoder* decoder, - const Wtf8PolicyImmediate& imm, + const unibrow::Utf8Variant variant, const Value& str, const Value& array, const Value& start, Value* result) { LiftoffRegList pinned; @@ -6494,10 +6496,10 @@ class LiftoffCompiler { LiftoffAssembler::VarState& start_var = __ cache_state()->stack_state.end()[-1]; - LiftoffRegister policy_reg = + LiftoffRegister variant_reg = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); - LoadSmi(policy_reg, static_cast(imm.value)); - LiftoffAssembler::VarState policy_var(kSmiKind, policy_reg, 0); + LoadSmi(variant_reg, static_cast(variant)); + LiftoffAssembler::VarState variant_var(kSmiKind, variant_reg, 0); CallRuntimeStub(WasmCode::kWasmStringEncodeWtf8Array, MakeSig::Returns(kI32).Params(kRef, kRef, kI32, kSmiKind), @@ -6505,7 +6507,7 @@ class LiftoffCompiler { string_var, array_var, start_var, - policy_var, + variant_var, }, decoder->position()); __ DropValues(3); @@ -6737,7 +6739,8 @@ class LiftoffCompiler { } void StringViewWtf8Encode(FullDecoder* decoder, - const EncodeWtf8Immediate& imm, + const MemoryIndexImmediate& imm, + const unibrow::Utf8Variant variant, const Value& view, const Value& addr, const Value& pos, const Value& bytes, Value* next_pos, Value* bytes_written) { @@ -6757,13 +6760,13 @@ class LiftoffCompiler { LiftoffRegister memory_reg = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); - LoadSmi(memory_reg, imm.memory.index); + LoadSmi(memory_reg, imm.index); LiftoffAssembler::VarState memory_var(kSmiKind, memory_reg, 0); - LiftoffRegister policy_reg = + LiftoffRegister variant_reg = pinned.set(__ GetUnusedRegister(kGpReg, pinned)); - LoadSmi(policy_reg, static_cast(imm.policy.value)); - LiftoffAssembler::VarState policy_var(kSmiKind, policy_reg, 0); + LoadSmi(variant_reg, static_cast(variant)); + LiftoffAssembler::VarState variant_var(kSmiKind, variant_reg, 0); CallRuntimeStub(WasmCode::kWasmStringViewWtf8Encode, MakeSig::Returns(kI32, kI32) @@ -6774,7 +6777,7 @@ class LiftoffCompiler { bytes_var, view_var, memory_var, - policy_var, + variant_var, }, decoder->position()); __ DropValues(4); diff --git a/src/wasm/function-body-decoder-impl.h b/src/wasm/function-body-decoder-impl.h index d30ea12091..f8ca69d848 100644 --- a/src/wasm/function-body-decoder-impl.h +++ b/src/wasm/function-body-decoder-impl.h @@ -17,6 +17,7 @@ #include "src/base/small-vector.h" #include 
"src/base/strings.h" #include "src/base/v8-fallthrough.h" +#include "src/strings/unicode.h" #include "src/utils/bit-vector.h" #include "src/wasm/decoder.h" #include "src/wasm/function-body-decoder.h" @@ -788,33 +789,6 @@ struct StringConstImmediate { } }; -template -struct Wtf8PolicyImmediate { - StringRefWtf8Policy value; - const uint32_t length = 1; - - Wtf8PolicyImmediate(Decoder* decoder, const byte* pc) { - uint8_t u8 = decoder->read_u8(pc, "wtf8 policy"); - if (!VALIDATE(u8 <= kLastWtf8Policy)) { - DecodeError( - decoder, pc, "expected wtf8 policy 0, 1, or 2, but found %u", u8); - } - value = static_cast(u8); - } -}; - -template -struct EncodeWtf8Immediate { - MemoryIndexImmediate memory; - Wtf8PolicyImmediate policy; - uint32_t length; - - EncodeWtf8Immediate(Decoder* decoder, const byte* pc) - : memory(decoder, pc), - policy(decoder, pc + memory.length), - length(memory.length + policy.length) {} -}; - template struct PcForErrors { explicit PcForErrors(const byte* /* pc */) {} @@ -1112,20 +1086,22 @@ struct ControlBase : public PcForErrors { uint32_t br_depth) \ F(BrOnNonArray, const Value& object, Value* value_on_fallthrough, \ uint32_t br_depth) \ - F(StringNewWtf8, const EncodeWtf8Immediate& imm, \ - const Value& offset, const Value& size, Value* result) \ - F(StringNewWtf8Array, const Wtf8PolicyImmediate& imm, \ + F(StringNewWtf8, const MemoryIndexImmediate& memory, \ + const unibrow::Utf8Variant variant, const Value& offset, \ + const Value& size, Value* result) \ + F(StringNewWtf8Array, const unibrow::Utf8Variant variant, \ const Value& array, const Value& start, const Value& end, Value* result) \ - F(StringNewWtf16, const MemoryIndexImmediate& imm, \ + F(StringNewWtf16, const MemoryIndexImmediate& memory, \ const Value& offset, const Value& size, Value* result) \ F(StringNewWtf16Array, const Value& array, const Value& start, \ const Value& end, Value* result) \ - F(StringMeasureWtf8, const Wtf8PolicyImmediate& imm, \ - const Value& str, Value* result) \ + F(StringMeasureWtf8, const unibrow::Utf8Variant variant, const Value& str, \ + Value* result) \ F(StringMeasureWtf16, const Value& str, Value* result) \ - F(StringEncodeWtf8, const EncodeWtf8Immediate& memory, \ - const Value& str, const Value& address, Value* result) \ - F(StringEncodeWtf8Array, const Wtf8PolicyImmediate& imm, \ + F(StringEncodeWtf8, const MemoryIndexImmediate& memory, \ + const unibrow::Utf8Variant variant, const Value& str, \ + const Value& address, Value* result) \ + F(StringEncodeWtf8Array, const unibrow::Utf8Variant variant, \ const Value& str, const Value& array, const Value& start, Value* result) \ F(StringEncodeWtf16, const MemoryIndexImmediate& memory, \ const Value& str, const Value& address, Value* result) \ @@ -1137,9 +1113,10 @@ struct ControlBase : public PcForErrors { F(StringAsWtf8, const Value& str, Value* result) \ F(StringViewWtf8Advance, const Value& view, const Value& pos, \ const Value& bytes, Value* result) \ - F(StringViewWtf8Encode, const EncodeWtf8Immediate& memory, \ - const Value& view, const Value& addr, const Value& pos, \ - const Value& bytes, Value* next_pos, Value* bytes_written) \ + F(StringViewWtf8Encode, const MemoryIndexImmediate& memory, \ + const unibrow::Utf8Variant variant, const Value& view, const Value& addr, \ + const Value& pos, const Value& bytes, Value* next_pos, \ + Value* bytes_written) \ F(StringViewWtf8Slice, const Value& view, const Value& start, \ const Value& end, Value* result) \ F(StringAsWtf16, const Value& str, Value* result) \ @@ -1577,10 +1554,6 
@@ class WasmDecoder : public Decoder { return true; } - bool Validate(const byte* pc, EncodeWtf8Immediate& imm) { - return Validate(pc, imm.memory); - } - bool Validate(const byte* pc, StringConstImmediate& imm) { if (!VALIDATE(imm.index < module_->stringref_literals.size())) { DecodeError(pc, "Invalid string literal index: %u", imm.index); @@ -1667,7 +1640,6 @@ class WasmDecoder : public Decoder { void SimdLane(SimdLaneImmediate& imm) {} void Field(FieldImmediate& imm) {} void Length(IndexImmediate& imm) {} - void Wtf8Policy(Wtf8PolicyImmediate& imm) {} void TagIndex(TagIndexImmediate& imm) {} void FunctionIndex(IndexImmediate& imm) {} @@ -2083,6 +2055,15 @@ class WasmDecoder : public Decoder { case kExprExternExternalize: case kExprArrayLen: return length; + case kExprStringNewUtf8: + case kExprStringNewLossyUtf8: + case kExprStringNewWtf8: + case kExprStringEncodeUtf8: + case kExprStringEncodeLossyUtf8: + case kExprStringEncodeWtf8: + case kExprStringViewWtf8EncodeUtf8: + case kExprStringViewWtf8EncodeLossyUtf8: + case kExprStringViewWtf8EncodeWtf8: case kExprStringNewWtf16: case kExprStringEncodeWtf16: case kExprStringViewWtf16Encode: { @@ -2090,26 +2071,19 @@ class WasmDecoder : public Decoder { if (io) io->MemoryIndex(imm); return length + imm.length; } - case kExprStringNewWtf8: - case kExprStringEncodeWtf8: - case kExprStringViewWtf8Encode: { - EncodeWtf8Immediate imm(decoder, pc + length); - if (io) io->MemoryIndex(imm.memory); - if (io) io->Wtf8Policy(imm.policy); - return length + imm.length; - } case kExprStringConst: { StringConstImmediate imm(decoder, pc + length); if (io) io->StringConst(imm); return length + imm.length; } + case kExprStringMeasureUtf8: + case kExprStringMeasureWtf8: + case kExprStringNewUtf8Array: + case kExprStringNewLossyUtf8Array: case kExprStringNewWtf8Array: + case kExprStringEncodeUtf8Array: + case kExprStringEncodeLossyUtf8Array: case kExprStringEncodeWtf8Array: - case kExprStringMeasureWtf8: { - Wtf8PolicyImmediate imm(decoder, pc + length); - if (io) io->Wtf8Policy(imm); - return length + imm.length; - } case kExprStringMeasureWtf16: case kExprStringConcat: case kExprStringEq: @@ -2314,6 +2288,7 @@ class WasmDecoder : public Decoder { } case kExprStringConst: return { 0, 1 }; + case kExprStringMeasureUtf8: case kExprStringMeasureWtf8: case kExprStringMeasureWtf16: case kExprStringIsUSVSequence: @@ -2323,6 +2298,8 @@ class WasmDecoder : public Decoder { case kExprStringViewWtf16Length: case kExprStringViewIterNext: return { 1, 1 }; + case kExprStringNewUtf8: + case kExprStringNewLossyUtf8: case kExprStringNewWtf8: case kExprStringNewWtf16: case kExprStringConcat: @@ -2332,9 +2309,15 @@ class WasmDecoder : public Decoder { case kExprStringViewIterRewind: case kExprStringViewIterSlice: return { 2, 1 }; + case kExprStringNewUtf8Array: + case kExprStringNewLossyUtf8Array: case kExprStringNewWtf8Array: case kExprStringNewWtf16Array: + case kExprStringEncodeUtf8: + case kExprStringEncodeLossyUtf8: case kExprStringEncodeWtf8: + case kExprStringEncodeUtf8Array: + case kExprStringEncodeLossyUtf8Array: case kExprStringEncodeWtf8Array: case kExprStringEncodeWtf16: case kExprStringEncodeWtf16Array: @@ -2344,7 +2327,9 @@ class WasmDecoder : public Decoder { return { 3, 1 }; case kExprStringViewWtf16Encode: return { 4, 1 }; - case kExprStringViewWtf8Encode: + case kExprStringViewWtf8EncodeUtf8: + case kExprStringViewWtf8EncodeLossyUtf8: + case kExprStringViewWtf8EncodeWtf8: return { 4, 2 }; default: UNREACHABLE(); @@ -3692,7 +3677,7 @@ class WasmFullDecoder : 
public WasmDecoder { WasmOpcode full_opcode = this->template read_prefixed_opcode( this->pc_, &opcode_length, "gc index"); trace_msg->AppendOpcode(full_opcode); - if (full_opcode >= kExprStringNewWtf8) { + if (full_opcode >= kExprStringNewUtf8) { CHECK_PROTOTYPE_OPCODE(stringref); return DecodeStringRefOpcode(full_opcode, opcode_length); } else { @@ -5146,22 +5131,108 @@ class WasmFullDecoder : public WasmDecoder { enum class WasmArrayAccess { kRead, kWrite }; + int DecodeStringNewWtf8(unibrow::Utf8Variant variant, + uint32_t opcode_length) { + NON_CONST_ONLY + MemoryIndexImmediate memory(this, this->pc_ + opcode_length); + if (!this->Validate(this->pc_ + opcode_length, memory)) return 0; + ValueType addr_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32; + Value offset = Peek(1, 0, addr_type); + Value size = Peek(0, 1, kWasmI32); + Value result = CreateValue(ValueType::Ref(HeapType::kString)); + CALL_INTERFACE_IF_OK_AND_REACHABLE(StringNewWtf8, memory, variant, offset, + size, &result); + Drop(2); + Push(result); + return opcode_length + memory.length; + } + + int DecodeStringMeasureWtf8(unibrow::Utf8Variant variant, + uint32_t opcode_length) { + NON_CONST_ONLY + Value str = Peek(0, 0, kWasmStringRef); + Value result = CreateValue(kWasmI32); + CALL_INTERFACE_IF_OK_AND_REACHABLE(StringMeasureWtf8, variant, str, + &result); + Drop(str); + Push(result); + return opcode_length; + } + + int DecodeStringEncodeWtf8(unibrow::Utf8Variant variant, + uint32_t opcode_length) { + NON_CONST_ONLY + MemoryIndexImmediate memory(this, this->pc_ + opcode_length); + if (!this->Validate(this->pc_ + opcode_length, memory)) return 0; + ValueType addr_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32; + Value str = Peek(1, 0, kWasmStringRef); + Value addr = Peek(0, 1, addr_type); + Value result = CreateValue(kWasmI32); + CALL_INTERFACE_IF_OK_AND_REACHABLE(StringEncodeWtf8, memory, variant, str, + addr, &result); + Drop(2); + Push(result); + return opcode_length + memory.length; + } + + int DecodeStringViewWtf8Encode(unibrow::Utf8Variant variant, + uint32_t opcode_length) { + NON_CONST_ONLY + MemoryIndexImmediate memory(this, this->pc_ + opcode_length); + if (!this->Validate(this->pc_ + opcode_length, memory)) return 0; + ValueType addr_type = this->module_->is_memory64 ? 
kWasmI64 : kWasmI32; + Value view = Peek(3, 0, kWasmStringViewWtf8); + Value addr = Peek(2, 1, addr_type); + Value pos = Peek(1, 2, kWasmI32); + Value bytes = Peek(0, 3, kWasmI32); + Value next_pos = CreateValue(kWasmI32); + Value bytes_out = CreateValue(kWasmI32); + CALL_INTERFACE_IF_OK_AND_REACHABLE(StringViewWtf8Encode, memory, variant, + view, addr, pos, bytes, &next_pos, + &bytes_out); + Drop(4); + Push(next_pos); + Push(bytes_out); + return opcode_length + memory.length; + } + + int DecodeStringNewWtf8Array(unibrow::Utf8Variant variant, + uint32_t opcode_length) { + NON_CONST_ONLY + Value array = PeekPackedArray(2, 0, kWasmI8, WasmArrayAccess::kRead); + Value start = Peek(1, 1, kWasmI32); + Value end = Peek(0, 2, kWasmI32); + Value result = CreateValue(ValueType::Ref(HeapType::kString)); + CALL_INTERFACE_IF_OK_AND_REACHABLE(StringNewWtf8Array, variant, array, + start, end, &result); + Drop(3); + Push(result); + return opcode_length; + } + + int DecodeStringEncodeWtf8Array(unibrow::Utf8Variant variant, + uint32_t opcode_length) { + NON_CONST_ONLY + Value str = Peek(2, 0, kWasmStringRef); + Value array = PeekPackedArray(1, 1, kWasmI8, WasmArrayAccess::kWrite); + Value start = Peek(0, 2, kWasmI32); + Value result = CreateValue(kWasmI32); + CALL_INTERFACE_IF_OK_AND_REACHABLE(StringEncodeWtf8Array, variant, str, + array, start, &result); + Drop(3); + Push(result); + return opcode_length; + } + int DecodeStringRefOpcode(WasmOpcode opcode, uint32_t opcode_length) { switch (opcode) { - case kExprStringNewWtf8: { - NON_CONST_ONLY - EncodeWtf8Immediate imm(this, this->pc_ + opcode_length); - if (!this->Validate(this->pc_ + opcode_length, imm)) return 0; - ValueType addr_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32; - Value offset = Peek(1, 0, addr_type); - Value size = Peek(0, 1, kWasmI32); - Value result = CreateValue(ValueType::Ref(HeapType::kString)); - CALL_INTERFACE_IF_OK_AND_REACHABLE(StringNewWtf8, imm, offset, size, - &result); - Drop(2); - Push(result); - return opcode_length + imm.length; - } + case kExprStringNewUtf8: + return DecodeStringNewWtf8(unibrow::Utf8Variant::kUtf8, opcode_length); + case kExprStringNewLossyUtf8: + return DecodeStringNewWtf8(unibrow::Utf8Variant::kLossyUtf8, + opcode_length); + case kExprStringNewWtf8: + return DecodeStringNewWtf8(unibrow::Utf8Variant::kWtf8, opcode_length); case kExprStringNewWtf16: { NON_CONST_ONLY MemoryIndexImmediate imm(this, this->pc_ + opcode_length); @@ -5184,17 +5255,12 @@ class WasmFullDecoder : public WasmDecoder { Push(result); return opcode_length + imm.length; } - case kExprStringMeasureWtf8: { - NON_CONST_ONLY - Wtf8PolicyImmediate imm(this, this->pc_ + opcode_length); - Value str = Peek(0, 0, kWasmStringRef); - Value result = CreateValue(kWasmI32); - CALL_INTERFACE_IF_OK_AND_REACHABLE(StringMeasureWtf8, imm, str, - &result); - Drop(str); - Push(result); - return opcode_length + imm.length; - } + case kExprStringMeasureUtf8: + return DecodeStringMeasureWtf8(unibrow::Utf8Variant::kUtf8, + opcode_length); + case kExprStringMeasureWtf8: + return DecodeStringMeasureWtf8(unibrow::Utf8Variant::kWtf8, + opcode_length); case kExprStringMeasureWtf16: { NON_CONST_ONLY Value str = Peek(0, 0, kWasmStringRef); @@ -5204,20 +5270,15 @@ class WasmFullDecoder : public WasmDecoder { Push(result); return opcode_length; } - case kExprStringEncodeWtf8: { - NON_CONST_ONLY - EncodeWtf8Immediate imm(this, this->pc_ + opcode_length); - if (!this->Validate(this->pc_ + opcode_length, imm)) return 0; - ValueType addr_type = 
this->module_->is_memory64 ? kWasmI64 : kWasmI32; - Value str = Peek(1, 0, kWasmStringRef); - Value addr = Peek(0, 1, addr_type); - Value result = CreateValue(kWasmI32); - CALL_INTERFACE_IF_OK_AND_REACHABLE(StringEncodeWtf8, imm, str, addr, - &result); - Drop(2); - Push(result); - return opcode_length + imm.length; - } + case kExprStringEncodeUtf8: + return DecodeStringEncodeWtf8(unibrow::Utf8Variant::kUtf8, + opcode_length); + case kExprStringEncodeLossyUtf8: + return DecodeStringEncodeWtf8(unibrow::Utf8Variant::kLossyUtf8, + opcode_length); + case kExprStringEncodeWtf8: + return DecodeStringEncodeWtf8(unibrow::Utf8Variant::kWtf8, + opcode_length); case kExprStringEncodeWtf16: { NON_CONST_ONLY MemoryIndexImmediate imm(this, this->pc_ + opcode_length); @@ -5282,25 +5343,15 @@ class WasmFullDecoder : public WasmDecoder { Push(result); return opcode_length; } - case kExprStringViewWtf8Encode: { - NON_CONST_ONLY - EncodeWtf8Immediate imm(this, this->pc_ + opcode_length); - if (!this->Validate(this->pc_ + opcode_length, imm)) return 0; - ValueType addr_type = this->module_->is_memory64 ? kWasmI64 : kWasmI32; - Value view = Peek(3, 0, kWasmStringViewWtf8); - Value addr = Peek(2, 1, addr_type); - Value pos = Peek(1, 2, kWasmI32); - Value bytes = Peek(0, 3, kWasmI32); - Value next_pos = CreateValue(kWasmI32); - Value bytes_out = CreateValue(kWasmI32); - CALL_INTERFACE_IF_OK_AND_REACHABLE(StringViewWtf8Encode, imm, view, - addr, pos, bytes, &next_pos, - &bytes_out); - Drop(4); - Push(next_pos); - Push(bytes_out); - return opcode_length + imm.length; - } + case kExprStringViewWtf8EncodeUtf8: + return DecodeStringViewWtf8Encode(unibrow::Utf8Variant::kUtf8, + opcode_length); + case kExprStringViewWtf8EncodeLossyUtf8: + return DecodeStringViewWtf8Encode(unibrow::Utf8Variant::kLossyUtf8, + opcode_length); + case kExprStringViewWtf8EncodeWtf8: + return DecodeStringViewWtf8Encode(unibrow::Utf8Variant::kWtf8, + opcode_length); case kExprStringViewWtf8Slice: { NON_CONST_ONLY Value view = Peek(2, 0, kWasmStringViewWtf8); @@ -5370,7 +5421,6 @@ class WasmFullDecoder : public WasmDecoder { Push(result); return opcode_length; } - case kExprStringAsIter: { NON_CONST_ONLY Value str = Peek(0, 0, kWasmStringRef); @@ -5422,20 +5472,18 @@ class WasmFullDecoder : public WasmDecoder { Push(result); return opcode_length; } - case kExprStringNewWtf8Array: { + case kExprStringNewUtf8Array: CHECK_PROTOTYPE_OPCODE(gc); - NON_CONST_ONLY - Wtf8PolicyImmediate imm(this, this->pc_ + opcode_length); - Value array = PeekPackedArray(2, 0, kWasmI8, WasmArrayAccess::kRead); - Value start = Peek(1, 1, kWasmI32); - Value end = Peek(0, 2, kWasmI32); - Value result = CreateValue(ValueType::Ref(HeapType::kString)); - CALL_INTERFACE_IF_OK_AND_REACHABLE(StringNewWtf8Array, imm, array, - start, end, &result); - Drop(3); - Push(result); - return opcode_length + imm.length; - } + return DecodeStringNewWtf8Array(unibrow::Utf8Variant::kUtf8, + opcode_length); + case kExprStringNewLossyUtf8Array: + CHECK_PROTOTYPE_OPCODE(gc); + return DecodeStringNewWtf8Array(unibrow::Utf8Variant::kLossyUtf8, + opcode_length); + case kExprStringNewWtf8Array: + CHECK_PROTOTYPE_OPCODE(gc); + return DecodeStringNewWtf8Array(unibrow::Utf8Variant::kWtf8, + opcode_length); case kExprStringNewWtf16Array: { CHECK_PROTOTYPE_OPCODE(gc); NON_CONST_ONLY @@ -5449,20 +5497,18 @@ class WasmFullDecoder : public WasmDecoder { Push(result); return opcode_length; } - case kExprStringEncodeWtf8Array: { + case kExprStringEncodeUtf8Array: CHECK_PROTOTYPE_OPCODE(gc); - NON_CONST_ONLY 
- Wtf8PolicyImmediate imm(this, this->pc_ + opcode_length); - Value str = Peek(2, 0, kWasmStringRef); - Value array = PeekPackedArray(1, 1, kWasmI8, WasmArrayAccess::kWrite); - Value start = Peek(0, 2, kWasmI32); - Value result = CreateValue(kWasmI32); - CALL_INTERFACE_IF_OK_AND_REACHABLE(StringEncodeWtf8Array, imm, str, - array, start, &result); - Drop(3); - Push(result); - return opcode_length + imm.length; - } + return DecodeStringEncodeWtf8Array(unibrow::Utf8Variant::kUtf8, + opcode_length); + case kExprStringEncodeLossyUtf8Array: + CHECK_PROTOTYPE_OPCODE(gc); + return DecodeStringEncodeWtf8Array(unibrow::Utf8Variant::kLossyUtf8, + opcode_length); + case kExprStringEncodeWtf8Array: + CHECK_PROTOTYPE_OPCODE(gc); + return DecodeStringEncodeWtf8Array(unibrow::Utf8Variant::kWtf8, + opcode_length); case kExprStringEncodeWtf16Array: { CHECK_PROTOTYPE_OPCODE(gc); NON_CONST_ONLY diff --git a/src/wasm/graph-builder-interface.cc b/src/wasm/graph-builder-interface.cc index 3219580a97..78a6b5572a 100644 --- a/src/wasm/graph-builder-interface.cc +++ b/src/wasm/graph-builder-interface.cc @@ -1371,18 +1371,18 @@ class WasmGraphBuildingInterface { } void StringNewWtf8(FullDecoder* decoder, - const EncodeWtf8Immediate& imm, - const Value& offset, const Value& size, Value* result) { - SetAndTypeNode(result, - builder_->StringNewWtf8(imm.memory.index, imm.policy.value, - offset.node, size.node)); + const MemoryIndexImmediate& memory, + const unibrow::Utf8Variant variant, const Value& offset, + const Value& size, Value* result) { + SetAndTypeNode(result, builder_->StringNewWtf8(memory.index, variant, + offset.node, size.node)); } void StringNewWtf8Array(FullDecoder* decoder, - const Wtf8PolicyImmediate& imm, + const unibrow::Utf8Variant variant, const Value& array, const Value& start, const Value& end, Value* result) { - SetAndTypeNode(result, builder_->StringNewWtf8Array(imm.value, array.node, + SetAndTypeNode(result, builder_->StringNewWtf8Array(variant, array.node, start.node, end.node)); } @@ -1406,15 +1406,15 @@ class WasmGraphBuildingInterface { } void StringMeasureWtf8(FullDecoder* decoder, - const Wtf8PolicyImmediate& imm, - const Value& str, Value* result) { - switch (imm.value) { - case kWtf8PolicyReject: + const unibrow::Utf8Variant variant, const Value& str, + Value* result) { + switch (variant) { + case unibrow::Utf8Variant::kUtf8: result->node = builder_->StringMeasureUtf8( str.node, NullCheckFor(str.type), decoder->position()); break; - case kWtf8PolicyAccept: - case kWtf8PolicyReplace: + case unibrow::Utf8Variant::kLossyUtf8: + case unibrow::Utf8Variant::kWtf8: result->node = builder_->StringMeasureWtf8( str.node, NullCheckFor(str.type), decoder->position()); break; @@ -1428,19 +1428,20 @@ class WasmGraphBuildingInterface { } void StringEncodeWtf8(FullDecoder* decoder, - const EncodeWtf8Immediate& imm, - const Value& str, const Value& offset, Value* result) { - result->node = builder_->StringEncodeWtf8( - imm.memory.index, imm.policy.value, str.node, NullCheckFor(str.type), - offset.node, decoder->position()); + const MemoryIndexImmediate& memory, + const unibrow::Utf8Variant variant, const Value& str, + const Value& offset, Value* result) { + result->node = builder_->StringEncodeWtf8(memory.index, variant, str.node, + NullCheckFor(str.type), + offset.node, decoder->position()); } void StringEncodeWtf8Array(FullDecoder* decoder, - const Wtf8PolicyImmediate& imm, + const unibrow::Utf8Variant variant, const Value& str, const Value& array, const Value& start, Value* result) { result->node = 
builder_->StringEncodeWtf8Array( - imm.value, str.node, NullCheckFor(str.type), array.node, + variant, str.node, NullCheckFor(str.type), array.node, NullCheckFor(array.type), start.node, decoder->position()); } @@ -1495,14 +1496,15 @@ class WasmGraphBuildingInterface { } void StringViewWtf8Encode(FullDecoder* decoder, - const EncodeWtf8Immediate& imm, + const MemoryIndexImmediate& memory, + const unibrow::Utf8Variant variant, const Value& view, const Value& addr, const Value& pos, const Value& bytes, Value* next_pos, Value* bytes_written) { - builder_->StringViewWtf8Encode( - imm.memory.index, imm.policy.value, view.node, NullCheckFor(view.type), - addr.node, pos.node, bytes.node, &next_pos->node, &bytes_written->node, - decoder->position()); + builder_->StringViewWtf8Encode(memory.index, variant, view.node, + NullCheckFor(view.type), addr.node, pos.node, + bytes.node, &next_pos->node, + &bytes_written->node, decoder->position()); } void StringViewWtf8Slice(FullDecoder* decoder, const Value& view, diff --git a/src/wasm/wasm-constants.h b/src/wasm/wasm-constants.h index ed9e1a3bc5..316694ec59 100644 --- a/src/wasm/wasm-constants.h +++ b/src/wasm/wasm-constants.h @@ -147,16 +147,6 @@ enum NameSectionKindCode : uint8_t { kTagCode = 11, }; -// What to do when treating a stringref as WTF-8 and we see an isolated -// surrogate. -enum StringRefWtf8Policy : uint8_t { - kWtf8PolicyReject = 0, // Strict UTF-8; no isolated surrogates allowed. - kWtf8PolicyAccept = 1, // Follow WTF-8 encoding of isolates surrogates. - kWtf8PolicyReplace = 2, // Replace isolated surrogates and decoding errors - // with U+FFFD. - kLastWtf8Policy = kWtf8PolicyReplace -}; - constexpr size_t kWasmPageSize = 0x10000; constexpr uint32_t kWasmPageSizeLog2 = 16; static_assert(kWasmPageSize == size_t{1} << kWasmPageSizeLog2, "consistency"); diff --git a/src/wasm/wasm-disassembler.cc b/src/wasm/wasm-disassembler.cc index 78899818fa..d43b911e48 100644 --- a/src/wasm/wasm-disassembler.cc +++ b/src/wasm/wasm-disassembler.cc @@ -375,13 +375,6 @@ class ImmediatesPrinter { out_ << " " << imm.index; // -- } - void Wtf8Policy(Wtf8PolicyImmediate& imm) { - out_ << (imm.value == kWtf8PolicyReject ? " reject" - : imm.value == kWtf8PolicyAccept ? " accept" - : imm.value == kWtf8PolicyReplace ? 
" replace" - : " unknown-policy"); - } - void TagIndex(TagIndexImmediate& imm) { out_ << " "; names()->PrintTagName(out_, imm.index); diff --git a/src/wasm/wasm-opcodes.h b/src/wasm/wasm-opcodes.h index 0c116fad6d..0140dead2f 100644 --- a/src/wasm/wasm-opcodes.h +++ b/src/wasm/wasm-opcodes.h @@ -726,20 +726,28 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig, V(BrOnNonArray, 0xfb67, _, "br_on_non_array") \ V(ExternInternalize, 0xfb70, _, "extern.internalize") \ V(ExternExternalize, 0xfb71, _, "extern.externalize") \ - V(StringNewWtf8, 0xfb80, _, "string.new_wtf8") \ + V(StringNewUtf8, 0xfb80, _, "string.new_utf8") \ V(StringNewWtf16, 0xfb81, _, "string.new_wtf16") \ V(StringConst, 0xfb82, _, "string.const") \ + V(StringMeasureUtf8, 0xfb83, _, "string.measure_utf8") \ V(StringMeasureWtf8, 0xfb84, _, "string.measure_wtf8") \ V(StringMeasureWtf16, 0xfb85, _, "string.measure_wtf16") \ - V(StringEncodeWtf8, 0xfb86, _, "string.encode_wtf8") \ + V(StringEncodeUtf8, 0xfb86, _, "string.encode_utf8") \ V(StringEncodeWtf16, 0xfb87, _, "string.encode_wtf16") \ V(StringConcat, 0xfb88, _, "string.concat") \ V(StringEq, 0xfb89, _, "string.eq") \ V(StringIsUSVSequence, 0xfb8a, _, "string.is_usv_sequence") \ + V(StringNewLossyUtf8, 0xfb8b, _, "string.new_lossy_utf8") \ + V(StringNewWtf8, 0xfb8c, _, "string.new_wtf8") \ + V(StringEncodeLossyUtf8, 0xfb8d, _, "string.encode_lossy_utf8") \ + V(StringEncodeWtf8, 0xfb8e, _, "string.encode_wtf8") \ V(StringAsWtf8, 0xfb90, _, "string.as_wtf8") \ V(StringViewWtf8Advance, 0xfb91, _, "stringview_wtf8.advance") \ - V(StringViewWtf8Encode, 0xfb92, _, "stringview_wtf8.encode") \ + V(StringViewWtf8EncodeUtf8, 0xfb92, _, "stringview_wtf8.encode_utf8") \ V(StringViewWtf8Slice, 0xfb93, _, "stringview_wtf8.slice") \ + V(StringViewWtf8EncodeLossyUtf8, 0xfb94, _, \ + "stringview_wtf8.encode_lossy_utf8") \ + V(StringViewWtf8EncodeWtf8, 0xfb95, _, "stringview_wtf8.encode_wtf8") \ V(StringAsWtf16, 0xfb98, _, "string.as_wtf16") \ V(StringViewWtf16Length, 0xfb99, _, "stringview_wtf16.length") \ V(StringViewWtf16GetCodeUnit, 0xfb9a, _, "stringview_wtf16.get_codeunit") \ @@ -750,10 +758,14 @@ bool V8_EXPORT_PRIVATE IsJSCompatibleSignature(const FunctionSig* sig, V(StringViewIterAdvance, 0xfba2, _, "stringview_iter.advance") \ V(StringViewIterRewind, 0xfba3, _, "stringview_iter.rewind") \ V(StringViewIterSlice, 0xfba4, _, "stringview_iter.slice") \ - V(StringNewWtf8Array, 0xfbb0, _, "string.new_wtf8_array") \ + V(StringNewUtf8Array, 0xfbb0, _, "string.new_utf8_array") \ V(StringNewWtf16Array, 0xfbb1, _, "string.new_wtf16_array") \ - V(StringEncodeWtf8Array, 0xfbb2, _, "string.encode_wtf8_array") \ - V(StringEncodeWtf16Array, 0xfbb3, _, "string.encode_wtf16_array") + V(StringEncodeUtf8Array, 0xfbb2, _, "string.encode_utf8_array") \ + V(StringEncodeWtf16Array, 0xfbb3, _, "string.encode_wtf16_array") \ + V(StringNewLossyUtf8Array, 0xfbb4, _, "string.new_lossy_utf8_array") \ + V(StringNewWtf8Array, 0xfbb5, _, "string.new_wtf8_array") \ + V(StringEncodeLossyUtf8Array, 0xfbb6, _, "string.encode_lossy_utf8_array") \ + V(StringEncodeWtf8Array, 0xfbb7, _, "string.encode_wtf8_array") // All opcodes. 
#define FOREACH_OPCODE(V) \ diff --git a/test/mjsunit/wasm/stringrefs-exec-gc.js b/test/mjsunit/wasm/stringrefs-exec-gc.js index 93b46ceef3..25489df802 100644 --- a/test/mjsunit/wasm/stringrefs-exec-gc.js +++ b/test/mjsunit/wasm/stringrefs-exec-gc.js @@ -114,15 +114,16 @@ function makeWtf8TestDataSegment() { kGCPrefix, kExprArrayNewData, i8_array, data_index ]).index; - for (let [policy, name] of [[kWtf8PolicyAccept, "new_wtf8"], - [kWtf8PolicyReject, "new_utf8"], - [kWtf8PolicyReplace, "new_utf8_sloppy"]]) { + for (let [instr, name] of + [[kExprStringNewWtf8Array, "new_wtf8"], + [kExprStringNewUtf8Array, "new_utf8"], + [kExprStringNewLossyUtf8Array, "new_utf8_sloppy"]]) { builder.addFunction(name, kSig_w_ii) .exportFunc() .addBody([ kExprCallFunction, make_i8_array, kExprLocalGet, 0, kExprLocalGet, 1, - ...GCInstr(kExprStringNewWtf8Array), policy + ...GCInstr(instr) ]); } @@ -133,7 +134,7 @@ function makeWtf8TestDataSegment() { ...wasmI32Const("ascii".length), kGCPrefix, kExprArrayNewData, i8_array, ascii_data_index, kExprLocalGet, 0, kExprLocalGet, 1, - ...GCInstr(kExprStringNewWtf8Array), kWtf8PolicyAccept + ...GCInstr(kExprStringNewWtf8Array) ]); let instance = builder.instantiate(); @@ -268,7 +269,9 @@ function makeWtf16TestDataSegment() { let kSig_w_wii = makeSig([kWasmStringRef, kWasmI32, kWasmI32], [kWasmStringRef]); - for (let [policy, name] of ["utf8", "wtf8", "replace"].entries()) { + for (let [instr, name] of [[kExprStringEncodeUtf8Array, "utf8"], + [kExprStringEncodeWtf8Array, "wtf8"], + [kExprStringEncodeLossyUtf8Array, "replace"]]) { // Allocate an array that's exactly the expected size, and encode // into it. Then decode it. // (str, length, offset=0) -> str @@ -286,14 +289,14 @@ function makeWtf16TestDataSegment() { kExprLocalGet, 0, kExprLocalGet, 3, kExprLocalGet, 2, - ...GCInstr(kExprStringEncodeWtf8Array), policy, + ...GCInstr(instr), kExprLocalSet, 4, // Read buffer. 
kExprLocalGet, 3, kExprLocalGet, 2, kExprLocalGet, 2, kExprLocalGet, 4, kExprI32Add, - ...GCInstr(kExprStringNewWtf8Array), kWtf8PolicyAccept, + ...GCInstr(kExprStringNewWtf8Array) ]); } @@ -303,17 +306,17 @@ function makeWtf16TestDataSegment() { kExprRefNull, kStringRefCode, kExprI32Const, 0, kGCPrefix, kExprArrayNewDefault, i8_array, kExprI32Const, 0, - ...GCInstr(kExprStringEncodeWtf8Array), 0, + ...GCInstr(kExprStringEncodeWtf8Array) ]); builder.addFunction("encode_null_array", kSig_i_v) .exportFunc() .addBody([ kExprI32Const, 0, kGCPrefix, kExprArrayNewDefault, i8_array, kExprI32Const, 0, kExprI32Const, 0, - ...GCInstr(kExprStringNewWtf8Array), kWtf8PolicyAccept, + ...GCInstr(kExprStringNewWtf8Array), kExprRefNull, i8_array, kExprI32Const, 0, - ...GCInstr(kExprStringEncodeWtf8Array), kWtf8PolicyAccept, + ...GCInstr(kExprStringEncodeWtf8Array) ]); let instance = builder.instantiate(); diff --git a/test/mjsunit/wasm/stringrefs-exec.js b/test/mjsunit/wasm/stringrefs-exec.js index 5050aecfaf..9969ad1cf4 100644 --- a/test/mjsunit/wasm/stringrefs-exec.js +++ b/test/mjsunit/wasm/stringrefs-exec.js @@ -162,21 +162,21 @@ function makeWtf8TestDataSegment() { .exportFunc() .addBody([ kExprLocalGet, 0, kExprLocalGet, 1, - ...GCInstr(kExprStringNewWtf8), 0, kWtf8PolicyReject + ...GCInstr(kExprStringNewUtf8), 0 ]); builder.addFunction("string_new_wtf8", kSig_w_ii) .exportFunc() .addBody([ kExprLocalGet, 0, kExprLocalGet, 1, - ...GCInstr(kExprStringNewWtf8), 0, kWtf8PolicyAccept + ...GCInstr(kExprStringNewWtf8), 0 ]); builder.addFunction("string_new_utf8_sloppy", kSig_w_ii) .exportFunc() .addBody([ kExprLocalGet, 0, kExprLocalGet, 1, - ...GCInstr(kExprStringNewWtf8), 0, kWtf8PolicyReplace + ...GCInstr(kExprStringNewLossyUtf8), 0 ]); let instance = builder.instantiate(); @@ -282,50 +282,34 @@ function makeWtf16TestDataSegment() { .exportFunc() .addBody([ kExprLocalGet, 0, - ...GCInstr(kExprStringMeasureWtf8), 0 + ...GCInstr(kExprStringMeasureUtf8) ]); builder.addFunction("string_measure_wtf8", kSig_i_w) .exportFunc() .addBody([ kExprLocalGet, 0, - ...GCInstr(kExprStringMeasureWtf8), 1 - ]); - - builder.addFunction("string_measure_wtf8_replace", kSig_i_w) - .exportFunc() - .addBody([ - kExprLocalGet, 0, - ...GCInstr(kExprStringMeasureWtf8), 2 + ...GCInstr(kExprStringMeasureWtf8) ]); builder.addFunction("string_measure_utf8_null", kSig_i_v) .exportFunc() .addBody([ kExprRefNull, kStringRefCode, - ...GCInstr(kExprStringMeasureWtf8), 0 + ...GCInstr(kExprStringMeasureUtf8) ]); builder.addFunction("string_measure_wtf8_null", kSig_i_v) .exportFunc() .addBody([ kExprRefNull, kStringRefCode, - ...GCInstr(kExprStringMeasureWtf8), 1 - ]); - - builder.addFunction("string_measure_wtf8_replace_null", kSig_i_v) - .exportFunc() - .addBody([ - kExprRefNull, kStringRefCode, - ...GCInstr(kExprStringMeasureWtf8), 2 + ...GCInstr(kExprStringMeasureWtf8) ]); let instance = builder.instantiate(); for (let str of interestingStrings) { let wtf8 = encodeWtf8(str); assertEquals(wtf8.length, instance.exports.string_measure_wtf8(str)); - assertEquals(wtf8.length, - instance.exports.string_measure_wtf8_replace(str)); if (HasIsolatedSurrogate(str)) { assertEquals(-1, instance.exports.string_measure_utf8(str)); } else { @@ -337,8 +321,6 @@ function makeWtf16TestDataSegment() { WebAssembly.RuntimeError, "dereferencing a null pointer"); assertThrows(() => instance.exports.string_measure_wtf8_null(), WebAssembly.RuntimeError, "dereferencing a null pointer"); - assertThrows(() => instance.exports.string_measure_wtf8_replace_null(), - 
WebAssembly.RuntimeError, "dereferencing a null pointer"); })(); (function TestStringMeasureWtf16() { @@ -372,13 +354,15 @@ function makeWtf16TestDataSegment() { builder.addMemory(1, undefined, true /* exported */, false); - for (let [policy, name] of ["utf8", "wtf8", "replace"].entries()) { + for (let [instr, name] of [[kExprStringEncodeUtf8, "utf8"], + [kExprStringEncodeWtf8, "wtf8"], + [kExprStringEncodeLossyUtf8, "replace"]]) { builder.addFunction("encode_" + name, kSig_i_wi) .exportFunc() .addBody([ kExprLocalGet, 0, kExprLocalGet, 1, - ...GCInstr(kExprStringEncodeWtf8), 0, policy, + ...GCInstr(instr), 0, ]); } @@ -860,9 +844,10 @@ function makeWtf16TestDataSegment() { ...GCInstr(kExprStringViewWtf8Advance) ]); - for (let [name, policy] of Object.entries({utf8: kWtf8PolicyReject, - wtf8: kWtf8PolicyAccept, - replace: kWtf8PolicyReplace})) { + for (let [instr, name] of + [[kExprStringViewWtf8EncodeUtf8, "utf8"], + [kExprStringViewWtf8EncodeWtf8, "wtf8"], + [kExprStringViewWtf8EncodeLossyUtf8, "replace"]]) { builder.addFunction(`encode_${name}`, kSig_ii_wiii) .exportFunc() .addBody([ @@ -871,7 +856,7 @@ function makeWtf16TestDataSegment() { kExprLocalGet, 1, kExprLocalGet, 2, kExprLocalGet, 3, - ...GCInstr(kExprStringViewWtf8Encode), 0, policy + ...GCInstr(instr), 0 ]); } builder.addFunction("encode_null", kSig_v_v) @@ -881,7 +866,7 @@ function makeWtf16TestDataSegment() { kExprI32Const, 0, kExprI32Const, 0, kExprI32Const, 0, - ...GCInstr(kExprStringViewWtf8Encode), 0, kWtf8PolicyAccept, + ...GCInstr(kExprStringViewWtf8EncodeWtf8), 0, kExprDrop, kExprDrop ]); diff --git a/test/mjsunit/wasm/stringrefs-valid.js b/test/mjsunit/wasm/stringrefs-valid.js index 5a7ec9c47e..9bcc294e99 100644 --- a/test/mjsunit/wasm/stringrefs-valid.js +++ b/test/mjsunit/wasm/stringrefs-valid.js @@ -72,22 +72,20 @@ let kSig_w_zi = makeSig([kWasmStringViewIter, kWasmI32], builder.addMemory(0, undefined, false, false); - builder.addFunction("string.new_wtf8/reject", kSig_w_ii) + builder.addFunction("string.new_utf8", kSig_w_ii) .addBody([ kExprLocalGet, 0, kExprLocalGet, 1, - ...GCInstr(kExprStringNewWtf8), 0, kWtf8PolicyReject + ...GCInstr(kExprStringNewUtf8), 0 ]); - - builder.addFunction("string.new_wtf8/accept", kSig_w_ii) + builder.addFunction("string.new_lossy_utf8", kSig_w_ii) .addBody([ kExprLocalGet, 0, kExprLocalGet, 1, - ...GCInstr(kExprStringNewWtf8), 0, kWtf8PolicyAccept + ...GCInstr(kExprStringNewLossyUtf8), 0 ]); - - builder.addFunction("string.new_wtf8/replace", kSig_w_ii) + builder.addFunction("string.new_wtf8", kSig_w_ii) .addBody([ kExprLocalGet, 0, kExprLocalGet, 1, - ...GCInstr(kExprStringNewWtf8), 0, kWtf8PolicyReplace + ...GCInstr(kExprStringNewWtf8), 0 ]); builder.addFunction("string.new_wtf16", kSig_w_ii) @@ -102,22 +100,15 @@ let kSig_w_zi = makeSig([kWasmStringViewIter, kWasmI32], ...GCInstr(kExprStringConst), 0 ]); - builder.addFunction("string.measure_wtf8/utf-8", kSig_i_w) + builder.addFunction("string.measure_utf8", kSig_i_w) .addBody([ kExprLocalGet, 0, - ...GCInstr(kExprStringMeasureWtf8), kWtf8PolicyReject + ...GCInstr(kExprStringMeasureUtf8) ]); - - builder.addFunction("string.measure_wtf8/wtf-8", kSig_i_w) + builder.addFunction("string.measure_wtf8", kSig_i_w) .addBody([ kExprLocalGet, 0, - ...GCInstr(kExprStringMeasureWtf8), kWtf8PolicyAccept - ]); - - builder.addFunction("string.measure_wtf8/replace", kSig_i_w) - .addBody([ - kExprLocalGet, 0, - ...GCInstr(kExprStringMeasureWtf8), kWtf8PolicyReplace + ...GCInstr(kExprStringMeasureWtf8) ]); 
builder.addFunction("string.measure_wtf16", kSig_i_w) @@ -126,20 +117,20 @@ let kSig_w_zi = makeSig([kWasmStringViewIter, kWasmI32], ...GCInstr(kExprStringMeasureWtf16) ]); - builder.addFunction("string.encode_wtf8/utf-8", kSig_i_wi) + builder.addFunction("string.encode_utf8", kSig_i_wi) .addBody([ kExprLocalGet, 0, kExprLocalGet, 1, - ...GCInstr(kExprStringEncodeWtf8), 0, kWtf8PolicyAccept + ...GCInstr(kExprStringEncodeUtf8), 0 ]); - builder.addFunction("string.encode_wtf8/wtf-8", kSig_i_wi) + builder.addFunction("string.encode_lossy_utf8", kSig_i_wi) .addBody([ kExprLocalGet, 0, kExprLocalGet, 1, - ...GCInstr(kExprStringEncodeWtf8), 0, kWtf8PolicyReject + ...GCInstr(kExprStringEncodeLossyUtf8), 0 ]); - builder.addFunction("string.encode_wtf8/replace", kSig_i_wi) + builder.addFunction("string.encode_wtf8", kSig_i_wi) .addBody([ kExprLocalGet, 0, kExprLocalGet, 1, - ...GCInstr(kExprStringEncodeWtf8), 0, kWtf8PolicyReplace + ...GCInstr(kExprStringEncodeWtf8), 0 ]); builder.addFunction("string.encode_wtf16", kSig_i_wi) @@ -172,22 +163,20 @@ let kSig_w_zi = makeSig([kWasmStringViewIter, kWasmI32], ...GCInstr(kExprStringViewWtf8Advance) ]); - builder.addFunction("stringview_wtf8.encode/utf-8", kSig_ii_xiii) + builder.addFunction("stringview_wtf8.encode_utf8", kSig_ii_xiii) .addBody([ kExprLocalGet, 0, kExprLocalGet, 1, kExprLocalGet, 2, kExprLocalGet, 3, - ...GCInstr(kExprStringViewWtf8Encode), 0, 0 + ...GCInstr(kExprStringViewWtf8EncodeUtf8), 0 ]); - - builder.addFunction("stringview_wtf8.encode/wtf-8", kSig_ii_xiii) + builder.addFunction("stringview_wtf8.encode_lossy_utf8", kSig_ii_xiii) .addBody([ kExprLocalGet, 0, kExprLocalGet, 1, kExprLocalGet, 2, kExprLocalGet, 3, - ...GCInstr(kExprStringViewWtf8Encode), 0, 1 + ...GCInstr(kExprStringViewWtf8EncodeLossyUtf8), 0 ]); - - builder.addFunction("stringview_wtf8.encode/replace", kSig_ii_xiii) + builder.addFunction("stringview_wtf8.encode_wtf8", kSig_ii_xiii) .addBody([ kExprLocalGet, 0, kExprLocalGet, 1, kExprLocalGet, 2, kExprLocalGet, 3, - ...GCInstr(kExprStringViewWtf8Encode), 0, 2 + ...GCInstr(kExprStringViewWtf8EncodeWtf8), 0 ]); builder.addFunction("stringview_wtf8.slice", kSig_w_xii) @@ -259,28 +248,26 @@ let kSig_w_zi = makeSig([kWasmStringViewIter, kWasmI32], let i8_array = builder.addArray(kWasmI8, true); let i16_array = builder.addArray(kWasmI16, true); - builder.addFunction("string.new_wtf8_array/accept", kSig_w_v) + builder.addFunction("string.new_utf8_array", kSig_w_v) .addBody([ kExprRefNull, i8_array, kExprI32Const, 0, kExprI32Const, 0, - ...GCInstr(kExprStringNewWtf8Array), kWtf8PolicyAccept + ...GCInstr(kExprStringNewWtf8Array) ]); - - builder.addFunction("string.new_wtf8_array/reject", kSig_w_v) + builder.addFunction("string.new_lossy_utf8_array", kSig_w_v) .addBody([ kExprRefNull, i8_array, kExprI32Const, 0, kExprI32Const, 0, - ...GCInstr(kExprStringNewWtf8Array), kWtf8PolicyReject + ...GCInstr(kExprStringNewLossyUtf8Array) ]); - - builder.addFunction("string.new_wtf8_array/replace", kSig_w_v) + builder.addFunction("string.new_wtf8_array", kSig_w_v) .addBody([ kExprRefNull, i8_array, kExprI32Const, 0, kExprI32Const, 0, - ...GCInstr(kExprStringNewWtf8Array), kWtf8PolicyReplace + ...GCInstr(kExprStringNewWtf8Array) ]); builder.addFunction("string.new_wtf16_array", kSig_w_v) @@ -291,28 +278,26 @@ let kSig_w_zi = makeSig([kWasmStringViewIter, kWasmI32], ...GCInstr(kExprStringNewWtf16Array) ]); - builder.addFunction("string.encode_wtf8_array/accept", kSig_i_v) + builder.addFunction("string.encode_utf8_array", kSig_i_v) .addBody([ 
kExprRefNull, kStringRefCode, kExprRefNull, i8_array, kExprI32Const, 0, - ...GCInstr(kExprStringEncodeWtf8Array), kWtf8PolicyAccept + ...GCInstr(kExprStringEncodeUtf8Array) ]); - - builder.addFunction("string.encode_wtf8_array/reject", kSig_i_v) + builder.addFunction("string.encode_lossy_utf8_array", kSig_i_v) .addBody([ kExprRefNull, kStringRefCode, kExprRefNull, i8_array, kExprI32Const, 0, - ...GCInstr(kExprStringEncodeWtf8Array), kWtf8PolicyReject + ...GCInstr(kExprStringEncodeLossyUtf8Array) ]); - - builder.addFunction("string.encode_wtf8_array/replace", kSig_i_v) + builder.addFunction("string.encode_wtf8_array", kSig_i_v) .addBody([ kExprRefNull, kStringRefCode, kExprRefNull, i8_array, kExprI32Const, 0, - ...GCInstr(kExprStringEncodeWtf8Array), kWtf8PolicyReplace + ...GCInstr(kExprStringEncodeWtf8Array) ]); builder.addFunction("string.encode_wtf16_array", kSig_i_v) @@ -340,7 +325,7 @@ assertInvalid( builder.addFunction("string.new_wtf8/no-mem", kSig_w_ii) .addBody([ kExprLocalGet, 0, kExprLocalGet, 1, - ...GCInstr(kExprStringNewWtf8), 0, kWtf8PolicyAccept + ...GCInstr(kExprStringNewWtf8), 0 ]); }, /memory instruction with no memory/); @@ -351,7 +336,7 @@ assertInvalid( builder.addFunction("string.new_wtf8/bad-mem", kSig_w_ii) .addBody([ kExprLocalGet, 0, kExprLocalGet, 1, - ...GCInstr(kExprStringNewWtf8), 1, kWtf8PolicyAccept + ...GCInstr(kExprStringNewWtf8), 1 ]); }, /expected memory index 0, found 1/); @@ -361,7 +346,7 @@ assertInvalid( builder.addFunction("string.encode_wtf8/no-mem", kSig_i_wi) .addBody([ kExprLocalGet, 0, kExprLocalGet, 1, - ...GCInstr(kExprStringEncodeWtf8), 0, kWtf8PolicyAccept + ...GCInstr(kExprStringEncodeWtf8), 0 ]); }, /memory instruction with no memory/); @@ -372,45 +357,11 @@ assertInvalid( builder.addFunction("string.encode_wtf8/bad-mem", kSig_i_wi) .addBody([ kExprLocalGet, 0, kExprLocalGet, 1, - ...GCInstr(kExprStringEncodeWtf8), 1, kWtf8PolicyAccept + ...GCInstr(kExprStringEncodeWtf8), 1 ]); }, /expected memory index 0, found 1/); -assertInvalid( - builder => { - builder.addMemory(0, undefined, false, false); - builder.addFunction("string.encode_wtf8/bad-policy", kSig_i_wi) - .addBody([ - kExprLocalGet, 0, kExprLocalGet, 1, - ...GCInstr(kExprStringEncodeWtf8), 0, 3 - ]); - }, - /expected wtf8 policy 0, 1, or 2, but found 3/); - -assertInvalid( - builder => { - builder.addFunction("string.measure_wtf8/bad-policy", kSig_i_w) - .addBody([ - kExprLocalGet, 0, - ...GCInstr(kExprStringMeasureWtf8), 3 - ]); - }, - /expected wtf8 policy 0, 1, or 2, but found 3/); - -assertInvalid( - builder => { - let i8_array = builder.addArray(kWasmI8, true); - builder.addFunction("string.new_wtf8_array/bad-policy", kSig_w_v) - .addBody([ - kExprRefNull, i8_array, - kExprI32Const, 0, - kExprI32Const, 0, - ...GCInstr(kExprStringNewWtf8Array), 3 - ]); - }, - /expected wtf8 policy 0, 1, or 2, but found 3/); - assertInvalid( builder => { let i16_array = builder.addArray(kWasmI16, true); @@ -419,7 +370,7 @@ assertInvalid( kExprRefNull, i16_array, kExprI32Const, 0, kExprI32Const, 0, - ...GCInstr(kExprStringNewWtf8Array), kWtf8PolicyAccept + ...GCInstr(kExprStringNewWtf8Array) ]); }, /string.new_wtf8_array\[0\] expected array of i8, found ref.null of type \(ref null 0\)/); @@ -449,7 +400,7 @@ assertInvalid( kExprLocalGet, 0, kExprLocalGet, 1, kExprLocalGet, 2, - ...GCInstr(kExprStringEncodeWtf8Array), kWtf8PolicyAccept, + ...GCInstr(kExprStringEncodeWtf8Array) ]); }, /string.encode_wtf8_array\[1\] expected array of mutable i8, found local.get of type \(ref 0\)/); diff --git 
a/test/mjsunit/wasm/wasm-module-builder.js b/test/mjsunit/wasm/wasm-module-builder.js
index a3ee50027c..390337def0 100644
--- a/test/mjsunit/wasm/wasm-module-builder.js
+++ b/test/mjsunit/wasm/wasm-module-builder.js
@@ -526,20 +526,27 @@ let kExprBrOnNonI31 = 0x65;
 let kExprBrOnNonArray = 0x67;
 let kExprExternInternalize = 0x70;
 let kExprExternExternalize = 0x71;
-let kExprStringNewWtf8 = 0x80;
+let kExprStringNewUtf8 = 0x80;
 let kExprStringNewWtf16 = 0x81;
 let kExprStringConst = 0x82;
+let kExprStringMeasureUtf8 = 0x83;
 let kExprStringMeasureWtf8 = 0x84;
 let kExprStringMeasureWtf16 = 0x85;
-let kExprStringEncodeWtf8 = 0x86;
+let kExprStringEncodeUtf8 = 0x86;
 let kExprStringEncodeWtf16 = 0x87;
 let kExprStringConcat = 0x88;
 let kExprStringEq = 0x89;
 let kExprStringIsUsvSequence = 0x8a;
+let kExprStringNewLossyUtf8 = 0x8b;
+let kExprStringNewWtf8 = 0x8c;
+let kExprStringEncodeLossyUtf8 = 0x8d;
+let kExprStringEncodeWtf8 = 0x8e;
 let kExprStringAsWtf8 = 0x90;
 let kExprStringViewWtf8Advance = 0x91;
-let kExprStringViewWtf8Encode = 0x92;
+let kExprStringViewWtf8EncodeUtf8 = 0x92;
 let kExprStringViewWtf8Slice = 0x93;
+let kExprStringViewWtf8EncodeLossyUtf8 = 0x94;
+let kExprStringViewWtf8EncodeWtf8 = 0x95;
 let kExprStringAsWtf16 = 0x98;
 let kExprStringViewWtf16Length = 0x99;
 let kExprStringViewWtf16GetCodeunit = 0x9a;
@@ -550,10 +557,14 @@ let kExprStringViewIterNext = 0xa1
 let kExprStringViewIterAdvance = 0xa2;
 let kExprStringViewIterRewind = 0xa3
 let kExprStringViewIterSlice = 0xa4;
-let kExprStringNewWtf8Array = 0xb0;
+let kExprStringNewUtf8Array = 0xb0;
 let kExprStringNewWtf16Array = 0xb1;
-let kExprStringEncodeWtf8Array = 0xb2;
+let kExprStringEncodeUtf8Array = 0xb2;
 let kExprStringEncodeWtf16Array = 0xb3;
+let kExprStringNewLossyUtf8Array = 0xb4;
+let kExprStringNewWtf8Array = 0xb5;
+let kExprStringEncodeLossyUtf8Array = 0xb6;
+let kExprStringEncodeWtf8Array = 0xb7;
 
 // Numeric opcodes.
 let kExprI32SConvertSatF32 = 0x00;
@@ -883,11 +894,6 @@ let kExprI32x4TruncSatF64x2UZero = 0xfd;
 let kExprF64x2ConvertLowI32x4S = 0xfe;
 let kExprF64x2ConvertLowI32x4U = 0xff;
 
-// WTF-8 parsing policies.
-let kWtf8PolicyReject = 0;
-let kWtf8PolicyAccept = 1;
-let kWtf8PolicyReplace = 2;
-
 // Compilation hint constants.
 let kCompilationHintStrategyDefault = 0x00;
 let kCompilationHintStrategyLazy = 0x01;
diff --git a/test/unittests/wasm/function-body-decoder-unittest.cc b/test/unittests/wasm/function-body-decoder-unittest.cc
index c803f83829..52f0e4696f 100644
--- a/test/unittests/wasm/function-body-decoder-unittest.cc
+++ b/test/unittests/wasm/function-body-decoder-unittest.cc
@@ -4864,8 +4864,8 @@ TEST_F(WasmOpcodeLengthTest, GCOpcodes) {
   ExpectLength(3, 0xfb, 0x07, 0x42);
   ExpectLength(4, 0xfb, 0x07, 0x80, 0x00);
 
-  // string.new_wtf8 with $mem=0, $policy=0.
-  ExpectLength(5, 0xfb, 0x80, 0x01, 0x00, 0x00);
+  // string.new_utf8 with $mem=0.
+  ExpectLength(4, 0xfb, 0x80, 0x01, 0x00);
 
   // string.as_wtf8.
   ExpectLength(3, 0xfb, 0x90, 0x01);

From edb57e37ae2b86e10c4cf2532f96cf71d67047b8 Mon Sep 17 00:00:00 2001
From: Frank Tang
Date: Tue, 13 Sep 2022 10:30:05 -0700
Subject: [PATCH 0086/1772] [Temporal] Fix TimeZone get*Transition

1. Return null if the transition is out of bounds.
2. Remove the incorrect MAYBE_RETURN; the Nothing case is already handled
   by the IsNothing check.
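
For illustration, a sketch of the intended boundary behavior (hypothetical
snippet in mjsunit style; any zone with indefinitely repeating DST rules
works):

  // nsMaxInstant is 8.64 x 10^21, i.e. epoch nanosecond
  // 8640000000000000000000.
  let max = Temporal.Instant.fromEpochNanoseconds(8640000000000000000000n);
  let tz = Temporal.TimeZone.from('America/Los_Angeles');
  // The next transition would lie beyond the last representable instant,
  // so the answer is now null rather than a value outside
  // [nsMinInstant, nsMaxInstant].
  assertEquals(null, tz.getNextTransition(max));
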
Bug: v8:11544 Change-Id: Ia54f68831120bd2460cb813464168b1a2c92da3d Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3893595 Commit-Queue: Frank Tang Reviewed-by: Adam Klein Cr-Commit-Position: refs/heads/main@{#83171} --- src/objects/js-temporal-objects.cc | 23 +++++++++++++++++++---- test/test262/test262.status | 4 +--- 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/src/objects/js-temporal-objects.cc b/src/objects/js-temporal-objects.cc index a098eb454f..2f3b106f6d 100644 --- a/src/objects/js-temporal-objects.cc +++ b/src/objects/js-temporal-objects.cc @@ -10922,16 +10922,31 @@ MaybeHandle GetIANATimeZoneTransition(Isolate* isolate, .ToHandleChecked() ->AsInt64(), transition); - MAYBE_RETURN(maybe_transition, Handle()); // If there are no transition in this timezone, return null. if (maybe_transition.IsNothing()) { return isolate->factory()->null_value(); } + // #sec-temporal-getianatimezonenexttransition and + // #sec-temporal-getianatimezoneprevioustransition states: + // "The operation returns null if no such transition exists for which t ≤ + // ℤ(nsMaxInstant)." and "The operation returns null if no such transition + // exists for which t ≥ ℤ(nsMinInstant)." + // + // nsMinInstant = -nsMaxInstant = -8.64 × 10^21 => msMinInstant = -8.64 x + // 10^15 + constexpr int64_t kMsMinInstant = -8.64e15; + // nsMaxInstant = 10^8 × nsPerDay = 8.64 × 10^21 => msMaxInstant = 8.64 x + // 10^15 + constexpr int64_t kMsMaxInstant = 8.64e15; + + int64_t ms = maybe_transition.FromJust(); + if (ms < kMsMinInstant || ms > kMsMaxInstant) { + return isolate->factory()->null_value(); + } + // Convert the transition from milliseconds to nanoseconds. - return BigInt::Multiply( - isolate, BigInt::FromInt64(isolate, maybe_transition.FromJust()), - one_million); + return BigInt::Multiply(isolate, BigInt::FromInt64(isolate, ms), one_million); } // #sec-temporal-getianatimezonenexttransition MaybeHandle GetIANATimeZoneNextTransition(Isolate* isolate, diff --git a/test/test262/test262.status b/test/test262/test262.status index 9a0f8c504c..d7b120af66 100644 --- a/test/test262/test262.status +++ b/test/test262/test262.status @@ -535,10 +535,8 @@ 'intl402/Temporal/TimeZone/prototype/getNextTransition/subtract-second-and-nanosecond-from-last-transition': [FAIL], 'intl402/Temporal/TimeZone/prototype/getPreviousTransition/nanoseconds-subtracted-or-added-at-dst-transition': [FAIL], - 'intl402/Temporal/TimeZone/prototype/getNextTransition/transition-at-instant-boundaries': [FAIL], 'intl402/Temporal/TimeZone/prototype/getOffsetNanosecondsFor/nanoseconds-subtracted-or-added-at-dst-transition': [FAIL], 'intl402/Temporal/TimeZone/prototype/getPlainDateTimeFor/dst': [FAIL], - 'intl402/Temporal/TimeZone/prototype/getPreviousTransition/transition-at-instant-boundaries': [FAIL], 'staging/Temporal/Duration/old/add': [FAIL], 'staging/Temporal/Duration/old/limits': [FAIL], 'staging/Temporal/Duration/old/round': [FAIL], @@ -547,7 +545,6 @@ 'staging/Temporal/Duration/old/total': [FAIL], 'staging/Temporal/Regex/old/plaintime': [FAIL], 'staging/Temporal/Regex/old/timezone': [FAIL], - 'staging/Temporal/TimeZone/old/getNextTransition': [FAIL], 'staging/Temporal/TimeZone/old/subminute-offset': [FAIL], 'staging/Temporal/ZonedDateTime/old/construction-and-properties': [FAIL], 'staging/Temporal/ZonedDateTime/old/dst-math': [FAIL], @@ -813,6 +810,7 @@ 'staging/Temporal/TimeZone/old/dst-change': [FAIL], 'staging/Temporal/TimeZone/old/getInstantFor': [FAIL], 
'staging/Temporal/TimeZone/old/getInstantFor-disambiguation': [FAIL], + 'staging/Temporal/TimeZone/old/getNextTransition': [FAIL], 'staging/Temporal/TimeZone/old/getPossibleInstantsFor': [FAIL], 'staging/Temporal/TimeZone/old/getPreviousTransition': [FAIL], 'staging/Temporal/TimeZone/old/timezone-america-la': [FAIL], From 2847ad2e2027591e2b35e03beb61b1015f47f201 Mon Sep 17 00:00:00 2001 From: Frank Tang Date: Mon, 12 Sep 2022 23:56:51 -0700 Subject: [PATCH 0087/1772] [Temporal] Sync to PR 2291 Remove RegulateISODate after BalanceISODate and inline one call to AddISODate https://github.com/tc39/proposal-temporal/pull/2291/files Spec Text: https://tc39.es/proposal-temporal/#sec-temporal-addisodate https://tc39.es/proposal-temporal/#sec-get-temporal.zoneddatetime.prototype.hoursinday Bug: v8:11544 Change-Id: I4d5faaa48a26d37015c82bc06b3414698db9945d Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3893558 Commit-Queue: Frank Tang Reviewed-by: Adam Klein Cr-Commit-Position: refs/heads/main@{#83172} --- src/objects/js-temporal-objects.cc | 23 ++++++----------------- 1 file changed, 6 insertions(+), 17 deletions(-) diff --git a/src/objects/js-temporal-objects.cc b/src/objects/js-temporal-objects.cc index 2f3b106f6d..0251da5c72 100644 --- a/src/objects/js-temporal-objects.cc +++ b/src/objects/js-temporal-objects.cc @@ -9744,12 +9744,8 @@ Maybe AddISODate(Isolate* isolate, // 5. Set days to days + 7 × weeks. // 6. Let d be intermediate.[[Day]] + days. intermediate.day += duration.days + 7 * duration.weeks; - // 7. Let intermediate be ! BalanceISODate(intermediate.[[Year]], - // intermediate.[[Month]], d). - intermediate = BalanceISODate(isolate, intermediate); - // 8. Return ? RegulateISODate(intermediate.[[Year]], intermediate.[[Month]], - // intermediate.[[Day]], overflow). - return RegulateISODate(isolate, overflow, intermediate); + // 7. Return BalanceISODate(intermediate.[[Year]], intermediate.[[Month]], d). + return Just(BalanceISODate(isolate, intermediate)); } // #sec-temporal-differenceisodate @@ -15867,17 +15863,10 @@ MaybeHandle JSTemporalZonedDateTime::HoursInDay( {0, 0, 0, 0, 0, 0}}, iso_calendar), Smi); - // 11. Let tomorrowFields be ? AddISODate(year, month, day, 0, 0, 0, 1, - // "reject"). - DateRecordCommon tomorrow_fields; - MAYBE_ASSIGN_RETURN_ON_EXCEPTION_VALUE( - isolate, tomorrow_fields, - AddISODate( - isolate, - {temporal_date_time->iso_year(), temporal_date_time->iso_month(), - temporal_date_time->iso_day()}, - {0, 0, 0, 1}, ShowOverflow::kReject), - Handle()); + // 11. Let tomorrowFields be BalanceISODate(year, month, day + 1). + DateRecordCommon tomorrow_fields = BalanceISODate( + isolate, {temporal_date_time->iso_year(), temporal_date_time->iso_month(), + temporal_date_time->iso_day() + 1}); // 12. Let tomorrow be ? CreateTemporalDateTime(tomorrowFields.[[Year]], // tomorrowFields.[[Month]], tomorrowFields.[[Day]], 0, 0, 0, 0, 0, 0, From 704c571d904777cbab04e09e002db9f2d76666e5 Mon Sep 17 00:00:00 2001 From: Thibaud Michaud Date: Tue, 13 Sep 2022 20:16:46 +0200 Subject: [PATCH 0088/1772] [wasm] Trap on invalid suspender object Trap if the suspender argument provided to the JSPI import wrapper is invalid. For now, the suspender argument is expected to be the active suspender. In the future, it will also be possible to suspend to a parent of the current suspender. This will only be possible once wasm-to-wasm suspending wrappers are supported, or if and when JSPI suspenders become compatible with their core stack-switching counterpart (e.g. 
Fibers in the fiber proposal). R=jkummerow@chromium.org Bug: v8:12191 Change-Id: I650454ed076bd251b0aa18656774d4c4b2d3bfdc Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3892697 Reviewed-by: Jakob Kummerow Commit-Queue: Thibaud Michaud Cr-Commit-Position: refs/heads/main@{#83173} --- src/builtins/x64/builtins-x64.cc | 5 ----- src/common/message-template.h | 1 + src/compiler/wasm-compiler.cc | 18 +++++++++++++++--- src/runtime/runtime-wasm.cc | 9 +++++++++ src/runtime/runtime.h | 1 + test/mjsunit/wasm/stack-switching.js | 26 ++++++++++++++++++++++++++ 6 files changed, 52 insertions(+), 8 deletions(-) diff --git a/src/builtins/x64/builtins-x64.cc b/src/builtins/x64/builtins-x64.cc index c1e89191d1..d5aaedb2b3 100644 --- a/src/builtins/x64/builtins-x64.cc +++ b/src/builtins/x64/builtins-x64.cc @@ -3971,11 +3971,6 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) { __ subq(rsp, Immediate(-(BuiltinWasmWrapperConstants::kGCScanSlotCountOffset - TypedFrameConstants::kFixedFrameSizeFromFp))); - // TODO(thibaudm): Throw if any of the following holds: - // - caller is null - // - ActiveSuspender is undefined - // - 'suspender' is not the active suspender - // ------------------------------------------- // Save current state in active jump buffer. // ------------------------------------------- diff --git a/src/common/message-template.h b/src/common/message-template.h index 957c540fe9..bb1b284d91 100644 --- a/src/common/message-template.h +++ b/src/common/message-template.h @@ -654,6 +654,7 @@ namespace internal { T(WasmTrapStringInvalidUtf8, "invalid UTF-8 string") \ T(WasmTrapStringInvalidWtf8, "invalid WTF-8 string") \ T(WasmTrapStringOffsetOutOfBounds, "string offset out of bounds") \ + T(WasmTrapBadSuspender, "invalid suspender object for suspend") \ T(WasmTrapStringIsolatedSurrogate, \ "Failed to encode string as UTF-8: contains unpaired surrogate") \ T(WasmExceptionError, "wasm exception") \ diff --git a/src/compiler/wasm-compiler.cc b/src/compiler/wasm-compiler.cc index 04766b2e9d..6fd0c2696d 100644 --- a/src/compiler/wasm-compiler.cc +++ b/src/compiler/wasm-compiler.cc @@ -6891,12 +6891,20 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { // If value is a promise, suspend to the js-to-wasm prompt, and resume later // with the promise's resolved value. auto resume = gasm_->MakeLabel(MachineRepresentation::kTagged); - gasm_->GotoIf(IsSmi(value), &resume, value); - gasm_->GotoIfNot(gasm_->HasInstanceType(value, JS_PROMISE_TYPE), &resume, - BranchHint::kTrue, value); + // Trap if the suspender argument is not the active suspender or if there is + // no active suspender. 
+ auto bad_suspender = gasm_->MakeDeferredLabel(); Node* native_context = gasm_->Load( MachineType::TaggedPointer(), api_function_ref, wasm::ObjectAccess::ToTagged(WasmApiFunctionRef::kNativeContextOffset)); + Node* active_suspender = LOAD_ROOT(ActiveSuspender, active_suspender); + gasm_->GotoIf(gasm_->TaggedEqual(active_suspender, UndefinedValue()), + &bad_suspender, BranchHint::kFalse); + gasm_->GotoIfNot(gasm_->TaggedEqual(suspender, active_suspender), + &bad_suspender, BranchHint::kFalse); + gasm_->GotoIf(IsSmi(value), &resume, value); + gasm_->GotoIfNot(gasm_->HasInstanceType(value, JS_PROMISE_TYPE), &resume, + BranchHint::kTrue, value); auto* call_descriptor = GetBuiltinCallDescriptor( Builtin::kWasmSuspend, zone_, StubCallMode::kCallWasmRuntimeStub); Node* call_target = mcgraph()->RelocatableIntPtrConstant( @@ -6907,6 +6915,10 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { Node* resolved = gasm_->Call(call_descriptor, call_target, chained_promise, suspender); gasm_->Goto(&resume, resolved); + gasm_->Bind(&bad_suspender); + BuildCallToRuntimeWithContext(Runtime::kThrowBadSuspenderError, + native_context, nullptr, 0); + TerminateThrow(effect(), control()); gasm_->Bind(&resume); return resume.PhiAt(0); } diff --git a/src/runtime/runtime-wasm.cc b/src/runtime/runtime-wasm.cc index 221fce892f..1a8744d262 100644 --- a/src/runtime/runtime-wasm.cc +++ b/src/runtime/runtime-wasm.cc @@ -194,6 +194,15 @@ RUNTIME_FUNCTION(Runtime_WasmThrowJSTypeError) { isolate, NewTypeError(MessageTemplate::kWasmTrapJSTypeError)); } +// This error is thrown from a wasm-to-JS wrapper, so unlike +// Runtime_ThrowWasmError, this function does not check or unset the +// thread-in-wasm flag. +RUNTIME_FUNCTION(Runtime_ThrowBadSuspenderError) { + HandleScope scope(isolate); + DCHECK_EQ(0, args.length()); + return ThrowWasmError(isolate, MessageTemplate::kWasmTrapBadSuspender); +} + RUNTIME_FUNCTION(Runtime_WasmThrow) { ClearThreadInWasmScope clear_wasm_flag(isolate); HandleScope scope(isolate); diff --git a/src/runtime/runtime.h b/src/runtime/runtime.h index 149df155ff..61f3d2a41d 100644 --- a/src/runtime/runtime.h +++ b/src/runtime/runtime.h @@ -598,6 +598,7 @@ namespace internal { F(TypedArraySortFast, 1, 1) #define FOR_EACH_INTRINSIC_WASM(F, I) \ + F(ThrowBadSuspenderError, 0, 1) \ F(ThrowWasmError, 1, 1) \ F(ThrowWasmStackOverflow, 0, 1) \ F(WasmI32AtomicWait, 4, 1) \ diff --git a/test/mjsunit/wasm/stack-switching.js b/test/mjsunit/wasm/stack-switching.js index 3e577cb2d4..65d2704efa 100644 --- a/test/mjsunit/wasm/stack-switching.js +++ b/test/mjsunit/wasm/stack-switching.js @@ -495,3 +495,29 @@ function TestNestedSuspenders(suspend) { let wrapper = ToPromising(instance.exports.test); assertThrows(wrapper, RangeError, /Maximum call stack size exceeded/); })(); + +(function TestBadSuspender() { + print(arguments.callee.name); + let builder = new WasmModuleBuilder(); + let import_index = builder.addImport('m', 'import', kSig_i_r); + builder.addFunction("test", kSig_i_r) + .addBody([ + kExprLocalGet, 0, + kExprCallFunction, import_index, // suspend + ]).exportFunc(); + builder.addFunction("return_suspender", kSig_r_r) + .addBody([ + kExprLocalGet, 0 + ]).exportFunc(); + let js_import = new WebAssembly.Function( + {parameters: ['externref'], results: ['i32']}, + () => Promise.resolve(42), + {suspending: 'first'}); + let instance = builder.instantiate({m: {import: js_import}}); + let suspender = ToPromising(instance.exports.return_suspender)(); + for (s of [suspender, null, undefined, {}]) { + 
assertThrows(() => instance.exports.test(s), + WebAssembly.RuntimeError, + /invalid suspender object for suspend/); + } +})(); From 210563a16ff6b43265867c6f3f06065d4a304e7c Mon Sep 17 00:00:00 2001 From: Fabrice de Gans Date: Mon, 12 Sep 2022 15:21:16 -0700 Subject: [PATCH 0089/1772] [code-health] Fix syntax error in python file Bug: v8:8594 Change-Id: I734a548b074567af3cad6359ef96640cbf0eb6f3 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3892137 Commit-Queue: Fabrice de Gans Auto-Submit: Fabrice de Gans Reviewed-by: Alexander Schulze Cr-Commit-Position: refs/heads/main@{#83174} --- tools/clusterfuzz/js_fuzzer/tools/minimize.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/clusterfuzz/js_fuzzer/tools/minimize.py b/tools/clusterfuzz/js_fuzzer/tools/minimize.py index be864d5463..de4e136860 100644 --- a/tools/clusterfuzz/js_fuzzer/tools/minimize.py +++ b/tools/clusterfuzz/js_fuzzer/tools/minimize.py @@ -23,7 +23,7 @@ OUT_PATH = os.path.join(BASE_PATH, 'out.js') FAILURES_JSON_PATH = os.path.join( BASE_PATH, 'workdir', 'output', 'failures.json') -assert(len(sys.argv) > 1, 'Need to specify minimizer path.') +assert len(sys.argv) > 1, 'Need to specify minimizer path.' minimizer_path = sys.argv[1] def getcmd(command): From b0bc960a0ed65c051ce077b9745d1201a86bfc82 Mon Sep 17 00:00:00 2001 From: v8-ci-autoroll-builder Date: Tue, 13 Sep 2022 20:15:41 -0700 Subject: [PATCH 0090/1772] Update V8 DEPS (trusted) Rolling v8/build: https://chromium.googlesource.com/chromium/src/build/+log/7fcb69a..4157fb6 Rolling v8/buildtools: https://chromium.googlesource.com/chromium/src/buildtools/+log/4276428..e713c13 Rolling v8/buildtools/third_party/libc++/trunk: https://chromium.googlesource.com/external/github.com/llvm/llvm-project/libcxx/+log/60f9078..c1e647c Rolling v8/third_party/catapult: https://chromium.googlesource.com/catapult/+log/4864449..37391a1 Rolling v8/third_party/depot_tools: https://chromium.googlesource.com/chromium/tools/depot_tools/+log/2d25dbd..9ebcfa6 Rolling v8/third_party/fuchsia-sdk/sdk: version:9.20220912.3.1..version:9.20220913.3.1 Rolling v8/third_party/zlib: https://chromium.googlesource.com/chromium/src/third_party/zlib/+log/05e137d..f48cb14 Rolling v8/tools/clang: https://chromium.googlesource.com/chromium/src/tools/clang/+log/2a5ebae..02a202a R=v8-waterfall-sheriff@grotations.appspotmail.com,mtv-sf-v8-sheriff@grotations.appspotmail.com Change-Id: I5cc2b3bdb94bd9786f11095169c3e193f8876ad9 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3893427 Commit-Queue: v8-ci-autoroll-builder Bot-Commit: v8-ci-autoroll-builder Cr-Commit-Position: refs/heads/main@{#83175} --- DEPS | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/DEPS b/DEPS index 02fa1342f4..595c243abc 100644 --- a/DEPS +++ b/DEPS @@ -54,7 +54,7 @@ vars = { # Three lines of non-changing comments so that # the commit queue can handle CLs rolling Fuchsia sdk # and whatever else without interference from each other. 
- 'fuchsia_version': 'version:9.20220912.3.1', + 'fuchsia_version': 'version:9.20220913.3.1', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -94,9 +94,9 @@ deps = { 'base/trace_event/common': Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '521ac34ebd795939c7e16b37d9d3ddb40e8ed556', 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + '7fcb69a42d71a2ab52b833bdc5f0e83536c31ef4', + Var('chromium_url') + '/chromium/src/build.git' + '@' + '4157fb6cb44135013300168c9f4c5b95d04acf70', 'buildtools': - Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '42764285a09b521f7764ceff8f8dbefa8dd26cb6', + Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + 'e713c13e2fa3b7aa9131276f27990011e1aa6a73', 'buildtools/clang_format/script': Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + '8b525d2747f2584fc35d8c7e612e66f377858df7', 'buildtools/linux64': { @@ -120,7 +120,7 @@ deps = { 'condition': 'host_os == "mac"', }, 'buildtools/third_party/libc++/trunk': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '60f90783c34aeab2c49682c6d4ce5520c8cb56b3', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + 'c1e647c7c30238f7c512457eec55798e3458fd8a', 'buildtools/third_party/libc++abi/trunk': Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '5c3e02e92ae8bbc1bf1001bd9ef0d76e044ddb86', 'buildtools/third_party/libunwind/trunk': @@ -198,7 +198,7 @@ deps = { 'dep_type': 'cipd', }, 'third_party/catapult': { - 'url': Var('chromium_url') + '/catapult.git' + '@' + '486444967e3ba7da8e2a97b5a4f39f58125b2ab1', + 'url': Var('chromium_url') + '/catapult.git' + '@' + '37391a1619e953e23d3441dbc61e658e881fede4', 'condition': 'checkout_android', }, 'third_party/colorama/src': { @@ -206,7 +206,7 @@ deps = { 'condition': 'checkout_android', }, 'third_party/depot_tools': - Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '2d25dbd149b460cc1fa96acbcb1797a12b3c0771', + Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '9ebcfa6be17c2d1e7bd72135ceab5e767ed89b7d', 'third_party/fuchsia-sdk/sdk': { 'packages': [ { @@ -249,9 +249,9 @@ deps = { 'condition': 'checkout_android', }, 'third_party/zlib': - Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '05e137d33c6a11a93cefe6553f4f983edf9b2de4', + Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + 'f48cb14d487038d20c85680e29351e095a0fea8b', 'tools/clang': - Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '2a5ebae0f797d7ad1f27d7f20bd926ce76c29411', + Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '02a202a7b1fa863352c0c9fb088fd3c0cf48c978', 'tools/luci-go': { 'packages': [ { From 2124146565b398d3fc3c45a345544d09ed7f2c23 Mon Sep 17 00:00:00 2001 From: Liu Yu Date: Wed, 14 Sep 2022 09:56:06 +0800 Subject: [PATCH 0091/1772] [loong64][mips64] Fix LoadSpillAddress on big endian Besides, fix a wrong instruction in mips64. 
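
(The wrong instruction was Dsub, which raises an integer overflow
exception when the subtraction overflows; frame-pointer-relative address
arithmetic must use the non-trapping Dsubu instead.)
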
Port commit ac0cedf1615db8d38a68de29210c5fff83a6f327

Change-Id: I3c8c73eacc2aa1b5f4a583a0187261455917ad7a
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3892526
Auto-Submit: Liu Yu
Commit-Queue: Zhao Jiazhong
Reviewed-by: Zhao Jiazhong
Cr-Commit-Position: refs/heads/main@{#83176}
---
 src/wasm/baseline/loong64/liftoff-assembler-loong64.h | 3 ++-
 src/wasm/baseline/mips64/liftoff-assembler-mips64.h   | 5 +++--
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/src/wasm/baseline/loong64/liftoff-assembler-loong64.h b/src/wasm/baseline/loong64/liftoff-assembler-loong64.h
index 5c4ac0f0a1..5c2d1b1e55 100644
--- a/src/wasm/baseline/loong64/liftoff-assembler-loong64.h
+++ b/src/wasm/baseline/loong64/liftoff-assembler-loong64.h
@@ -1012,7 +1012,8 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
   }
 }
 
-void LiftoffAssembler::LoadSpillAddress(Register dst, int offset) {
+void LiftoffAssembler::LoadSpillAddress(Register dst, int offset,
+                                        ValueKind /* kind */) {
   Sub_d(dst, fp, Operand(offset));
 }
 
diff --git a/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index 462783eb3e..a145c54da9 100644
--- a/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -1109,8 +1109,9 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
   }
 }
 
-void LiftoffAssembler::LoadSpillAddress(Register dst, int offset) {
-  Dsub(dst, fp, Operand(offset));
+void LiftoffAssembler::LoadSpillAddress(Register dst, int offset,
+                                        ValueKind /* kind */) {
+  Dsubu(dst, fp, Operand(offset));
 }
 
 void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) {

From ae2ef7d234851245f17daad206fcafb5a912c3b4 Mon Sep 17 00:00:00 2001
From: Michael Achenbach
Date: Tue, 13 Sep 2022 22:03:13 +0200
Subject: [PATCH 0092/1772] [test] Drain queues asynchronously when terminating workers

Joining a queue-using process can deadlock if the child process is about
to write to the queue, but the parent process wants to join the child.
(A multiprocessing queue moves buffered items into the underlying pipe
from a feeder thread; if the pipe is full and no one reads from it, the
child cannot finish and join() blocks forever.) To fix this, we now
drain elements from a separate thread of the main process.

Bug: v8:13113
Change-Id: Ic279e66ab84eb89a4034ff1f2c025eb850b65013
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3891116
Commit-Queue: Michael Achenbach
Reviewed-by: Alexander Schulze
Cr-Commit-Position: refs/heads/main@{#83177}
---
 tools/testrunner/local/pool.py      | 64 +++++++++++++++--------------
 tools/testrunner/local/pool_test.py | 16 +++++++-
 2 files changed, 49 insertions(+), 31 deletions(-)

diff --git a/tools/testrunner/local/pool.py b/tools/testrunner/local/pool.py
index bf04e1f055..527070cac4 100644
--- a/tools/testrunner/local/pool.py
+++ b/tools/testrunner/local/pool.py
@@ -7,6 +7,7 @@ import collections
 import logging
 import os
 import signal
+import threading
 import traceback
 
 from contextlib import contextmanager
@@ -25,8 +26,6 @@ def setup_testing():
   del Process
   from queue import Queue
   from threading import Thread as Process
-  # Monkeypatch threading Queue to look like multiprocessing Queue.
-  Queue.cancel_join_thread = lambda self: None
   # Monkeypatch os.kill and add fake pid property on Thread.
   os.kill = lambda *args: None
   Process.pid = property(lambda self: None)
@@ -108,6 +107,36 @@ def without_sig():
     signal.signal(signal.SIGTERM, term_handler)
 
 
+@contextmanager
+def drain_queue_async(queue):
+  """Drains a queue in a background thread until the wrapped code unblocks.
+ + This can be used to unblock joining a child process that might still write + to the queue. The join should be wrapped by this context manager. + """ + keep_running = True + + def empty_queue(): + elem_count = 0 + while keep_running: + try: + while True: + queue.get(True, 0.1) + elem_count += 1 + if elem_count < 200: + logging.info('Drained an element from queue.') + except Empty: + pass + except: + logging.exception('Error draining queue.') + + emptier = threading.Thread(target=empty_queue) + emptier.start() + yield + keep_running = False + emptier.join() + + class ContextPool(): def __init__(self): @@ -325,35 +354,10 @@ class DefaultExecutionPool(ContextPool): self._terminate_processes() self.notify("Joining workers") - for p in self.processes: - p.join() + with drain_queue_async(self.done_queue): + for p in self.processes: + p.join() - # Drain the queues to prevent stderr chatter when queues are garbage - # collected. - self.notify("Draining queues") - # TODO(https://crbug.com/v8/13113): Remove extra logging after - # investigation. - elem_count = 0 - try: - while True: - self.work_queue.get(False) - elem_count += 1 - if elem_count < 200: - logging.info('Drained an element from work queue.') - except Empty: - pass - except: - logging.exception('Error draining work queue.') - try: - while True: - self.done_queue.get(False) - elem_count += 1 - if elem_count < 200: - logging.info('Drained an element from done queue.') - except Empty: - pass - except: - logging.exception('Error draining done queue.') self.notify("Pool terminated") def _get_result_from_queue(self): diff --git a/tools/testrunner/local/pool_test.py b/tools/testrunner/local/pool_test.py index acd597ee6c..e023ae188c 100755 --- a/tools/testrunner/local/pool_test.py +++ b/tools/testrunner/local/pool_test.py @@ -7,12 +7,14 @@ import os import sys import unittest +from queue import Empty, Full, Queue + # Needed because the test runner contains relative imports. TOOLS_PATH = os.path.dirname( os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) sys.path.append(TOOLS_PATH) -from testrunner.local.pool import DefaultExecutionPool +from testrunner.local.pool import DefaultExecutionPool, drain_queue_async def Run(x): @@ -64,5 +66,17 @@ class PoolTest(unittest.TestCase): set(range(0, 10)) | set(range(20, 30)) | set(range(40, 50)), results) +class QueueTest(unittest.TestCase): + def testDrainQueueAsync(self): + queue = Queue(1) + queue.put('foo') + with self.assertRaises(Full): + queue.put('bar', timeout=0.01) + with drain_queue_async(queue): + queue.put('bar') + with self.assertRaises(Empty): + queue.get(False) + + if __name__ == '__main__': unittest.main() From 10756bea834a35ddc604e21e077487ecf0aa3de9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dominik=20Inf=C3=BChr?= Date: Tue, 13 Sep 2022 07:09:26 +0200 Subject: [PATCH 0093/1772] [heap] Add shared spaces for --shared-space MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This CL adds shared spaces for regular and large objects in the shared space isolate. Spaces aren't used for allocation yet. 
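
Since SHARED_LO_SPACE takes over LAST_SPACE and LAST_MUTABLE_SPACE, code
that iterates spaces by their (consecutive) enumeration values, such as
SpaceIterator, picks up the two new spaces automatically; only exhaustive
switches over AllocationSpace need the new cases added below.
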
Bug: v8:13267 Change-Id: If508144530f4c9a1b3c0567570165955b64cc200 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3876824 Reviewed-by: Jakob Linke Commit-Queue: Dominik Inführ Reviewed-by: Michael Lippautz Cr-Commit-Position: refs/heads/main@{#83178} --- src/common/globals.h | 24 ++++++++++--------- src/flags/flag-definitions.h | 2 ++ src/heap/base-space.cc | 4 ++++ src/heap/heap-allocator.cc | 2 +- src/heap/heap-inl.h | 2 ++ src/heap/heap.cc | 45 ++++++++++++++++++++++++++---------- src/heap/heap.h | 13 +++++++---- src/heap/large-spaces.cc | 10 ++++++++ src/heap/large-spaces.h | 8 +++++++ src/heap/paged-spaces.h | 26 +++++++++++++++++++++ src/snapshot/serializer.cc | 2 ++ 11 files changed, 109 insertions(+), 29 deletions(-) diff --git a/src/common/globals.h b/src/common/globals.h index 68634d7d1c..34a58f5201 100644 --- a/src/common/globals.h +++ b/src/common/globals.h @@ -969,20 +969,22 @@ using WeakSlotCallbackWithHeap = bool (*)(Heap* heap, FullObjectSlot pointer); // NOTE: SpaceIterator depends on AllocationSpace enumeration values being // consecutive. enum AllocationSpace { - RO_SPACE, // Immortal, immovable and immutable objects, - OLD_SPACE, // Old generation regular object space. - CODE_SPACE, // Old generation code object space, marked executable. - MAP_SPACE, // Old generation map object space, non-movable. - NEW_SPACE, // Young generation space for regular objects collected - // with Scavenger/MinorMC. - LO_SPACE, // Old generation large object space. - CODE_LO_SPACE, // Old generation large code object space. - NEW_LO_SPACE, // Young generation large object space. + RO_SPACE, // Immortal, immovable and immutable objects, + OLD_SPACE, // Old generation regular object space. + CODE_SPACE, // Old generation code object space, marked executable. + MAP_SPACE, // Old generation map object space, non-movable. + NEW_SPACE, // Young generation space for regular objects collected + // with Scavenger/MinorMC. + SHARED_SPACE, // Space shared between multiple isolates. Optional. + LO_SPACE, // Old generation large object space. + CODE_LO_SPACE, // Old generation large code object space. + NEW_LO_SPACE, // Young generation large object space. + SHARED_LO_SPACE, // Space shared between multiple isolates. Optional. FIRST_SPACE = RO_SPACE, - LAST_SPACE = NEW_LO_SPACE, + LAST_SPACE = SHARED_LO_SPACE, FIRST_MUTABLE_SPACE = OLD_SPACE, - LAST_MUTABLE_SPACE = NEW_LO_SPACE, + LAST_MUTABLE_SPACE = SHARED_LO_SPACE, FIRST_GROWABLE_PAGED_SPACE = OLD_SPACE, LAST_GROWABLE_PAGED_SPACE = MAP_SPACE, FIRST_SWEEPABLE_SPACE = OLD_SPACE, diff --git a/src/flags/flag-definitions.h b/src/flags/flag-definitions.h index 16f796d43d..113d4f6ec0 100644 --- a/src/flags/flag-definitions.h +++ b/src/flags/flag-definitions.h @@ -1224,6 +1224,8 @@ DEFINE_BOOL(global_gc_scheduling, true, DEFINE_BOOL(gc_global, false, "always perform global GCs") DEFINE_BOOL(shared_space, false, "Implement shared heap as shared space on a main isolate.") +// Don't use a map space with --shared-space in order to avoid shared map space. 
+DEFINE_NEG_IMPLICATION(shared_space, use_map_space) // TODO(12950): The next two flags only have an effect if // V8_ENABLE_ALLOCATION_TIMEOUT is set, so we should only define them in that diff --git a/src/heap/base-space.cc b/src/heap/base-space.cc index aabbeaebf5..bfcacbcee3 100644 --- a/src/heap/base-space.cc +++ b/src/heap/base-space.cc @@ -17,12 +17,16 @@ const char* BaseSpace::GetSpaceName(AllocationSpace space) { return "map_space"; case CODE_SPACE: return "code_space"; + case SHARED_SPACE: + return "shared_space"; case LO_SPACE: return "large_object_space"; case NEW_LO_SPACE: return "new_large_object_space"; case CODE_LO_SPACE: return "code_large_object_space"; + case SHARED_LO_SPACE: + return "shared_lo_space"; case RO_SPACE: return "read_only_space"; } diff --git a/src/heap/heap-allocator.cc b/src/heap/heap-allocator.cc index c78098ef28..be23977973 100644 --- a/src/heap/heap-allocator.cc +++ b/src/heap/heap-allocator.cc @@ -31,7 +31,7 @@ void HeapAllocator::Setup() { shared_map_allocator_ = heap_->shared_map_allocator_ ? heap_->shared_map_allocator_.get() : shared_old_allocator_; - shared_lo_space_ = heap_->shared_lo_space(); + shared_lo_space_ = heap_->shared_isolate_lo_space_; } void HeapAllocator::SetReadOnlySpace(ReadOnlySpace* read_only_space) { diff --git a/src/heap/heap-inl.h b/src/heap/heap-inl.h index 6991a6dca5..c58cc702ab 100644 --- a/src/heap/heap-inl.h +++ b/src/heap/heap-inl.h @@ -485,6 +485,8 @@ bool Heap::IsPendingAllocationInternal(HeapObject object) { return addr == large_space->pending_object(); } + case SHARED_SPACE: + case SHARED_LO_SPACE: case RO_SPACE: UNREACHABLE(); } diff --git a/src/heap/heap.cc b/src/heap/heap.cc index 5a307ff9e1..fba1e099fb 100644 --- a/src/heap/heap.cc +++ b/src/heap/heap.cc @@ -4317,9 +4317,10 @@ bool Heap::ContainsCode(HeapObject value) const { } bool Heap::SharedHeapContains(HeapObject value) const { - if (shared_old_space_) - return shared_old_space_->Contains(value) || - (shared_map_space_ && shared_map_space_->Contains(value)); + if (shared_isolate_old_space_) + return shared_isolate_old_space_->Contains(value) || + (shared_isolate_map_space_ && + shared_isolate_map_space_->Contains(value)); return false; } @@ -4350,12 +4351,16 @@ bool Heap::InSpace(HeapObject value, AllocationSpace space) const { case MAP_SPACE: DCHECK(map_space_); return map_space_->Contains(value); + case SHARED_SPACE: + return shared_space_->Contains(value); case LO_SPACE: return lo_space_->Contains(value); case CODE_LO_SPACE: return code_lo_space_->Contains(value); case NEW_LO_SPACE: return new_lo_space_->Contains(value); + case SHARED_LO_SPACE: + return shared_lo_space_->Contains(value); case RO_SPACE: return ReadOnlyHeap::Contains(value); } @@ -4380,12 +4385,16 @@ bool Heap::InSpaceSlow(Address addr, AllocationSpace space) const { case MAP_SPACE: DCHECK(map_space_); return map_space_->ContainsSlow(addr); + case SHARED_SPACE: + return shared_space_->ContainsSlow(addr); case LO_SPACE: return lo_space_->ContainsSlow(addr); case CODE_LO_SPACE: return code_lo_space_->ContainsSlow(addr); case NEW_LO_SPACE: return new_lo_space_->ContainsSlow(addr); + case SHARED_LO_SPACE: + return shared_lo_space_->ContainsSlow(addr); case RO_SPACE: return read_only_space_->ContainsSlow(addr); } @@ -4398,9 +4407,11 @@ bool Heap::IsValidAllocationSpace(AllocationSpace space) { case OLD_SPACE: case CODE_SPACE: case MAP_SPACE: + case SHARED_SPACE: case LO_SPACE: case NEW_LO_SPACE: case CODE_LO_SPACE: + case SHARED_LO_SPACE: case RO_SPACE: return true; default: @@ -5438,8 +5449,15 
@@ void Heap::SetUpSpaces(LinearAllocationArea& new_allocation_info, if (v8_flags.use_map_space) { space_[MAP_SPACE] = map_space_ = new MapSpace(this); } + if (v8_flags.shared_space && isolate()->is_shared_space_isolate()) { + space_[SHARED_SPACE] = shared_space_ = new SharedSpace(this); + } space_[LO_SPACE] = lo_space_ = new OldLargeObjectSpace(this); space_[CODE_LO_SPACE] = code_lo_space_ = new CodeLargeObjectSpace(this); + if (v8_flags.shared_space && isolate()->is_shared_space_isolate()) { + space_[SHARED_LO_SPACE] = shared_lo_space_ = + new SharedLargeObjectSpace(this); + } for (int i = 0; i < static_cast(v8::Isolate::kUseCounterFeatureCount); i++) { @@ -5517,15 +5535,15 @@ void Heap::SetUpSpaces(LinearAllocationArea& new_allocation_info, if (isolate()->shared_isolate()) { Heap* shared_heap = isolate()->shared_isolate()->heap(); - shared_old_space_ = shared_heap->old_space(); - shared_lo_space_ = shared_heap->lo_space(); - shared_old_allocator_.reset( - new ConcurrentAllocator(main_thread_local_heap(), shared_old_space_)); + shared_isolate_old_space_ = shared_heap->old_space(); + shared_isolate_lo_space_ = shared_heap->lo_space(); + shared_old_allocator_.reset(new ConcurrentAllocator( + main_thread_local_heap(), shared_isolate_old_space_)); if (shared_heap->map_space()) { - shared_map_space_ = shared_heap->map_space(); - shared_map_allocator_.reset( - new ConcurrentAllocator(main_thread_local_heap(), shared_map_space_)); + shared_isolate_map_space_ = shared_heap->map_space(); + shared_map_allocator_.reset(new ConcurrentAllocator( + main_thread_local_heap(), shared_isolate_map_space_)); } } @@ -5834,10 +5852,10 @@ void Heap::TearDown() { allocation_sites_to_pretenure_.reset(); - shared_old_space_ = nullptr; + shared_isolate_old_space_ = nullptr; shared_old_allocator_.reset(); - shared_map_space_ = nullptr; + shared_isolate_map_space_ = nullptr; shared_map_allocator_.reset(); { @@ -6771,9 +6789,12 @@ bool Heap::AllowedToBeMigrated(Map map, HeapObject obj, AllocationSpace dst) { return dst == CODE_SPACE && type == CODE_TYPE; case MAP_SPACE: return dst == MAP_SPACE && type == MAP_TYPE; + case SHARED_SPACE: + return dst == SHARED_SPACE; case LO_SPACE: case CODE_LO_SPACE: case NEW_LO_SPACE: + case SHARED_LO_SPACE: case RO_SPACE: return false; } diff --git a/src/heap/heap.h b/src/heap/heap.h index daca783901..429b2bb601 100644 --- a/src/heap/heap.h +++ b/src/heap/heap.h @@ -127,7 +127,9 @@ class SafepointScope; class ScavengeJob; class Scavenger; class ScavengerCollector; +class SharedLargeObjectSpace; class SharedReadOnlySpace; +class SharedSpace; class Space; class StressScavengeObserver; class TimedHistogram; @@ -876,12 +878,11 @@ class Heap { NewSpace* new_space() const { return new_space_; } inline PagedNewSpace* paged_new_space() const; OldSpace* old_space() const { return old_space_; } - OldSpace* shared_old_space() const { return shared_old_space_; } + OldSpace* shared_old_space() const { return shared_isolate_old_space_; } CodeSpace* code_space() const { return code_space_; } MapSpace* map_space() const { return map_space_; } inline PagedSpace* space_for_maps(); OldLargeObjectSpace* lo_space() const { return lo_space_; } - OldLargeObjectSpace* shared_lo_space() const { return shared_lo_space_; } CodeLargeObjectSpace* code_lo_space() const { return code_lo_space_; } NewLargeObjectSpace* new_lo_space() const { return new_lo_space_; } ReadOnlySpace* read_only_space() const { return read_only_space_; } @@ -2190,14 +2191,16 @@ class Heap { OldSpace* old_space_ = nullptr; CodeSpace* 
code_space_ = nullptr; MapSpace* map_space_ = nullptr; + SharedSpace* shared_space_ = nullptr; OldLargeObjectSpace* lo_space_ = nullptr; CodeLargeObjectSpace* code_lo_space_ = nullptr; NewLargeObjectSpace* new_lo_space_ = nullptr; + SharedLargeObjectSpace* shared_lo_space_ = nullptr; ReadOnlySpace* read_only_space_ = nullptr; - OldSpace* shared_old_space_ = nullptr; - OldLargeObjectSpace* shared_lo_space_ = nullptr; - MapSpace* shared_map_space_ = nullptr; + OldSpace* shared_isolate_old_space_ = nullptr; + OldLargeObjectSpace* shared_isolate_lo_space_ = nullptr; + MapSpace* shared_isolate_map_space_ = nullptr; std::unique_ptr shared_old_allocator_; std::unique_ptr shared_map_allocator_; diff --git a/src/heap/large-spaces.cc b/src/heap/large-spaces.cc index 74c621e81f..2baed404a2 100644 --- a/src/heap/large-spaces.cc +++ b/src/heap/large-spaces.cc @@ -582,5 +582,15 @@ void CodeLargeObjectSpace::RemovePage(LargePage* page) { OldLargeObjectSpace::RemovePage(page); } +SharedLargeObjectSpace::SharedLargeObjectSpace(Heap* heap) + : OldLargeObjectSpace(heap, SHARED_LO_SPACE) {} + +AllocationResult SharedLargeObjectSpace::AllocateRawBackground( + LocalHeap* local_heap, int object_size) { + DCHECK(!v8_flags.enable_third_party_heap); + return OldLargeObjectSpace::AllocateRawBackground(local_heap, object_size, + NOT_EXECUTABLE); +} + } // namespace internal } // namespace v8 diff --git a/src/heap/large-spaces.h b/src/heap/large-spaces.h index 70c55833e1..576c672fff 100644 --- a/src/heap/large-spaces.h +++ b/src/heap/large-spaces.h @@ -190,6 +190,14 @@ class OldLargeObjectSpace : public LargeObjectSpace { LocalHeap* local_heap, int object_size, Executability executable); }; +class SharedLargeObjectSpace : public OldLargeObjectSpace { + public: + explicit SharedLargeObjectSpace(Heap* heap); + + V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult + AllocateRawBackground(LocalHeap* local_heap, int object_size); +}; + class NewLargeObjectSpace : public LargeObjectSpace { public: NewLargeObjectSpace(Heap* heap, size_t capacity); diff --git a/src/heap/paged-spaces.h b/src/heap/paged-spaces.h index 7241a29b0e..986aed3a31 100644 --- a/src/heap/paged-spaces.h +++ b/src/heap/paged-spaces.h @@ -571,6 +571,32 @@ class MapSpace final : public PagedSpace { LinearAllocationArea paged_allocation_info_; }; +// ----------------------------------------------------------------------------- +// Shared space regular object space. + +class SharedSpace final : public PagedSpace { + public: + // Creates an old space object. The constructor does not allocate pages + // from OS. + explicit SharedSpace(Heap* heap) + : PagedSpace(heap, SHARED_SPACE, NOT_EXECUTABLE, + FreeList::CreateFreeList(), allocation_info) {} + + static bool IsAtPageStart(Address addr) { + return static_cast(addr & kPageAlignmentMask) == + MemoryChunkLayout::ObjectStartOffsetInDataPage(); + } + + size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final { + if (type == ExternalBackingStoreType::kArrayBuffer) return 0; + DCHECK_EQ(type, ExternalBackingStoreType::kExternalString); + return external_backing_store_bytes_[type]; + } + + private: + LinearAllocationArea allocation_info; +}; + // Iterates over the chunks (pages and large object pages) that can contain // pointers to new space or to evacuation candidates. 
class OldGenerationMemoryChunkIterator { diff --git a/src/snapshot/serializer.cc b/src/snapshot/serializer.cc index 4410790f19..e9971705ec 100644 --- a/src/snapshot/serializer.cc +++ b/src/snapshot/serializer.cc @@ -788,6 +788,8 @@ SnapshotSpace GetSnapshotSpace(HeapObject object) { return SnapshotSpace::kCode; case MAP_SPACE: return SnapshotSpace::kMap; + case SHARED_SPACE: + case SHARED_LO_SPACE: case CODE_LO_SPACE: case RO_SPACE: UNREACHABLE(); From c8a2d899e29c3422b28bab93f3d422cf57f9d7cc Mon Sep 17 00:00:00 2001 From: Leszek Swirski Date: Tue, 13 Sep 2022 16:59:36 +0200 Subject: [PATCH 0094/1772] [maglev] Move compilation info out of graph processor Move the CompilationInfo out of the GraphProcessor and into the individual NodeProcessors, allowing them to hold it as a field rather than getting it passed in via the various process methods. This will allow us to write graph processors that don't have/need access to the compilation info. Bug: v8:7700 Change-Id: I8b91cbeaf632f05ae8bbbe8783e5a7381b5c8e53 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3892698 Auto-Submit: Leszek Swirski Reviewed-by: Jakob Linke Commit-Queue: Jakob Linke Cr-Commit-Position: refs/heads/main@{#83179} --- src/maglev/maglev-code-generator.cc | 8 +- src/maglev/maglev-compiler.cc | 37 +++++---- src/maglev/maglev-graph-printer.cc | 121 +++++++++++++--------------- src/maglev/maglev-graph-printer.h | 10 ++- src/maglev/maglev-graph-processor.h | 71 ++++++++-------- src/maglev/maglev-graph-verifier.h | 7 +- src/maglev/maglev-regalloc.cc | 34 +++----- src/maglev/maglev-vreg-allocator.h | 6 +- 8 files changed, 137 insertions(+), 157 deletions(-) diff --git a/src/maglev/maglev-code-generator.cc b/src/maglev/maglev-code-generator.cc index 2c8c14dd5e..07db281112 100644 --- a/src/maglev/maglev-code-generator.cc +++ b/src/maglev/maglev-code-generator.cc @@ -636,7 +636,7 @@ class MaglevCodeGeneratingNodeProcessor { explicit MaglevCodeGeneratingNodeProcessor(MaglevAssembler* masm) : masm_(masm) {} - void PreProcessGraph(MaglevCompilationInfo*, Graph* graph) { + void PreProcessGraph(Graph* graph) { if (FLAG_maglev_break_on_entry) { __ int3(); } @@ -748,7 +748,7 @@ class MaglevCodeGeneratingNodeProcessor { } } - void PostProcessGraph(MaglevCompilationInfo*, Graph*) { + void PostProcessGraph(Graph*) { __ int3(); __ bind(&deferred_call_stack_guard_); ASM_CODE_COMMENT_STRING(masm(), "Stack/interrupt call"); @@ -763,7 +763,7 @@ class MaglevCodeGeneratingNodeProcessor { __ jmp(&deferred_call_stack_guard_return_); } - void PreProcessBasicBlock(MaglevCompilationInfo*, BasicBlock* block) { + void PreProcessBasicBlock(BasicBlock* block) { if (FLAG_code_comments) { std::stringstream ss; ss << "-- Block b" << graph_labeller()->BlockId(block); @@ -955,7 +955,7 @@ class MaglevCodeGeneratorImpl final { graph->untagged_stack_slots()), code_gen_state_(compilation_info, safepoint_table_builder()), masm_(&code_gen_state_), - processor_(compilation_info, &masm_), + processor_(&masm_), graph_(graph) {} MaybeHandle Generate() { diff --git a/src/maglev/maglev-compiler.cc b/src/maglev/maglev-compiler.cc index bcc253b692..78decb2857 100644 --- a/src/maglev/maglev-compiler.cc +++ b/src/maglev/maglev-compiler.cc @@ -26,6 +26,7 @@ #include "src/ic/handler-configuration.h" #include "src/maglev/maglev-basic-block.h" #include "src/maglev/maglev-code-generator.h" +#include "src/maglev/maglev-compilation-info.h" #include "src/maglev/maglev-compilation-unit.h" #include "src/maglev/maglev-graph-builder.h" #include 
"src/maglev/maglev-graph-labeller.h" @@ -48,13 +49,12 @@ namespace maglev { class UseMarkingProcessor { public: - void PreProcessGraph(MaglevCompilationInfo*, Graph* graph) { - next_node_id_ = kFirstValidNodeId; - } - void PostProcessGraph(MaglevCompilationInfo*, Graph* graph) { - DCHECK(loop_used_nodes_.empty()); - } - void PreProcessBasicBlock(MaglevCompilationInfo*, BasicBlock* block) { + explicit UseMarkingProcessor(MaglevCompilationInfo* compilation_info) + : compilation_info_(compilation_info) {} + + void PreProcessGraph(Graph* graph) { next_node_id_ = kFirstValidNodeId; } + void PostProcessGraph(Graph* graph) { DCHECK(loop_used_nodes_.empty()); } + void PreProcessBasicBlock(BasicBlock* block) { if (!block->has_state()) return; if (block->state()->is_loop()) { loop_used_nodes_.push_back(LoopUsedNodes{next_node_id_, {}}); @@ -121,7 +121,7 @@ class UseMarkingProcessor { // loop, allow nodes to be "moved" between lifetime extensions. LoopUsedNodes* outer_loop_used_nodes = GetCurrentLoopUsedNodes(); base::Vector used_node_inputs = - state.compilation_info()->zone()->NewVector( + compilation_info_->zone()->NewVector( loop_used_nodes.used_nodes.size()); int i = 0; for (ValueNode* used_node : loop_used_nodes.used_nodes) { @@ -228,29 +228,31 @@ class UseMarkingProcessor { }); } + MaglevCompilationInfo* compilation_info_; uint32_t next_node_id_; std::vector loop_used_nodes_; }; class TranslationArrayProcessor { public: - explicit TranslationArrayProcessor(LocalIsolate* local_isolate) - : local_isolate_(local_isolate) {} + explicit TranslationArrayProcessor(LocalIsolate* local_isolate, + MaglevCompilationInfo* compilation_info) + : local_isolate_(local_isolate), compilation_info_(compilation_info) {} - void PreProcessGraph(MaglevCompilationInfo* compilation_info, Graph* graph) { + void PreProcessGraph(Graph* graph) { translation_array_builder_.reset( - new TranslationArrayBuilder(compilation_info->zone())); + new TranslationArrayBuilder(compilation_info_->zone())); deopt_literals_.reset(new IdentityMap( local_isolate_->heap()->heap())); tagged_slots_ = graph->tagged_stack_slots(); } - void PostProcessGraph(MaglevCompilationInfo* compilation_info, Graph* graph) { - compilation_info->set_translation_array_builder( + void PostProcessGraph(Graph* graph) { + compilation_info_->set_translation_array_builder( std::move(translation_array_builder_), std::move(deopt_literals_)); } - void PreProcessBasicBlock(MaglevCompilationInfo*, BasicBlock* block) {} + void PreProcessBasicBlock(BasicBlock* block) {} void Process(NodeBase* node, const ProcessingState& state) { if (node->properties().can_eager_deopt()) { @@ -524,6 +526,7 @@ class TranslationArrayProcessor { } LocalIsolate* local_isolate_; + MaglevCompilationInfo* compilation_info_; std::unique_ptr translation_array_builder_; std::unique_ptr> deopt_literals_; @@ -572,7 +575,7 @@ void MaglevCompiler::Compile(LocalIsolate* local_isolate, { GraphMultiProcessor processor( - compilation_info); + UseMarkingProcessor{compilation_info}); processor.ProcessGraph(graph_builder.graph()); } @@ -590,7 +593,7 @@ void MaglevCompiler::Compile(LocalIsolate* local_isolate, } GraphProcessor build_translation_array( - compilation_info, local_isolate); + local_isolate, compilation_info); build_translation_array.ProcessGraph(graph_builder.graph()); // Stash the compiled graph on the compilation info. 
diff --git a/src/maglev/maglev-graph-printer.cc b/src/maglev/maglev-graph-printer.cc index 95f0165a77..3f4cec406c 100644 --- a/src/maglev/maglev-graph-printer.cc +++ b/src/maglev/maglev-graph-printer.cc @@ -242,13 +242,14 @@ int MaglevPrintingVisitorOstream::overflow(int c) { } // namespace -MaglevPrintingVisitor::MaglevPrintingVisitor(std::ostream& os) - : os_(os), +MaglevPrintingVisitor::MaglevPrintingVisitor( + MaglevGraphLabeller* graph_labeller, std::ostream& os) + : graph_labeller_(graph_labeller), + os_(os), os_for_additional_info_( new MaglevPrintingVisitorOstream(os_, &targets_)) {} -void MaglevPrintingVisitor::PreProcessGraph( - MaglevCompilationInfo* compilation_info, Graph* graph) { +void MaglevPrintingVisitor::PreProcessGraph(Graph* graph) { os_ << "Graph\n\n"; for (BasicBlock* block : *graph) { @@ -303,10 +304,7 @@ void MaglevPrintingVisitor::PreProcessGraph( [](BasicBlock* block) { return block == nullptr; })); } -void MaglevPrintingVisitor::PreProcessBasicBlock( - MaglevCompilationInfo* compilation_info, BasicBlock* block) { - MaglevGraphLabeller* graph_labeller = compilation_info->graph_labeller(); - +void MaglevPrintingVisitor::PreProcessBasicBlock(BasicBlock* block) { size_t loop_position = static_cast(-1); if (loop_headers_.erase(block) > 0) { loop_position = AddTarget(targets_, block); @@ -355,7 +353,7 @@ void MaglevPrintingVisitor::PreProcessBasicBlock( if (FLAG_log_colour) os_ << "\033[0m"; } - int block_id = graph_labeller->BlockId(block); + int block_id = graph_labeller_->BlockId(block); os_ << "Block b" << block_id; if (block->is_exception_handler_block()) { os_ << " (exception handler)"; @@ -369,10 +367,8 @@ namespace { template void PrintEagerDeopt(std::ostream& os, std::vector targets, - NodeT* node, const ProcessingState& state, + NodeT* node, MaglevGraphLabeller* graph_labeller, int max_node_id) { - MaglevGraphLabeller* graph_labeller = state.graph_labeller(); - PrintVerticalArrows(os, targets); PrintPadding(os, graph_labeller, max_node_id, 0); @@ -394,15 +390,15 @@ void PrintEagerDeopt(std::ostream& os, std::vector targets, os << "}\n"; } void MaybePrintEagerDeopt(std::ostream& os, std::vector targets, - NodeBase* node, const ProcessingState& state, + NodeBase* node, MaglevGraphLabeller* graph_labeller, int max_node_id) { switch (node->opcode()) { -#define CASE(Name) \ - case Opcode::k##Name: \ - if constexpr (Name::kProperties.can_eager_deopt()) { \ - PrintEagerDeopt(os, targets, node->Cast(), state, \ - max_node_id); \ - } \ +#define CASE(Name) \ + case Opcode::k##Name: \ + if constexpr (Name::kProperties.can_eager_deopt()) { \ + PrintEagerDeopt(os, targets, node->Cast(), graph_labeller, \ + max_node_id); \ + } \ break; NODE_BASE_LIST(CASE) #undef CASE @@ -411,10 +407,8 @@ void MaybePrintEagerDeopt(std::ostream& os, std::vector targets, template void PrintLazyDeopt(std::ostream& os, std::vector targets, - NodeT* node, const ProcessingState& state, + NodeT* node, MaglevGraphLabeller* graph_labeller, int max_node_id) { - MaglevGraphLabeller* graph_labeller = state.graph_labeller(); - PrintVerticalArrows(os, targets); PrintPadding(os, graph_labeller, max_node_id, 0); @@ -444,7 +438,8 @@ void PrintLazyDeopt(std::ostream& os, std::vector targets, template void PrintExceptionHandlerPoint(std::ostream& os, std::vector targets, NodeT* node, - const ProcessingState& state, int max_node_id) { + MaglevGraphLabeller* graph_labeller, + int max_node_id) { // If no handler info, then we cannot throw. 
ExceptionHandlerInfo* info = node->exception_handler_info(); if (!info->HasExceptionHandler()) return; @@ -463,8 +458,6 @@ void PrintExceptionHandlerPoint(std::ostream& os, auto* liveness = block->state()->frame_state().liveness(); LazyDeoptInfo* deopt_info = node->lazy_deopt_info(); - MaglevGraphLabeller* graph_labeller = state.graph_labeller(); - PrintVerticalArrows(os, targets); PrintPadding(os, graph_labeller, max_node_id, 0); @@ -489,19 +482,19 @@ void PrintExceptionHandlerPoint(std::ostream& os, void MaybePrintLazyDeoptOrExceptionHandler(std::ostream& os, std::vector targets, NodeBase* node, - const ProcessingState& state, + MaglevGraphLabeller* graph_labeller, int max_node_id) { switch (node->opcode()) { -#define CASE(Name) \ - case Opcode::k##Name: \ - if constexpr (Name::kProperties.can_lazy_deopt()) { \ - PrintLazyDeopt(os, targets, node->Cast(), state, \ - max_node_id); \ - } \ - if constexpr (Name::kProperties.can_throw()) { \ - PrintExceptionHandlerPoint(os, targets, node->Cast(), state, \ - max_node_id); \ - } \ +#define CASE(Name) \ + case Opcode::k##Name: \ + if constexpr (Name::kProperties.can_lazy_deopt()) { \ + PrintLazyDeopt(os, targets, node->Cast(), graph_labeller, \ + max_node_id); \ + } \ + if constexpr (Name::kProperties.can_throw()) { \ + PrintExceptionHandlerPoint(os, targets, node->Cast(), \ + graph_labeller, max_node_id); \ + } \ break; NODE_BASE_LIST(CASE) #undef CASE @@ -511,10 +504,8 @@ void MaybePrintLazyDeoptOrExceptionHandler(std::ostream& os, } // namespace void MaglevPrintingVisitor::Process(Phi* phi, const ProcessingState& state) { - MaglevGraphLabeller* graph_labeller = state.graph_labeller(); - PrintVerticalArrows(os_, targets_); - PrintPaddedId(os_, graph_labeller, max_node_id_, phi); + PrintPaddedId(os_, graph_labeller_, max_node_id_, phi); if (phi->input_count() == 0) { os_ << "φₑ " << phi->owner().ToString(); } else { @@ -524,37 +515,34 @@ void MaglevPrintingVisitor::Process(Phi* phi, const ProcessingState& state) { // moves). 
for (int i = 0; i < phi->input_count(); ++i) { if (i > 0) os_ << ", "; - os_ << PrintNodeLabel(graph_labeller, phi->input(i).node()); + os_ << PrintNodeLabel(graph_labeller_, phi->input(i).node()); } os_ << ")"; } os_ << " → " << phi->result().operand() << "\n"; MaglevPrintingVisitorOstream::cast(os_for_additional_info_) - ->set_padding(MaxIdWidth(graph_labeller, max_node_id_, 2)); + ->set_padding(MaxIdWidth(graph_labeller_, max_node_id_, 2)); } void MaglevPrintingVisitor::Process(Node* node, const ProcessingState& state) { - MaglevGraphLabeller* graph_labeller = state.graph_labeller(); - - MaybePrintEagerDeopt(os_, targets_, node, state, max_node_id_); + MaybePrintEagerDeopt(os_, targets_, node, graph_labeller_, max_node_id_); PrintVerticalArrows(os_, targets_); - PrintPaddedId(os_, graph_labeller, max_node_id_, node); - os_ << PrintNode(graph_labeller, node) << "\n"; + PrintPaddedId(os_, graph_labeller_, max_node_id_, node); + os_ << PrintNode(graph_labeller_, node) << "\n"; MaglevPrintingVisitorOstream::cast(os_for_additional_info_) - ->set_padding(MaxIdWidth(graph_labeller, max_node_id_, 2)); + ->set_padding(MaxIdWidth(graph_labeller_, max_node_id_, 2)); - MaybePrintLazyDeoptOrExceptionHandler(os_, targets_, node, state, + MaybePrintLazyDeoptOrExceptionHandler(os_, targets_, node, graph_labeller_, max_node_id_); } void MaglevPrintingVisitor::Process(ControlNode* control_node, const ProcessingState& state) { - MaglevGraphLabeller* graph_labeller = state.graph_labeller(); - - MaybePrintEagerDeopt(os_, targets_, control_node, state, max_node_id_); + MaybePrintEagerDeopt(os_, targets_, control_node, graph_labeller_, + max_node_id_); bool has_fallthrough = false; @@ -563,7 +551,7 @@ void MaglevPrintingVisitor::Process(ControlNode* control_node, PrintVerticalArrows(os_, targets_, {}, {target}, true); os_ << "◄─"; - PrintPaddedId(os_, graph_labeller, max_node_id_, control_node, "─", -2); + PrintPaddedId(os_, graph_labeller_, max_node_id_, control_node, "─", -2); std::replace(targets_.begin(), targets_.end(), target, static_cast(nullptr)); @@ -575,7 +563,7 @@ void MaglevPrintingVisitor::Process(ControlNode* control_node, has_fallthrough |= !AddTargetIfNotNext(targets_, target, state.next_block(), &arrows_starting_here); PrintVerticalArrows(os_, targets_, arrows_starting_here); - PrintPaddedId(os_, graph_labeller, max_node_id_, control_node, + PrintPaddedId(os_, graph_labeller_, max_node_id_, control_node, has_fallthrough ? 
" " : "─"); } else if (control_node->Is()) { @@ -590,7 +578,7 @@ void MaglevPrintingVisitor::Process(ControlNode* control_node, has_fallthrough |= !AddTargetIfNotNext( targets_, true_target, state.next_block(), &arrows_starting_here); PrintVerticalArrows(os_, targets_, arrows_starting_here); - PrintPaddedId(os_, graph_labeller, max_node_id_, control_node, "─"); + PrintPaddedId(os_, graph_labeller_, max_node_id_, control_node, "─"); } else if (control_node->Is()) { std::set arrows_starting_here; for (int i = 0; i < control_node->Cast()->size(); i++) { @@ -609,14 +597,14 @@ void MaglevPrintingVisitor::Process(ControlNode* control_node, } PrintVerticalArrows(os_, targets_, arrows_starting_here); - PrintPaddedId(os_, graph_labeller, max_node_id_, control_node, "─"); + PrintPaddedId(os_, graph_labeller_, max_node_id_, control_node, "─"); } else { PrintVerticalArrows(os_, targets_); - PrintPaddedId(os_, graph_labeller, max_node_id_, control_node); + PrintPaddedId(os_, graph_labeller_, max_node_id_, control_node); } - os_ << PrintNode(graph_labeller, control_node) << "\n"; + os_ << PrintNode(graph_labeller_, control_node) << "\n"; bool printed_phis = false; if (control_node->Is()) { @@ -625,22 +613,22 @@ void MaglevPrintingVisitor::Process(ControlNode* control_node, if (target->has_phi()) { printed_phis = true; PrintVerticalArrows(os_, targets_); - PrintPadding(os_, graph_labeller, max_node_id_, -1); + PrintPadding(os_, graph_labeller_, max_node_id_, -1); os_ << (has_fallthrough ? "│" : " "); os_ << " with gap moves:\n"; int pid = state.block()->predecessor_id(); for (Phi* phi : *target->phis()) { PrintVerticalArrows(os_, targets_); - PrintPadding(os_, graph_labeller, max_node_id_, -1); + PrintPadding(os_, graph_labeller_, max_node_id_, -1); os_ << (has_fallthrough ? "│" : " "); os_ << " - "; - graph_labeller->PrintInput(os_, phi->input(pid)); - os_ << " → " << graph_labeller->NodeId(phi) << ": φ " + graph_labeller_->PrintInput(os_, phi->input(pid)); + os_ << " → " << graph_labeller_->NodeId(phi) << ": φ " << phi->result().operand() << "\n"; } if (target->state()->register_state().is_initialized()) { PrintVerticalArrows(os_, targets_); - PrintPadding(os_, graph_labeller, max_node_id_, -1); + PrintPadding(os_, graph_labeller_, max_node_id_, -1); os_ << (has_fallthrough ? "│" : " "); os_ << " with register merges:\n"; auto print_register_merges = [&](auto reg, RegisterState& state) { @@ -649,7 +637,7 @@ void MaglevPrintingVisitor::Process(ControlNode* control_node, if (LoadMergeState(state, &node, &merge)) { compiler::InstructionOperand source = merge->operand(pid); PrintVerticalArrows(os_, targets_); - PrintPadding(os_, graph_labeller, max_node_id_, -1); + PrintPadding(os_, graph_labeller_, max_node_id_, -1); os_ << (has_fallthrough ? "│" : " "); os_ << " - " << source << " → " << reg << "\n"; } @@ -664,7 +652,7 @@ void MaglevPrintingVisitor::Process(ControlNode* control_node, PrintVerticalArrows(os_, targets_); if (has_fallthrough) { - PrintPadding(os_, graph_labeller, max_node_id_, -1); + PrintPadding(os_, graph_labeller_, max_node_id_, -1); if (printed_phis) { os_ << "▼"; } else { @@ -676,12 +664,13 @@ void MaglevPrintingVisitor::Process(ControlNode* control_node, // TODO(leszeks): Allow MaglevPrintingVisitorOstream to print the arrowhead // so that it overlaps the fallthrough arrow. 
   MaglevPrintingVisitorOstream::cast(os_for_additional_info_)
-      ->set_padding(MaxIdWidth(graph_labeller, max_node_id_, 2));
+      ->set_padding(MaxIdWidth(graph_labeller_, max_node_id_, 2));
 }
 
 void PrintGraph(std::ostream& os, MaglevCompilationInfo* compilation_info,
                 Graph* const graph) {
-  GraphProcessor<MaglevPrintingVisitor> printer(compilation_info, os);
+  GraphProcessor<MaglevPrintingVisitor> printer(
+      compilation_info->graph_labeller(), os);
   printer.ProcessGraph(graph);
 }
 
diff --git a/src/maglev/maglev-graph-printer.h b/src/maglev/maglev-graph-printer.h
index 34f8e0b070..16a6b9a0c5 100644
--- a/src/maglev/maglev-graph-printer.h
+++ b/src/maglev/maglev-graph-printer.h
@@ -28,11 +28,12 @@ class ProcessingState;
 
 class MaglevPrintingVisitor {
  public:
-  explicit MaglevPrintingVisitor(std::ostream& os);
+  explicit MaglevPrintingVisitor(MaglevGraphLabeller* graph_labeller,
+                                 std::ostream& os);
 
-  void PreProcessGraph(MaglevCompilationInfo*, Graph* graph);
-  void PostProcessGraph(MaglevCompilationInfo*, Graph* graph) {}
-  void PreProcessBasicBlock(MaglevCompilationInfo*, BasicBlock* block);
+  void PreProcessGraph(Graph* graph);
+  void PostProcessGraph(Graph* graph) {}
+  void PreProcessBasicBlock(BasicBlock* block);
   void Process(Phi* phi, const ProcessingState& state);
   void Process(Node* node, const ProcessingState& state);
   void Process(ControlNode* node, const ProcessingState& state);
@@ -40,6 +41,7 @@ class MaglevPrintingVisitor {
   std::ostream& os() { return *os_for_additional_info_; }
 
 private:
+  MaglevGraphLabeller* graph_labeller_;
   std::ostream& os_;
   std::unique_ptr<MaglevPrintingVisitorOstream> os_for_additional_info_;
   std::set<BasicBlock*> loop_headers_;
diff --git a/src/maglev/maglev-graph-processor.h b/src/maglev/maglev-graph-processor.h
index 61fe53a389..60fa90c786 100644
--- a/src/maglev/maglev-graph-processor.h
+++ b/src/maglev/maglev-graph-processor.h
@@ -19,20 +19,19 @@ namespace maglev {
 // The GraphProcessor takes a NodeProcessor, and applies it to each Node in the
 // Graph by calling NodeProcessor::Process on each Node.
 //
-// The GraphProcessor also keeps track of the current ProcessingState, including
-// the inferred corresponding InterpreterFrameState and (optionally) the state
-// at the most recent Checkpoint, and passes this to the Process method.
+// The GraphProcessor also keeps track of the current ProcessingState, and
+// passes this to the Process method.
 //
 // It expects a NodeProcessor class with:
 //
 //   // A function that processes the graph before the nodes are walked.
-//   void PreProcessGraph(MaglevCompilationInfo*, Graph* graph);
+//   void PreProcessGraph(Graph* graph);
 //
 //   // A function that processes the graph after the nodes are walked.
-//   void PostProcessGraph(MaglevCompilationInfo*, Graph* graph);
+//   void PostProcessGraph(Graph* graph);
 //
 //   // A function that processes each basic block before its nodes are walked.
-//   void PreProcessBasicBlock(MaglevCompilationInfo*, BasicBlock* block);
+//   void PreProcessBasicBlock(BasicBlock* block);
 //
 //   // Process methods for each Node type. The GraphProcessor switches over
 //   // the Node's opcode, casts it to the appropriate FooNode, and dispatches
@@ -46,9 +45,7 @@ class GraphProcessor;
 
 class ProcessingState {
  public:
-  explicit ProcessingState(MaglevCompilationInfo* compilation_info,
-                           BlockConstIterator block_it)
-      : compilation_info_(compilation_info), block_it_(block_it) {}
+  explicit ProcessingState(BlockConstIterator block_it) : block_it_(block_it) {}
 
   // Disallow copies, since the underlying frame states stay mutable.
   ProcessingState(const ProcessingState&) = delete;
@@ -57,14 +54,7 @@ class ProcessingState {
   BasicBlock* block() const { return *block_it_; }
   BasicBlock* next_block() const { return *(block_it_ + 1); }
 
-  MaglevCompilationInfo* compilation_info() const { return compilation_info_; }
-
-  MaglevGraphLabeller* graph_labeller() const {
-    return compilation_info_->graph_labeller();
-  }
-
 private:
-  MaglevCompilationInfo* compilation_info_;
   BlockConstIterator block_it_;
 };
 
@@ -72,15 +62,13 @@ template <typename NodeProcessor>
 class GraphProcessor {
  public:
   template <typename... Args>
-  explicit GraphProcessor(MaglevCompilationInfo* compilation_info,
-                          Args&&... args)
-      : compilation_info_(compilation_info),
-        node_processor_(std::forward<Args>(args)...) {}
+  explicit GraphProcessor(Args&&... args)
+      : node_processor_(std::forward<Args>(args)...) {}
 
   void ProcessGraph(Graph* graph) {
     graph_ = graph;
 
-    node_processor_.PreProcessGraph(compilation_info_, graph);
+    node_processor_.PreProcessGraph(graph);
 
     for (const auto& [ref, constant] : graph->constants()) {
       node_processor_.Process(constant, GetCurrentState());
@@ -106,7 +94,7 @@ class GraphProcessor {
     for (block_it_ = graph->begin(); block_it_ != graph->end(); ++block_it_) {
       BasicBlock* block = *block_it_;
 
-      node_processor_.PreProcessBasicBlock(compilation_info_, block);
+      node_processor_.PreProcessBasicBlock(block);
 
       if (block->has_phi()) {
         for (Phi* phi : *block->phis()) {
@@ -123,16 +111,14 @@ class GraphProcessor {
       ProcessNodeBase(block->control_node(), GetCurrentState());
     }
 
-    node_processor_.PostProcessGraph(compilation_info_, graph);
+    node_processor_.PostProcessGraph(graph);
   }
 
   NodeProcessor& node_processor() { return node_processor_; }
   const NodeProcessor& node_processor() const { return node_processor_; }
 
 private:
-  ProcessingState GetCurrentState() {
-    return ProcessingState(compilation_info_, block_it_);
-  }
+  ProcessingState GetCurrentState() { return ProcessingState(block_it_); }
 
   void ProcessNodeBase(NodeBase* node, const ProcessingState& state) {
     switch (node->opcode()) {
@@ -148,7 +134,6 @@ class GraphProcessor {
 
   void PreProcess(NodeBase* node, const ProcessingState& state) {}
 
-  MaglevCompilationInfo* const compilation_info_;
   NodeProcessor node_processor_;
   Graph* graph_;
   BlockConstIterator block_it_;
@@ -163,9 +148,9 @@ class NodeMultiProcessor;
 template <>
 class NodeMultiProcessor<> {
 public:
-  void PreProcessGraph(MaglevCompilationInfo*, Graph* graph) {}
-  void PostProcessGraph(MaglevCompilationInfo*, Graph* graph) {}
-  void PreProcessBasicBlock(MaglevCompilationInfo*, BasicBlock* block) {}
+  void PreProcessGraph(Graph* graph) {}
+  void PostProcessGraph(Graph* graph) {}
+  void PreProcessBasicBlock(BasicBlock* block) {}
   void Process(NodeBase* node, const ProcessingState& state) {}
 };
 
@@ -175,23 +160,31 @@ class NodeMultiProcessor<Processor, Processors...>
   using Base = NodeMultiProcessor<Processors...>;
 
 public:
+  template <typename... Args>
+  explicit NodeMultiProcessor(Processor&& processor, Args&&... processors)
+      : Base(std::forward<Args>(processors)...),
+        processor_(std::forward<Processor>(processor)) {}
+  template <typename... Args>
+  explicit NodeMultiProcessor(Args&&... processors)
+      : Base(std::forward<Args>(processors)...) {}
+
   template <typename Node>
   void Process(Node* node, const ProcessingState& state) {
     processor_.Process(node, state);
     Base::Process(node, state);
   }
 
-  void PreProcessGraph(MaglevCompilationInfo* info, Graph* graph) {
-    processor_.PreProcessGraph(info, graph);
-    Base::PreProcessGraph(info, graph);
+  void PreProcessGraph(Graph* graph) {
+    processor_.PreProcessGraph(graph);
+    Base::PreProcessGraph(graph);
   }
-  void PostProcessGraph(MaglevCompilationInfo* info, Graph* graph) {
+  void PostProcessGraph(Graph* graph) {
     // Post process in reverse order because that kind of makes sense.
-    Base::PostProcessGraph(info, graph);
-    processor_.PostProcessGraph(info, graph);
+    Base::PostProcessGraph(graph);
+    processor_.PostProcessGraph(graph);
   }
-  void PreProcessBasicBlock(MaglevCompilationInfo* info, BasicBlock* block) {
-    processor_.PreProcessBasicBlock(info, block);
-    Base::PreProcessBasicBlock(info, block);
+  void PreProcessBasicBlock(BasicBlock* block) {
+    processor_.PreProcessBasicBlock(block);
+    Base::PreProcessBasicBlock(block);
   }
 
 private:
diff --git a/src/maglev/maglev-graph-verifier.h b/src/maglev/maglev-graph-verifier.h
index e837e640bc..af7c716c79 100644
--- a/src/maglev/maglev-graph-verifier.h
+++ b/src/maglev/maglev-graph-verifier.h
@@ -49,14 +49,15 @@ class Graph;
 // are expected to be tagged/untagged. Add more verification later.
 class MaglevGraphVerifier {
 public:
-  void PreProcessGraph(MaglevCompilationInfo* compilation_info, Graph* graph) {
+  explicit MaglevGraphVerifier(MaglevCompilationInfo* compilation_info) {
    if (compilation_info->has_graph_labeller()) {
      graph_labeller_ = compilation_info->graph_labeller();
    }
   }
 
-  void PostProcessGraph(MaglevCompilationInfo*, Graph* graph) {}
-  void PreProcessBasicBlock(MaglevCompilationInfo*, BasicBlock* block) {}
+  void PreProcessGraph(Graph* graph) {}
+  void PostProcessGraph(Graph* graph) {}
+  void PreProcessBasicBlock(BasicBlock* block) {}
 
   void CheckValueInputIs(NodeBase* node, int i, ValueRepresentation expected) {
     ValueNode* input = node->input(i).node();
diff --git a/src/maglev/maglev-regalloc.cc b/src/maglev/maglev-regalloc.cc
index 63d88529d8..c6f17e0f8c 100644
--- a/src/maglev/maglev-regalloc.cc
+++ b/src/maglev/maglev-regalloc.cc
@@ -284,8 +284,9 @@ void StraightForwardRegisterAllocator::PrintLiveRegs() const {
 
 void StraightForwardRegisterAllocator::AllocateRegisters() {
   if (FLAG_trace_maglev_regalloc) {
-    printing_visitor_.reset(new MaglevPrintingVisitor(std::cout));
-    printing_visitor_->PreProcessGraph(compilation_info_, graph_);
+    printing_visitor_.reset(new MaglevPrintingVisitor(
+        compilation_info_->graph_labeller(), std::cout));
+    printing_visitor_->PreProcessGraph(graph_);
   }
 
   for (const auto& [ref, constant] : graph_->constants()) {
@@ -326,7 +327,7 @@ void StraightForwardRegisterAllocator::AllocateRegisters() {
     }
 
     if (FLAG_trace_maglev_regalloc) {
-      printing_visitor_->PreProcessBasicBlock(compilation_info_, block);
+      printing_visitor_->PreProcessBasicBlock(block);
       printing_visitor_->os() << "live regs: ";
       PrintLiveRegs();
 
@@ -390,8 +391,7 @@ void StraightForwardRegisterAllocator::AllocateRegisters() {
       if (phi->owner() == interpreter::Register::virtual_accumulator()) {
         phi->result().SetAllocated(ForceAllocate(kReturnRegister0, phi));
         if (FLAG_trace_maglev_regalloc) {
-          printing_visitor_->Process(
-              phi, ProcessingState(compilation_info_, block_it_));
+          printing_visitor_->Process(phi, ProcessingState(block_it_));
           printing_visitor_->os() << "phi (exception message object) "
                                   << phi->result().operand() << std::endl;
         }
@@ -411,8 +411,7 @@ void StraightForwardRegisterAllocator::AllocateRegisters() {
           general_registers_.AllocateRegister(phi);
       phi->result().SetAllocated(allocation);
       if (FLAG_trace_maglev_regalloc) {
-        printing_visitor_->Process(
-            phi, ProcessingState(compilation_info_, block_it_));
+        printing_visitor_->Process(phi, ProcessingState(block_it_));
         printing_visitor_->os()
             << "phi (new reg) " << phi->result().operand() << std::endl;
       }
@@ -429,8 +428,7 @@ void StraightForwardRegisterAllocator::AllocateRegisters() {
       // TODO(verwaest): Will this be used at all?
       phi->result().SetAllocated(phi->spill_slot());
       if (FLAG_trace_maglev_regalloc) {
-        printing_visitor_->Process(
-            phi, ProcessingState(compilation_info_, block_it_));
+        printing_visitor_->Process(phi, ProcessingState(block_it_));
         printing_visitor_->os()
             << "phi (stack) " << phi->result().operand() << std::endl;
       }
@@ -606,8 +604,7 @@ void StraightForwardRegisterAllocator::AllocateNode(Node* node) {
   if (node->properties().needs_register_snapshot()) SaveRegisterSnapshot(node);
 
   if (FLAG_trace_maglev_regalloc) {
-    printing_visitor_->Process(node,
-                               ProcessingState(compilation_info_, block_it_));
+    printing_visitor_->Process(node, ProcessingState(block_it_));
     printing_visitor_->os() << "live regs: ";
     PrintLiveRegs();
     printing_visitor_->os() << "\n";
@@ -880,8 +877,7 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
     DCHECK_EQ(node->properties(), OpProperties(0));
 
     if (FLAG_trace_maglev_regalloc) {
-      printing_visitor_->Process(node,
-                                 ProcessingState(compilation_info_, block_it_));
+      printing_visitor_->Process(node, ProcessingState(block_it_));
     }
   } else if (node->Is<Deopt>()) {
     // No fixed temporaries.
@@ -893,8 +889,7 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
     UpdateUse(*node->eager_deopt_info());
 
     if (FLAG_trace_maglev_regalloc) {
-      printing_visitor_->Process(node,
-                                 ProcessingState(compilation_info_, block_it_));
+      printing_visitor_->Process(node, ProcessingState(block_it_));
     }
   } else if (auto unconditional = node->TryCast<UnconditionalControlNode>()) {
     // No fixed temporaries.
@@ -932,8 +927,7 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
     }
 
     if (FLAG_trace_maglev_regalloc) {
-      printing_visitor_->Process(node,
-                                 ProcessingState(compilation_info_, block_it_));
+      printing_visitor_->Process(node, ProcessingState(block_it_));
     }
   } else {
     DCHECK(node->Is<ConditionalControlNode>() || node->Is<Return>());
@@ -956,8 +950,7 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
     VerifyRegisterState();
 
     if (FLAG_trace_maglev_regalloc) {
-      printing_visitor_->Process(node,
-                                 ProcessingState(compilation_info_, block_it_));
+      printing_visitor_->Process(node, ProcessingState(block_it_));
     }
 
     // Finally, initialize the merge states of branch targets, including the
@@ -991,8 +984,7 @@ void StraightForwardRegisterAllocator::TryAllocateToInput(Phi* phi) {
       phi->result().SetAllocated(ForceAllocate(reg, phi));
       DCHECK_EQ(general_registers_.GetValue(reg), phi);
       if (FLAG_trace_maglev_regalloc) {
-        printing_visitor_->Process(
-            phi, ProcessingState(compilation_info_, block_it_));
+        printing_visitor_->Process(phi, ProcessingState(block_it_));
         printing_visitor_->os()
             << "phi (reuse) " << input.operand() << std::endl;
       }
diff --git a/src/maglev/maglev-vreg-allocator.h b/src/maglev/maglev-vreg-allocator.h
index cc968a8bb8..014c69dad3 100644
--- a/src/maglev/maglev-vreg-allocator.h
+++ b/src/maglev/maglev-vreg-allocator.h
@@ -26,8 +26,8 @@ class MaglevVregAllocationState {
 
 class MaglevVregAllocator {
 public:
-  void PreProcessGraph(MaglevCompilationInfo*, Graph* graph) {}
-  void PostProcessGraph(MaglevCompilationInfo*, Graph* graph) {
+  void PreProcessGraph(Graph* graph) {}
+  void PostProcessGraph(Graph* graph) {
     for (BasicBlock* block : *graph) {
       if (!block->has_phi()) continue;
       for (Phi* phi : *block->phis()) {
@@ -35,7 +35,7 @@ class MaglevVregAllocator {
       }
     }
   }
-  void PreProcessBasicBlock(MaglevCompilationInfo*, BasicBlock* block) {}
+  void PreProcessBasicBlock(BasicBlock* block) {}
 
 #define DEF_PROCESS_NODE(NAME)                             \
   void Process(NAME* node, const ProcessingState& state) { \

From c894fee0c7043fe393b063d815de9f9df1a99d28 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marja=20H=C3=B6ltt=C3=A4?=
Date: Wed, 14 Sep 2022 10:09:28 +0200
Subject: [PATCH 0095/1772] [baseline] Use a more obvious error message when
 the accumulator is clobbered
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This error type is very common and deserves its own error message
instead of the generic "Unexpected value" one.
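For readers unfamiliar with the scope this message comes from, the
following is a standalone sketch of the RAII pattern involved. It is not
part of this patch: MockAssembler and the plain integer accumulator are
invented stand-ins for the real MacroAssembler and
kInterpreterAccumulatorRegister, which the real scope compares at the
machine-register level.

  // Sketch only: mimics EnsureAccumulatorPreservedScope's "check on
  // destruction" shape with plain values instead of registers.
  #include <cassert>
  #include <cstdio>

  struct MockAssembler {
    int accumulator = 42;  // Stand-in for the accumulator register.
  };

  class EnsureAccumulatorPreservedScope {
   public:
    explicit EnsureAccumulatorPreservedScope(MockAssembler* masm)
        : masm_(masm), saved_(masm->accumulator) {}
    ~EnsureAccumulatorPreservedScope() {
      // With this CL, a failure here aborts with "Accumulator clobbered"
      // instead of the generic "Unexpected value".
      assert(masm_->accumulator == saved_ && "Accumulator clobbered");
    }

   private:
    MockAssembler* masm_;
    int saved_;
  };

  int main() {
    MockAssembler masm;
    {
      EnsureAccumulatorPreservedScope scope(&masm);
      // Code that must not touch the accumulator would run here.
    }
    std::printf("accumulator preserved: %d\n", masm.accumulator);
    return 0;
  }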
Change-Id: I07a0de8b190db58e97fae98d0f7347872efd9995
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3892694
Commit-Queue: Marja Hölttä
Reviewed-by: Leszek Swirski
Cr-Commit-Position: refs/heads/main@{#83180}
---
 src/baseline/arm/baseline-assembler-arm-inl.h         | 2 +-
 src/baseline/arm64/baseline-assembler-arm64-inl.h     | 2 +-
 src/baseline/ia32/baseline-assembler-ia32-inl.h       | 2 +-
 src/baseline/loong64/baseline-assembler-loong64-inl.h | 2 +-
 src/baseline/mips64/baseline-assembler-mips64-inl.h   | 2 +-
 src/baseline/ppc/baseline-assembler-ppc-inl.h         | 2 +-
 src/baseline/riscv/baseline-assembler-riscv-inl.h     | 2 +-
 src/baseline/s390/baseline-assembler-s390-inl.h       | 2 +-
 src/baseline/x64/baseline-assembler-x64-inl.h         | 2 +-
 src/codegen/bailout-reason.h                          | 1 +
 10 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/src/baseline/arm/baseline-assembler-arm-inl.h b/src/baseline/arm/baseline-assembler-arm-inl.h
index 294c536073..5cb855e416 100644
--- a/src/baseline/arm/baseline-assembler-arm-inl.h
+++ b/src/baseline/arm/baseline-assembler-arm-inl.h
@@ -610,7 +610,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
 inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
     Register reg) {
   assembler_->masm()->cmp(reg, kInterpreterAccumulatorRegister);
-  assembler_->masm()->Assert(eq, AbortReason::kUnexpectedValue);
+  assembler_->masm()->Assert(eq, AbortReason::kAccumulatorClobbered);
 }
 
 }  // namespace baseline
diff --git a/src/baseline/arm64/baseline-assembler-arm64-inl.h b/src/baseline/arm64/baseline-assembler-arm64-inl.h
index be335a41e2..08a9491ce8 100644
--- a/src/baseline/arm64/baseline-assembler-arm64-inl.h
+++ b/src/baseline/arm64/baseline-assembler-arm64-inl.h
@@ -677,7 +677,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
 inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
     Register reg) {
   assembler_->masm()->CmpTagged(reg, kInterpreterAccumulatorRegister);
-  assembler_->masm()->Assert(eq, AbortReason::kUnexpectedValue);
+  assembler_->masm()->Assert(eq, AbortReason::kAccumulatorClobbered);
 }
 
 }  // namespace baseline
diff --git a/src/baseline/ia32/baseline-assembler-ia32-inl.h b/src/baseline/ia32/baseline-assembler-ia32-inl.h
index 5dfd80d52a..2c63dac2b6 100644
--- a/src/baseline/ia32/baseline-assembler-ia32-inl.h
+++ b/src/baseline/ia32/baseline-assembler-ia32-inl.h
@@ -577,7 +577,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
 inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
     Register reg) {
   assembler_->masm()->cmp(reg, kInterpreterAccumulatorRegister);
-  assembler_->masm()->Assert(equal, AbortReason::kUnexpectedValue);
+  assembler_->masm()->Assert(equal, AbortReason::kAccumulatorClobbered);
 }
 
 }  // namespace baseline
diff --git a/src/baseline/loong64/baseline-assembler-loong64-inl.h b/src/baseline/loong64/baseline-assembler-loong64-inl.h
index d33ff34d4b..cc5694554a 100644
--- a/src/baseline/loong64/baseline-assembler-loong64-inl.h
+++ b/src/baseline/loong64/baseline-assembler-loong64-inl.h
@@ -569,7 +569,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
 
 inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
     Register reg) {
-  assembler_->masm()->Assert(eq, AbortReason::kUnexpectedValue, reg,
+  assembler_->masm()->Assert(eq, AbortReason::kAccumulatorClobbered, reg,
                              Operand(kInterpreterAccumulatorRegister));
 }
 
diff --git a/src/baseline/mips64/baseline-assembler-mips64-inl.h b/src/baseline/mips64/baseline-assembler-mips64-inl.h
index b828ea3bdc..17bd834d5d 100644
--- a/src/baseline/mips64/baseline-assembler-mips64-inl.h
+++ b/src/baseline/mips64/baseline-assembler-mips64-inl.h
@@ -581,7 +581,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
 
 inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
     Register reg) {
-  assembler_->masm()->Assert(eq, AbortReason::kUnexpectedValue, reg,
+  assembler_->masm()->Assert(eq, AbortReason::kAccumulatorClobbered, reg,
                              Operand(kInterpreterAccumulatorRegister));
 }
 
diff --git a/src/baseline/ppc/baseline-assembler-ppc-inl.h b/src/baseline/ppc/baseline-assembler-ppc-inl.h
index d241408459..abe99c34cf 100644
--- a/src/baseline/ppc/baseline-assembler-ppc-inl.h
+++ b/src/baseline/ppc/baseline-assembler-ppc-inl.h
@@ -754,7 +754,7 @@ inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
   } else {
     assembler_->masm()->CmpU64(reg, kInterpreterAccumulatorRegister);
   }
-  assembler_->masm()->Assert(eq, AbortReason::kUnexpectedValue);
+  assembler_->masm()->Assert(eq, AbortReason::kAccumulatorClobbered);
 }
 
 }  // namespace baseline
diff --git a/src/baseline/riscv/baseline-assembler-riscv-inl.h b/src/baseline/riscv/baseline-assembler-riscv-inl.h
index 87afe5775d..59cffa9e29 100644
--- a/src/baseline/riscv/baseline-assembler-riscv-inl.h
+++ b/src/baseline/riscv/baseline-assembler-riscv-inl.h
@@ -602,7 +602,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
 inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
     Register reg) {
-  assembler_->masm()->Assert(eq, AbortReason::kUnexpectedValue, reg,
+  assembler_->masm()->Assert(eq, AbortReason::kAccumulatorClobbered, reg,
                              Operand(kInterpreterAccumulatorRegister));
 }
 }  // namespace baseline
diff --git a/src/baseline/s390/baseline-assembler-s390-inl.h b/src/baseline/s390/baseline-assembler-s390-inl.h
index a7ec8d5ac7..ae77a00b54 100644
--- a/src/baseline/s390/baseline-assembler-s390-inl.h
+++ b/src/baseline/s390/baseline-assembler-s390-inl.h
@@ -752,7 +752,7 @@ inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
   } else {
     assembler_->masm()->CmpU64(reg, kInterpreterAccumulatorRegister);
   }
-  assembler_->masm()->Assert(eq, AbortReason::kUnexpectedValue);
+  assembler_->masm()->Assert(eq, AbortReason::kAccumulatorClobbered);
 }
 
 }  // namespace baseline
diff --git a/src/baseline/x64/baseline-assembler-x64-inl.h b/src/baseline/x64/baseline-assembler-x64-inl.h
index e80b48be8f..f05d829cb5 100644
--- a/src/baseline/x64/baseline-assembler-x64-inl.h
+++ b/src/baseline/x64/baseline-assembler-x64-inl.h
@@ -639,7 +639,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
 inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
     Register reg) {
   assembler_->masm()->cmp_tagged(reg, kInterpreterAccumulatorRegister);
-  assembler_->masm()->Assert(equal, AbortReason::kUnexpectedValue);
+  assembler_->masm()->Assert(equal, AbortReason::kAccumulatorClobbered);
 }
 
 }  // namespace baseline
diff --git a/src/codegen/bailout-reason.h b/src/codegen/bailout-reason.h
index 8a3eae2dc8..35056c4137 100644
--- a/src/codegen/bailout-reason.h
+++ b/src/codegen/bailout-reason.h
@@ -17,6 +17,7 @@ namespace internal {
     "32 bit value in register is not zero-extended")                           \
   V(kSignedBitOfSmiIsNotZero, "Signed bit of 31 bit smi register is not zero") \
   V(kAPICallReturnedInvalidObject, "API call returned invalid object")         \
+  V(kAccumulatorClobbered, "Accumulator clobbered")                            \
   V(kAllocatingNonEmptyPackedArray, "Allocating non-empty packed array")       \
   V(kAllocationIsNotDoubleAligned, "Allocation is not double aligned")         \
  V(kExpectedOptimizationSentinel,                                             \

From 33e90400d095ffdcf0c75fab56fd61ebfbb7d4e6 Mon Sep 17 00:00:00 2001
From: Jakob Linke
Date: Wed, 14 Sep 2022 10:48:04 +0200
Subject: [PATCH 0096/1772] [maglev] Restore the correct context for exception
 handlers

Ignition remembers the correct context to restore when entering an
exception handler by moving the context to an interpreter register
when entering a try block, and restoring it from there when unwinding
the frame and entering the catch block.

Maglev code has to do the same by taking the context from the
appropriate register for the handler's frame state.

Bug: v8:7700
Change-Id: I294fcccc845c660b2289b6d7b40f49f1aa46283d
Fixed: chromium:1359928
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3892352
Reviewed-by: Leszek Swirski
Auto-Submit: Jakob Linke
Commit-Queue: Leszek Swirski
Cr-Commit-Position: refs/heads/main@{#83181}
---
 src/maglev/maglev-graph-builder.cc            |  8 ++++--
 src/maglev/maglev-interpreter-frame-state.cc  |  6 ++--
 src/maglev/maglev-interpreter-frame-state.h   |  2 +-
 .../mjsunit/maglev/regress/regress-1359928.js | 28 +++++++++++++++++++
 4 files changed, 37 insertions(+), 7 deletions(-)
 create mode 100644 test/mjsunit/maglev/regress/regress-1359928.js

diff --git a/src/maglev/maglev-graph-builder.cc b/src/maglev/maglev-graph-builder.cc
index daef5c09e1..27d0b7d75d 100644
--- a/src/maglev/maglev-graph-builder.cc
+++ b/src/maglev/maglev-graph-builder.cc
@@ -144,17 +144,19 @@ void MaglevGraphBuilder::BuildMergeStates() {
   if (bytecode().handler_table_size() > 0) {
     HandlerTable table(*bytecode().object());
     for (int i = 0; i < table.NumberOfRangeEntries(); i++) {
-      int offset = table.GetRangeHandler(i);
+      const int offset = table.GetRangeHandler(i);
+      const interpreter::Register context_reg(table.GetRangeData(i));
       const compiler::BytecodeLivenessState* liveness = GetInLivenessFor(offset);
       DCHECK_EQ(NumPredecessors(offset), 0);
       DCHECK_NULL(merge_states_[offset]);
       if (FLAG_trace_maglev_graph_building) {
         std::cout << "- Creating exception merge state at @" << offset
-                  << std::endl;
+                  << ", context register r" << context_reg.index() << std::endl;
       }
       merge_states_[offset] = MergePointInterpreterFrameState::NewForCatchBlock(
-          *compilation_unit_, liveness, offset, graph_, is_inline());
+          *compilation_unit_, liveness, offset, context_reg, graph_,
+          is_inline());
     }
   }
 }
diff --git a/src/maglev/maglev-interpreter-frame-state.cc b/src/maglev/maglev-interpreter-frame-state.cc
index ec6caabbfe..a2ceb02607 100644
--- a/src/maglev/maglev-interpreter-frame-state.cc
+++ b/src/maglev/maglev-interpreter-frame-state.cc
@@ -16,7 +16,7 @@ MergePointInterpreterFrameState*
 MergePointInterpreterFrameState::NewForCatchBlock(
     const MaglevCompilationUnit& unit,
     const compiler::BytecodeLivenessState* liveness, int handler_offset,
-    Graph* graph, bool is_inline) {
+    interpreter::Register context_register, Graph* graph, bool is_inline) {
   Zone* const zone = unit.zone();
   MergePointInterpreterFrameState* state =
       zone->New<MergePointInterpreterFrameState>(
@@ -49,8 +49,8 @@ MergePointInterpreterFrameState::NewForCatchBlock(
           entry = state->NewExceptionPhi(zone, reg, handler_offset);
         }
       });
-  frame_state.context(unit) = state->NewExceptionPhi(
-      zone, interpreter::Register::current_context(), handler_offset);
+  frame_state.context(unit) =
+      state->NewExceptionPhi(zone, context_register, handler_offset);
   frame_state.ForEachLocal(
       unit, [&](ValueNode*& entry, interpreter::Register reg) {
         entry = state->NewExceptionPhi(zone, reg, handler_offset);
       });
diff --git a/src/maglev/maglev-interpreter-frame-state.h b/src/maglev/maglev-interpreter-frame-state.h
index 31f2bbe959..8ddda35edd 100644
--- a/src/maglev/maglev-interpreter-frame-state.h
+++ b/src/maglev/maglev-interpreter-frame-state.h
@@ -483,7 +483,7 @@ class MergePointInterpreterFrameState {
   static MergePointInterpreterFrameState* NewForCatchBlock(
       const MaglevCompilationUnit& unit,
       const compiler::BytecodeLivenessState* liveness, int handler_offset,
-      Graph* graph, bool is_inline);
+      interpreter::Register context_register, Graph* graph, bool is_inline);
 
   // Merges an unmerged framestate with a possibly merged framestate into |this|
   // framestate.
diff --git a/test/mjsunit/maglev/regress/regress-1359928.js b/test/mjsunit/maglev/regress/regress-1359928.js
new file mode 100644
index 0000000000..3f46ab9eb0
--- /dev/null
+++ b/test/mjsunit/maglev/regress/regress-1359928.js
@@ -0,0 +1,28 @@
+// Copyright 2022 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --no-concurrent-recompilation
+
+function __f_0() {
+  var __v_1;
+  try {
+    class __c_0 extends (__v_4) {}
+  } catch {
+    console.log("soozie");
+  }
+  try {
+    Object.defineProperty(__v_2, 'x');
+  } catch {}
+  try {
+    console.log("foozie");
+    class __c_2 extends (eval('delete obj.x'), class {}) {}
+  } catch (__v_7) {
+    console.log("boozie");
+    __v_1 = __v_7;
+  }
+}
+%PrepareFunctionForOptimization(__f_0);
+__f_0();
+%OptimizeMaglevOnNextCall(__f_0);
+__f_0();

From 74c2cec6ca8302a3d2a8359681a21fb47693dc1a Mon Sep 17 00:00:00 2001
From: Greg Thompson
Date: Wed, 14 Sep 2022 08:48:26 +0200
Subject: [PATCH 0097/1772] [fuchsia] Include what you use fixes in
 test/unittests/BUILD.gn

Bug: chromium:1092804
Change-Id: I9f4385d00af464eb2b9251b7c1dcfe0d4b69cdf2
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3891279
Auto-Submit: Greg Thompson
Commit-Queue: Igor Sheludko
Reviewed-by: Igor Sheludko
Cr-Commit-Position: refs/heads/main@{#83182}
---
 test/unittests/BUILD.gn | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/test/unittests/BUILD.gn b/test/unittests/BUILD.gn
index fbc7dd5368..f5fa21f23a 100644
--- a/test/unittests/BUILD.gn
+++ b/test/unittests/BUILD.gn
@@ -5,7 +5,9 @@
 import("../../gni/v8.gni")
 
 if (is_fuchsia) {
-  import("//build/config/fuchsia/rules.gni")
+  import("//build/config/fuchsia/generate_runner_scripts.gni")
+  import("//third_party/fuchsia-sdk/sdk/build/component.gni")
+  import("//third_party/fuchsia-sdk/sdk/build/package.gni")
 
   fuchsia_component("v8_unittests_component") {
     testonly = true

From f2b98fa8bbf19512239c1333b18712234afe4388 Mon Sep 17 00:00:00 2001
From: Manos Koukoutos
Date: Wed, 14 Sep 2022 11:33:31 +0200
Subject: [PATCH 0098/1772] [wasm] Index wrappers by isorecursive canonical
 type

Before, import and export wrappers were cached based on their
signature. This change
- makes wrapper canonicalization consistent with that of types and
  call_indirect signatures under --wasm-type-canonicalization,
- removes the last uses of signature maps, which will enable us to
  remove them in a future CL.
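To illustrate the idea behind this change: caching by signature pointer
cannot deduplicate wrappers across modules, while caching by a
canonicalized (hash-consed) signature index can. The following
standalone sketch is not the real V8 API; the string-based Sig and the
Canonicalize helper are invented stand-ins for FunctionSig and
TypeCanonicalizer::AddRecursiveGroup, which canonicalize full
isorecursive type groups rather than flat lists.

  #include <cstdint>
  #include <iostream>
  #include <map>
  #include <string>
  #include <vector>

  using Sig = std::vector<std::string>;  // e.g. {"i32", "i32", "->", "i32"}

  // Hash-consing by structure: equal shapes get the same index.
  uint32_t Canonicalize(const Sig& sig) {
    static std::map<Sig, uint32_t> groups;
    auto [it, inserted] =
        groups.emplace(sig, static_cast<uint32_t>(groups.size()));
    return it->second;
  }

  int main() {
    Sig module_a_sig = {"i32", "i32", "->", "i32"};
    Sig module_b_sig = {"i32", "i32", "->", "i32"};  // distinct object, same shape
    // Pointer-based keys would differ; canonical indices collide as desired,
    // so both modules can share a single compiled wrapper.
    std::cout << Canonicalize(module_a_sig) << " == "
              << Canonicalize(module_b_sig) << '\n';  // prints "0 == 0"
    return 0;
  }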
Change-Id: I512bc234f0ae10e50bd94237e8e675ca47ed13c5
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3891250
Commit-Queue: Manos Koukoutos
Reviewed-by: Jakob Kummerow
Cr-Commit-Position: refs/heads/main@{#83183}
---
 src/runtime/runtime-wasm.cc                   |  4 +-
 src/wasm/canonical-types.cc                   | 33 +++++++++-
 src/wasm/canonical-types.h                    |  5 ++
 src/wasm/function-compiler.cc                 | 24 ++++---
 src/wasm/function-compiler.h                  | 10 ++-
 src/wasm/module-compiler.cc                   | 65 ++++++++++++-------
 src/wasm/module-compiler.h                    | 14 ++--
 src/wasm/module-decoder-impl.h                | 12 +---
 src/wasm/module-instantiate.cc                | 48 +++++++++-----
 src/wasm/wasm-import-wrapper-cache.cc         | 10 +--
 src/wasm/wasm-import-wrapper-cache.h          | 32 ++++-----
 src/wasm/wasm-module.cc                       | 32 ++++-----
 src/wasm/wasm-module.h                        | 12 ++--
 src/wasm/wasm-objects.cc                      | 23 ++++---
 src/wasm/wasm-objects.tq                      |  2 +
 .../wasm/test-wasm-import-wrapper-cache.cc    | 57 ++++++++++------
 test/cctest/wasm/wasm-run-utils.cc            |  5 +-
 test/cctest/wasm/wasm-run-utils.h             |  2 +-
 18 files changed, 241 insertions(+), 149 deletions(-)

diff --git a/src/runtime/runtime-wasm.cc b/src/runtime/runtime-wasm.cc
index 1a8744d262..4f9bd6a616 100644
--- a/src/runtime/runtime-wasm.cc
+++ b/src/runtime/runtime-wasm.cc
@@ -293,6 +293,8 @@ RUNTIME_FUNCTION(Runtime_WasmCompileWrapper) {
   const int function_index = function_data->function_index();
   const wasm::WasmFunction& function = module->functions[function_index];
   const wasm::FunctionSig* sig = function.sig;
+  const uint32_t canonical_sig_index =
+      module->isorecursive_canonical_type_ids[function.sig_index];
 
   // The start function is not guaranteed to be registered as
   // an exported function (although it is called as one).
@@ -307,7 +309,7 @@ RUNTIME_FUNCTION(Runtime_WasmCompileWrapper) {
   Handle<CodeT> wrapper_code =
       wasm::JSToWasmWrapperCompilationUnit::CompileSpecificJSToWasmWrapper(
-          isolate, sig, module);
+          isolate, sig, canonical_sig_index, module);
 
   // Replace the wrapper for the function that triggered the tier-up.
   // This is to verify that the wrapper is replaced, even if the function
diff --git a/src/wasm/canonical-types.cc b/src/wasm/canonical-types.cc
index 43cac17589..b7260081f4 100644
--- a/src/wasm/canonical-types.cc
+++ b/src/wasm/canonical-types.cc
@@ -10,7 +10,7 @@ namespace v8 {
 namespace internal {
 namespace wasm {
 
-V8_EXPORT_PRIVATE TypeCanonicalizer* GetTypeCanonicalizer() {
+TypeCanonicalizer* GetTypeCanonicalizer() {
   return GetWasmEngine()->type_canonicalizer();
 }
 
@@ -55,6 +55,33 @@ void TypeCanonicalizer::AddRecursiveGroup(WasmModule* module, uint32_t size) {
   }
 }
 
+uint32_t TypeCanonicalizer::AddRecursiveGroup(const FunctionSig* sig) {
+  base::MutexGuard mutex_guard(&mutex_);
+// Types in the signature must be module-independent.
+#if DEBUG
+  for (ValueType type : sig->all()) DCHECK(!type.has_index());
+#endif
+  CanonicalGroup group;
+  group.types.resize(1);
+  group.types[0].type_def = TypeDefinition(sig, kNoSuperType);
+  group.types[0].is_relative_supertype = false;
+  int canonical_index = FindCanonicalGroup(group);
+  if (canonical_index < 0) {
+    canonical_index = static_cast<int>(canonical_supertypes_.size());
+    // We need to copy the signature in the local zone, or else we risk
+    // storing a dangling pointer in the future.
+    auto builder = FunctionSig::Builder(&zone_, sig->return_count(),
+                                        sig->parameter_count());
+    for (auto type : sig->returns()) builder.AddReturn(type);
+    for (auto type : sig->parameters()) builder.AddParam(type);
+    const FunctionSig* allocated_sig = builder.Build();
+    group.types[0].type_def = TypeDefinition(allocated_sig, kNoSuperType);
+    canonical_groups_.emplace(group, canonical_index);
+    canonical_supertypes_.emplace_back(kNoSuperType);
+  }
+  return canonical_index;
+}
+
 // An index in a type gets mapped to a relative index if it is inside the new
 // canonical group, or the canonical representative if it is not.
 ValueType TypeCanonicalizer::CanonicalizeValueType(
@@ -88,8 +115,8 @@ bool TypeCanonicalizer::IsCanonicalSubtype(uint32_t sub_index,
   return false;
 }
 
-// Map all type indices (including supertype) inside {type} to indices relative
-// to {recursive_group_start}.
+// Map all type indices (including supertype) inside {type} to indices
+// relative to {recursive_group_start}.
 TypeCanonicalizer::CanonicalType TypeCanonicalizer::CanonicalizeTypeDef(
     const WasmModule* module, TypeDefinition type,
     uint32_t recursive_group_start) {
diff --git a/src/wasm/canonical-types.h b/src/wasm/canonical-types.h
index 47ad691083..ff0df8c568 100644
--- a/src/wasm/canonical-types.h
+++ b/src/wasm/canonical-types.h
@@ -43,6 +43,11 @@ class TypeCanonicalizer {
   // Modifies {module->isorecursive_canonical_type_ids}.
   V8_EXPORT_PRIVATE void AddRecursiveGroup(WasmModule* module, uint32_t size);
 
+  // Adds a module-independent signature as a recursive group, and
+  // canonicalizes it if an identical one is found. Returns the canonical
+  // index of the added signature.
+  V8_EXPORT_PRIVATE uint32_t AddRecursiveGroup(const FunctionSig* sig);
+
   // Returns if the type at {sub_index} in {sub_module} is a subtype of the
   // type at {super_index} in {super_module} after canonicalization.
   V8_EXPORT_PRIVATE bool IsCanonicalSubtype(uint32_t sub_index,
diff --git a/src/wasm/function-compiler.cc b/src/wasm/function-compiler.cc
index d4230e3b46..c0795308c0 100644
--- a/src/wasm/function-compiler.cc
+++ b/src/wasm/function-compiler.cc
@@ -204,12 +204,13 @@ bool UseGenericWrapper(const FunctionSig* sig) {
 }  // namespace
 
 JSToWasmWrapperCompilationUnit::JSToWasmWrapperCompilationUnit(
-    Isolate* isolate, const FunctionSig* sig, const WasmModule* module,
-    bool is_import, const WasmFeatures& enabled_features,
-    AllowGeneric allow_generic)
+    Isolate* isolate, const FunctionSig* sig, uint32_t canonical_sig_index,
+    const WasmModule* module, bool is_import,
+    const WasmFeatures& enabled_features, AllowGeneric allow_generic)
     : isolate_(isolate),
      is_import_(is_import),
      sig_(sig),
+      canonical_sig_index_(canonical_sig_index),
      use_generic_wrapper_(allow_generic && UseGenericWrapper(sig) &&
                           !is_import),
      job_(use_generic_wrapper_
@@ -248,24 +249,27 @@ Handle<CodeT> JSToWasmWrapperCompilationUnit::Finalize() {
 
 // static
 Handle<CodeT> JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper(
-    Isolate* isolate, const FunctionSig* sig, const WasmModule* module,
-    bool is_import) {
+    Isolate* isolate, const FunctionSig* sig, uint32_t canonical_sig_index,
+    const WasmModule* module, bool is_import) {
   // Run the compilation unit synchronously.
   WasmFeatures enabled_features = WasmFeatures::FromIsolate(isolate);
-  JSToWasmWrapperCompilationUnit unit(isolate, sig, module, is_import,
-                                      enabled_features, kAllowGeneric);
+  JSToWasmWrapperCompilationUnit unit(isolate, sig, canonical_sig_index, module,
+                                      is_import, enabled_features,
+                                      kAllowGeneric);
   unit.Execute();
   return unit.Finalize();
 }
 
 // static
 Handle<CodeT> JSToWasmWrapperCompilationUnit::CompileSpecificJSToWasmWrapper(
-    Isolate* isolate, const FunctionSig* sig, const WasmModule* module) {
+    Isolate* isolate, const FunctionSig* sig, uint32_t canonical_sig_index,
+    const WasmModule* module) {
   // Run the compilation unit synchronously.
   const bool is_import = false;
   WasmFeatures enabled_features = WasmFeatures::FromIsolate(isolate);
-  JSToWasmWrapperCompilationUnit unit(isolate, sig, module, is_import,
-                                      enabled_features, kDontAllowGeneric);
+  JSToWasmWrapperCompilationUnit unit(isolate, sig, canonical_sig_index, module,
+                                      is_import, enabled_features,
+                                      kDontAllowGeneric);
   unit.Execute();
   return unit.Finalize();
 }
diff --git a/src/wasm/function-compiler.h b/src/wasm/function-compiler.h
index 71744db8ee..6d2d17cca2 100644
--- a/src/wasm/function-compiler.h
+++ b/src/wasm/function-compiler.h
@@ -104,6 +104,7 @@ class V8_EXPORT_PRIVATE JSToWasmWrapperCompilationUnit final {
   enum AllowGeneric : bool { kAllowGeneric = true, kDontAllowGeneric = false };
 
   JSToWasmWrapperCompilationUnit(Isolate* isolate, const FunctionSig* sig,
+                                 uint32_t canonical_sig_index,
                                  const wasm::WasmModule* module, bool is_import,
                                  const WasmFeatures& enabled_features,
                                  AllowGeneric allow_generic);
@@ -116,18 +117,20 @@ class V8_EXPORT_PRIVATE JSToWasmWrapperCompilationUnit final {
 
   bool is_import() const { return is_import_; }
   const FunctionSig* sig() const { return sig_; }
+  uint32_t canonical_sig_index() const { return canonical_sig_index_; }
 
   // Run a compilation unit synchronously.
   static Handle<CodeT> CompileJSToWasmWrapper(Isolate* isolate,
                                               const FunctionSig* sig,
+                                              uint32_t canonical_sig_index,
                                               const WasmModule* module,
                                               bool is_import);
 
   // Run a compilation unit synchronously, but ask for the specific
   // wrapper.
-  static Handle<CodeT> CompileSpecificJSToWasmWrapper(Isolate* isolate,
-                                                      const FunctionSig* sig,
-                                                      const WasmModule* module);
+  static Handle<CodeT> CompileSpecificJSToWasmWrapper(
+      Isolate* isolate, const FunctionSig* sig, uint32_t canonical_sig_index,
+      const WasmModule* module);
 
 private:
  // Wrapper compilation is bound to an isolate. Concurrent accesses to the
@@ -137,6 +140,7 @@ class V8_EXPORT_PRIVATE JSToWasmWrapperCompilationUnit final {
   Isolate* isolate_;
   bool is_import_;
   const FunctionSig* sig_;
+  uint32_t canonical_sig_index_;
   bool use_generic_wrapper_;
   std::unique_ptr<TurbofanCompilationJob> job_;
 };
diff --git a/src/wasm/module-compiler.cc b/src/wasm/module-compiler.cc
index ce046834ec..d3f56a9517 100644
--- a/src/wasm/module-compiler.cc
+++ b/src/wasm/module-compiler.cc
@@ -1713,7 +1713,8 @@ CompilationExecutionResult ExecuteCompilationUnits(
   UNREACHABLE();
 }
 
-using JSToWasmWrapperKey = std::pair<bool, FunctionSig>;
+// (function is imported, canonical type index)
+using JSToWasmWrapperKey = std::pair<bool, uint32_t>;
 
 // Returns the number of units added.
 int AddExportWrapperUnits(Isolate* isolate, NativeModule* native_module,
@@ -1722,11 +1723,14 @@ int AddExportWrapperUnits(Isolate* isolate, NativeModule* native_module,
   for (auto exp : native_module->module()->export_table) {
     if (exp.kind != kExternalFunction) continue;
     auto& function = native_module->module()->functions[exp.index];
-    JSToWasmWrapperKey key(function.imported, *function.sig);
+    uint32_t canonical_type_index =
+        native_module->module()
+            ->isorecursive_canonical_type_ids[function.sig_index];
+    JSToWasmWrapperKey key(function.imported, canonical_type_index);
     if (keys.insert(key).second) {
       auto unit = std::make_shared<JSToWasmWrapperCompilationUnit>(
-          isolate, function.sig, native_module->module(), function.imported,
-          native_module->enabled_features(),
+          isolate, function.sig, canonical_type_index, native_module->module(),
+          function.imported, native_module->enabled_features(),
           JSToWasmWrapperCompilationUnit::kAllowGeneric);
       builder->AddJSToWasmWrapperUnit(std::move(unit));
     }
@@ -1743,14 +1747,18 @@ int AddImportWrapperUnits(NativeModule* native_module,
       keys;
   int num_imported_functions = native_module->num_imported_functions();
   for (int func_index = 0; func_index < num_imported_functions; func_index++) {
-    const FunctionSig* sig = native_module->module()->functions[func_index].sig;
-    if (!IsJSCompatibleSignature(sig, native_module->module(),
+    const WasmFunction& function =
+        native_module->module()->functions[func_index];
+    if (!IsJSCompatibleSignature(function.sig, native_module->module(),
                                  native_module->enabled_features())) {
       continue;
    }
+    uint32_t canonical_type_index =
+        native_module->module()
+            ->isorecursive_canonical_type_ids[function.sig_index];
     WasmImportWrapperCache::CacheKey key(
-        compiler::kDefaultImportCallKind, sig,
-        static_cast<int>(sig->parameter_count()), kNoSuspend);
+        compiler::kDefaultImportCallKind, canonical_type_index,
+        static_cast<int>(function.sig->parameter_count()), kNoSuspend);
     auto it = keys.insert(key);
     if (it.second) {
       // Ensure that all keys exist in the cache, so that we can populate the
@@ -3553,8 +3561,8 @@ void CompilationStateImpl::FinalizeJSToWasmWrappers(
   for (auto& unit : js_to_wasm_wrapper_units_) {
     DCHECK_EQ(isolate, unit->isolate());
     Handle<CodeT> code = unit->Finalize();
-    int wrapper_index =
-        GetExportWrapperIndex(module, unit->sig(), unit->is_import());
+    int wrapper_index = GetExportWrapperIndex(
+        module, unit->canonical_sig_index(), unit->is_import());
     (*export_wrappers_out)->set(wrapper_index, *code);
     RecordStats(*code, isolate->counters());
   }
@@ -3752,11 +3760,14 @@ void CompilationStateImpl::PublishCompilationResults(
     DCHECK_LE(0, func_index);
     DCHECK_LT(func_index, native_module_->num_functions());
     if (func_index < num_imported_functions) {
-      const FunctionSig* sig =
-          native_module_->module()->functions[func_index].sig;
+      const WasmFunction& function =
+          native_module_->module()->functions[func_index];
+      uint32_t canonical_type_index =
+          native_module_->module()
+              ->isorecursive_canonical_type_ids[function.sig_index];
       WasmImportWrapperCache::CacheKey key(
-          compiler::kDefaultImportCallKind, sig,
-          static_cast<int>(sig->parameter_count()), kNoSuspend);
+          compiler::kDefaultImportCallKind, canonical_type_index,
+          static_cast<int>(function.sig->parameter_count()), kNoSuspend);
       // If two imported functions have the same key, only one of them should
       // have been added as a compilation unit. So it is always the first time
      // we compile a wrapper for this key here.
@@ -3889,8 +3900,8 @@ void CompilationStateImpl::WaitForCompilationEvent(
 }
 
 namespace {
-using JSToWasmWrapperQueue =
-    WrapperQueue<JSToWasmWrapperKey, base::hash<JSToWasmWrapperKey>>;
+using JSToWasmWrapperQueue = WrapperQueue<JSToWasmWrapperKey, std::nullptr_t,
+                                          base::hash<JSToWasmWrapperKey>>;
 using JSToWasmWrapperUnitMap =
     std::unordered_map<JSToWasmWrapperKey,
                        std::unique_ptr<JSToWasmWrapperCompilationUnit>,
                        base::hash<JSToWasmWrapperKey>>;
 
 class CompileJSToWasmWrapperJob final : public JobTask {
@@ -3905,8 +3916,10 @@ class CompileJSToWasmWrapperJob final : public JobTask {
         outstanding_units_(queue->size()) {}
 
   void Run(JobDelegate* delegate) override {
-    while (base::Optional<JSToWasmWrapperKey> key = queue_->pop()) {
-      JSToWasmWrapperCompilationUnit* unit = (*compilation_units_)[*key].get();
+    while (base::Optional<std::pair<JSToWasmWrapperKey, std::nullptr_t>> key =
+               queue_->pop()) {
+      JSToWasmWrapperCompilationUnit* unit =
+          (*compilation_units_)[key->first].get();
       unit->Execute();
       outstanding_units_.fetch_sub(1, std::memory_order_relaxed);
       if (delegate && delegate->ShouldYield()) return;
@@ -3943,10 +3956,13 @@ void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
   for (auto exp : module->export_table) {
     if (exp.kind != kExternalFunction) continue;
     auto& function = module->functions[exp.index];
-    JSToWasmWrapperKey key(function.imported, *function.sig);
-    if (queue.insert(key)) {
+    uint32_t canonical_type_index =
+        module->isorecursive_canonical_type_ids[function.sig_index];
+    JSToWasmWrapperKey key(function.imported, canonical_type_index);
+    if (queue.insert(key, nullptr)) {
       auto unit = std::make_unique<JSToWasmWrapperCompilationUnit>(
-          isolate, function.sig, module, function.imported, enabled_features,
+          isolate, function.sig, canonical_type_index, module,
+          function.imported, enabled_features,
           JSToWasmWrapperCompilationUnit::kAllowGeneric);
       compilation_units.emplace(key, std::move(unit));
     }
@@ -3981,7 +3997,7 @@ void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
     JSToWasmWrapperCompilationUnit* unit = pair.second.get();
     DCHECK_EQ(isolate, unit->isolate());
     Handle<CodeT> code = unit->Finalize();
-    int wrapper_index = GetExportWrapperIndex(module, &key.second, key.first);
+    int wrapper_index = GetExportWrapperIndex(module, key.second, key.first);
     (*export_wrappers_out)->set(wrapper_index, *code);
     RecordStats(*code, isolate->counters());
   }
@@ -3990,12 +4006,13 @@ void CompileJsToWasmWrappers(Isolate* isolate, const WasmModule* module,
 WasmCode* CompileImportWrapper(
     NativeModule* native_module, Counters* counters,
     compiler::WasmImportCallKind kind, const FunctionSig* sig,
-    int expected_arity, Suspend suspend,
+    uint32_t canonical_type_index, int expected_arity, Suspend suspend,
     WasmImportWrapperCache::ModificationScope* cache_scope) {
   // Entry should exist, so that we don't insert a new one and invalidate
   // other threads' iterators/references, but it should not have been compiled
   // yet.
-  WasmImportWrapperCache::CacheKey key(kind, sig, expected_arity, suspend);
+  WasmImportWrapperCache::CacheKey key(kind, canonical_type_index,
+                                       expected_arity, suspend);
   DCHECK_NULL((*cache_scope)[key]);
   bool source_positions = is_asmjs_module(native_module->module());
   // Keep the {WasmCode} alive until we explicitly call {IncRef}.
diff --git a/src/wasm/module-compiler.h b/src/wasm/module-compiler.h
index 20320899eb..b58090e136 100644
--- a/src/wasm/module-compiler.h
+++ b/src/wasm/module-compiler.h
@@ -74,7 +74,7 @@ V8_EXPORT_PRIVATE
 WasmCode* CompileImportWrapper(
     NativeModule* native_module, Counters* counters,
     compiler::WasmImportCallKind kind, const FunctionSig* sig,
-    int expected_arity, Suspend suspend,
+    uint32_t canonical_type_index, int expected_arity, Suspend suspend,
     WasmImportWrapperCache::ModificationScope* cache_scope);
 
 // Triggered by the WasmCompileLazy builtin. The return value indicates whether
@@ -96,14 +96,14 @@ V8_EXPORT_PRIVATE void TriggerTierUp(WasmInstanceObject instance,
                                      int func_index);
 
 void TierUpNowForTesting(Isolate* isolate, WasmInstanceObject instance,
                          int func_index);
 
-template <typename Key, typename Hash>
+template <typename Key, typename KeyInfo, typename Hash>
 class WrapperQueue {
 public:
   // Removes an arbitrary key from the queue and returns it.
   // If the queue is empty, returns nullopt.
   // Thread-safe.
-  base::Optional<Key> pop() {
-    base::Optional<Key> key = base::nullopt;
+  base::Optional<std::pair<Key, KeyInfo>> pop() {
+    base::Optional<std::pair<Key, KeyInfo>> key = base::nullopt;
     base::MutexGuard lock(&mutex_);
     auto it = queue_.begin();
     if (it != queue_.end()) {
@@ -116,7 +116,9 @@ class WrapperQueue {
   // Add the given key to the queue and returns true iff the insert was
   // successful.
   // Not thread-safe.
-  bool insert(const Key& key) { return queue_.insert(key).second; }
+  bool insert(const Key& key, KeyInfo key_info) {
+    return queue_.insert({key, key_info}).second;
+  }
 
   size_t size() {
     base::MutexGuard lock(&mutex_);
@@ -125,7 +127,7 @@ class WrapperQueue {
 
 private:
   base::Mutex mutex_;
-  std::unordered_set<Key, Hash> queue_;
+  std::unordered_map<Key, KeyInfo, Hash> queue_;
 };
 
 // Encapsulates all the state and steps of an asynchronous compilation.
diff --git a/src/wasm/module-decoder-impl.h b/src/wasm/module-decoder-impl.h
index 227fc327c2..268c2b62c9 100644
--- a/src/wasm/module-decoder-impl.h
+++ b/src/wasm/module-decoder-impl.h
@@ -684,9 +684,7 @@ class ModuleDecoderTemplate : public Decoder {
           const FunctionSig* sig = consume_sig(module_->signature_zone.get());
           if (!ok()) break;
           module_->add_signature(sig, kNoSuperType);
-          if (v8_flags.wasm_type_canonicalization) {
-            type_canon->AddRecursiveGroup(module_.get(), 1);
-          }
+          type_canon->AddRecursiveGroup(module_.get(), 1);
           break;
         }
         case kWasmArrayTypeCode:
@@ -727,17 +725,13 @@ class ModuleDecoderTemplate : public Decoder {
           TypeDefinition type = consume_subtype_definition();
           if (ok()) module_->add_type(type);
         }
-        if (ok() && v8_flags.wasm_type_canonicalization) {
-          type_canon->AddRecursiveGroup(module_.get(), group_size);
-        }
+        if (ok()) type_canon->AddRecursiveGroup(module_.get(), group_size);
       } else {
         tracer_.TypeOffset(pc_offset());
         TypeDefinition type = consume_subtype_definition();
         if (ok()) {
          module_->add_type(type);
-          if (v8_flags.wasm_type_canonicalization) {
-            type_canon->AddRecursiveGroup(module_.get(), 1);
-          }
+          type_canon->AddRecursiveGroup(module_.get(), 1);
        }
      }
    }
diff --git a/src/wasm/module-instantiate.cc b/src/wasm/module-instantiate.cc
index a12cc954e1..5d119a44b4 100644
--- a/src/wasm/module-instantiate.cc
+++ b/src/wasm/module-instantiate.cc
@@ -42,8 +42,9 @@ byte* raw_buffer_ptr(MaybeHandle<JSArrayBuffer> buffer, int offset) {
   return static_cast<byte*>(buffer.ToHandleChecked()->backing_store()) + offset;
 }
 
-using ImportWrapperQueue = WrapperQueue<WasmImportWrapperCache::CacheKey,
-                                        WasmImportWrapperCache::CacheKeyHash>;
+using ImportWrapperQueue =
+    WrapperQueue<WasmImportWrapperCache::CacheKey, const FunctionSig*,
+                 WasmImportWrapperCache::CacheKeyHash>;
 
 class CompileImportWrapperJob final : public JobTask {
 public:
@@ -66,12 +67,15 @@ class CompileImportWrapperJob final : public JobTask {
 
   void Run(JobDelegate* delegate) override {
     TRACE_EVENT0("v8.wasm", "wasm.CompileImportWrapperJob.Run");
-    while (base::Optional<WasmImportWrapperCache::CacheKey> key =
-               queue_->pop()) {
+    while (base::Optional<std::pair<const WasmImportWrapperCache::CacheKey,
+                                    const FunctionSig*>>
+               key = queue_->pop()) {
       // TODO(wasm): Batch code publishing, to avoid repeated locking and
       // permission switching.
-      CompileImportWrapper(native_module_, counters_, key->kind, key->signature,
-                           key->expected_arity, key->suspend, cache_scope_);
+      CompileImportWrapper(native_module_, counters_, key->first.kind,
+                           key->second, key->first.canonical_type_index,
+                           key->first.expected_arity, key->first.suspend,
+                           cache_scope_);
       if (delegate->ShouldYield()) return;
     }
   }
@@ -828,9 +832,13 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
   if (module_->start_function_index >= 0) {
     int start_index = module_->start_function_index;
     auto& function = module_->functions[start_index];
+    uint32_t canonical_sig_index =
+        module_->isorecursive_canonical_type_ids[module_->functions[start_index]
+                                                     .sig_index];
     Handle<CodeT> wrapper_code =
         JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper(
-            isolate_, function.sig, module_, function.imported);
+            isolate_, function.sig, canonical_sig_index, module_,
+            function.imported);
     // TODO(clemensb): Don't generate an exported function for the start
     // function. Use CWasmEntry instead.
     start_function_ = WasmExportedFunction::New(
@@ -1158,15 +1166,18 @@ bool InstanceBuilder::ProcessImportedFunction(
       WasmImportWrapperCache* cache = native_module->import_wrapper_cache();
       // TODO(jkummerow): Consider precompiling CapiCallWrappers in parallel,
       // just like other import wrappers.
-      WasmCode* wasm_code =
-          cache->MaybeGet(kind, expected_sig, expected_arity, kNoSuspend);
+      uint32_t canonical_type_index =
+          module_->isorecursive_canonical_type_ids
+              [module_->functions[func_index].sig_index];
+      WasmCode* wasm_code = cache->MaybeGet(kind, canonical_type_index,
+                                            expected_arity, kNoSuspend);
       if (wasm_code == nullptr) {
         WasmCodeRefScope code_ref_scope;
         WasmImportWrapperCache::ModificationScope cache_scope(cache);
         wasm_code =
             compiler::CompileWasmCapiCallWrapper(native_module, expected_sig);
-        WasmImportWrapperCache::CacheKey key(kind, expected_sig, expected_arity,
-                                             kNoSuspend);
+        WasmImportWrapperCache::CacheKey key(kind, canonical_type_index,
+                                             expected_arity, kNoSuspend);
         cache_scope[key] = wasm_code;
         wasm_code->IncRef();
         isolate_->counters()->wasm_generated_code_size()->Increment(
@@ -1203,8 +1214,11 @@ bool InstanceBuilder::ProcessImportedFunction(
       }
 
       NativeModule* native_module = instance->module_object().native_module();
+      uint32_t canonical_type_index =
+          module_->isorecursive_canonical_type_ids
+              [module_->functions[func_index].sig_index];
       WasmCode* wasm_code = native_module->import_wrapper_cache()->Get(
-          kind, expected_sig, expected_arity, resolved.suspend);
+          kind, canonical_type_index, expected_arity, resolved.suspend);
       DCHECK_NOT_NULL(wasm_code);
       ImportedFunctionEntry entry(instance, func_index);
       if (wasm_code->kind() == WasmCode::kWasmToJsWrapper) {
@@ -1614,14 +1628,16 @@ void InstanceBuilder::CompileImportWrappers(
       expected_arity =
          shared.internal_formal_parameter_count_without_receiver();
    }
-
-    WasmImportWrapperCache::CacheKey key(kind, sig, expected_arity,
-                                         resolved.suspend);
+    uint32_t canonical_type_index =
+        module_->isorecursive_canonical_type_ids[module_->functions[func_index]
+                                                     .sig_index];
+    WasmImportWrapperCache::CacheKey key(kind, canonical_type_index,
+                                         expected_arity, resolved.suspend);
    if (cache_scope[key] != nullptr) {
      // Cache entry already exists, no need to compile it again.
       continue;
     }
-    import_wrapper_queue.insert(key);
+    import_wrapper_queue.insert(key, sig);
   }
 
   auto compile_job_task = std::make_unique<CompileImportWrapperJob>(
diff --git a/src/wasm/wasm-import-wrapper-cache.cc b/src/wasm/wasm-import-wrapper-cache.cc
index 7c6ca19f08..bbe3a480e1 100644
--- a/src/wasm/wasm-import-wrapper-cache.cc
+++ b/src/wasm/wasm-import-wrapper-cache.cc
@@ -23,23 +23,25 @@ WasmCode*& WasmImportWrapperCache::operator[](
 }
 
 WasmCode* WasmImportWrapperCache::Get(compiler::WasmImportCallKind kind,
-                                      const FunctionSig* sig,
+                                      uint32_t canonical_type_index,
                                       int expected_arity,
                                       Suspend suspend) const {
   base::MutexGuard lock(&mutex_);
 
-  auto it = entry_map_.find({kind, sig, expected_arity, suspend});
+  auto it =
+      entry_map_.find({kind, canonical_type_index, expected_arity, suspend});
   DCHECK(it != entry_map_.end());
   return it->second;
 }
 
 WasmCode* WasmImportWrapperCache::MaybeGet(compiler::WasmImportCallKind kind,
-                                           const FunctionSig* sig,
+                                           uint32_t canonical_type_index,
                                            int expected_arity,
                                            Suspend suspend) const {
   base::MutexGuard lock(&mutex_);
 
-  auto it = entry_map_.find({kind, sig, expected_arity, suspend});
+  auto it =
+      entry_map_.find({kind, canonical_type_index, expected_arity, suspend});
   if (it == entry_map_.end()) return nullptr;
   return it->second;
 }
diff --git a/src/wasm/wasm-import-wrapper-cache.h b/src/wasm/wasm-import-wrapper-cache.h
index f12b07477f..e07431dd94 100644
--- a/src/wasm/wasm-import-wrapper-cache.h
+++ b/src/wasm/wasm-import-wrapper-cache.h
@@ -28,22 +28,23 @@ using FunctionSig = Signature<ValueType>;
 class WasmImportWrapperCache {
 public:
   struct CacheKey {
-    CacheKey(const compiler::WasmImportCallKind& _kind, const FunctionSig* _sig,
-             int _expected_arity, Suspend _suspend)
-        : kind(_kind),
-          signature(_sig),
-          expected_arity(_expected_arity == kDontAdaptArgumentsSentinel
+    CacheKey(const compiler::WasmImportCallKind& kind,
+             uint32_t canonical_type_index, int expected_arity, Suspend suspend)
+        : kind(kind),
+          canonical_type_index(canonical_type_index),
+          expected_arity(expected_arity == kDontAdaptArgumentsSentinel
                              ? 0
-                             : _expected_arity),
-          suspend(_suspend) {}
+                             : expected_arity),
+          suspend(suspend) {}
 
    bool operator==(const CacheKey& rhs) const {
-      return kind == rhs.kind && signature == rhs.signature &&
+      return kind == rhs.kind &&
+             canonical_type_index == rhs.canonical_type_index &&
             expected_arity == rhs.expected_arity && suspend == rhs.suspend;
    }
 
    compiler::WasmImportCallKind kind;
-    const FunctionSig* signature;
+    uint32_t canonical_type_index;
    int expected_arity;
    Suspend suspend;
   };
@@ -51,8 +52,8 @@ class WasmImportWrapperCache {
   class CacheKeyHash {
   public:
    size_t operator()(const CacheKey& key) const {
-      return base::hash_combine(static_cast<int>(key.kind), key.signature,
-                                key.expected_arity);
+      return base::hash_combine(static_cast<int>(key.kind),
+                                key.canonical_type_index, key.expected_arity);
    }
   };
 
@@ -75,11 +76,12 @@ class WasmImportWrapperCache {
 
   // Thread-safe. Assumes the key exists in the map.
   V8_EXPORT_PRIVATE WasmCode* Get(compiler::WasmImportCallKind kind,
-                                  const FunctionSig* sig, int expected_arity,
-                                  Suspend suspend) const;
+                                  uint32_t canonical_type_index,
+                                  int expected_arity, Suspend suspend) const;
   // Thread-safe. Returns nullptr if the key doesn't exist in the map.
- WasmCode* MaybeGet(compiler::WasmImportCallKind kind, const FunctionSig* sig, - int expected_arity, Suspend suspend) const; + WasmCode* MaybeGet(compiler::WasmImportCallKind kind, + uint32_t canonical_type_index, int expected_arity, + Suspend suspend) const; ~WasmImportWrapperCache(); diff --git a/src/wasm/wasm-module.cc b/src/wasm/wasm-module.cc index 85a415c2ef..246f919a2f 100644 --- a/src/wasm/wasm-module.cc +++ b/src/wasm/wasm-module.cc @@ -66,29 +66,19 @@ bool LazilyGeneratedNames::Has(uint32_t function_index) { // static int MaxNumExportWrappers(const WasmModule* module) { - // For each signature there may exist a wrapper, both for imported and - // internal functions. - return static_cast(module->signature_map.size()) * 2; + if (module->isorecursive_canonical_type_ids.empty()) return 0; + // TODO(manoskouk): This will create oversized wrappers for modules with few + // types but large canonical type indices. Move wrappers to isolate to avoid + // this. + uint32_t max_canonical_index = + *std::max_element(module->isorecursive_canonical_type_ids.begin(), + module->isorecursive_canonical_type_ids.end()); + return (max_canonical_index + 1) * 2; } -int GetExportWrapperIndexInternal(const WasmModule* module, - int canonical_sig_index, bool is_import) { - if (is_import) canonical_sig_index += module->signature_map.size(); - return canonical_sig_index; -} - -int GetExportWrapperIndex(const WasmModule* module, const FunctionSig* sig, - bool is_import) { - int canonical_sig_index = module->signature_map.Find(*sig); - CHECK_GE(canonical_sig_index, 0); - return GetExportWrapperIndexInternal(module, canonical_sig_index, is_import); -} - -int GetExportWrapperIndex(const WasmModule* module, uint32_t sig_index, - bool is_import) { - uint32_t canonical_sig_index = - module->per_module_canonical_type_ids[sig_index]; - return GetExportWrapperIndexInternal(module, canonical_sig_index, is_import); +int GetExportWrapperIndex(const WasmModule* module, + uint32_t canonical_sig_index, bool is_import) { + return 2 * canonical_sig_index + (is_import ? 1 : 0); } // static diff --git a/src/wasm/wasm-module.h b/src/wasm/wasm-module.h index 14a667759a..eb0dee27c4 100644 --- a/src/wasm/wasm-module.h +++ b/src/wasm/wasm-module.h @@ -629,13 +629,11 @@ size_t EstimateStoredSize(const WasmModule* module); // Returns the number of possible export wrappers for a given module. V8_EXPORT_PRIVATE int MaxNumExportWrappers(const WasmModule* module); -// Returns the wrapper index for a function in {module} with signature {sig} -// or {sig_index} and origin defined by {is_import}. -// Prefer to use the {sig_index} consuming version, as it is much faster. -int GetExportWrapperIndex(const WasmModule* module, const FunctionSig* sig, - bool is_import); -int GetExportWrapperIndex(const WasmModule* module, uint32_t sig_index, - bool is_import); +// Returns the wrapper index for a function in {module} with isorecursive +// canonical signature index {canonical_sig_index}, and origin defined by +// {is_import}. +int GetExportWrapperIndex(const WasmModule* module, + uint32_t canonical_sig_index, bool is_import); // Return the byte offset of the function identified by the given index. // The offset will be relative to the start of the module bytes. 
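The new indexing scheme is compact enough to verify in isolation: the export and import wrappers for canonical signature i occupy slots 2*i and 2*i + 1, so a table sized from the largest canonical index in the module covers every wrapper. Below is a standalone sketch, not V8 code: the two function names mirror this hunk, but the vector-based signature and the sample values are invented for illustration.

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

// Mirrors GetExportWrapperIndex() from the patch: wrappers for the
// non-import and import case of canonical signature i live at 2*i and
// 2*i + 1 respectively.
int GetExportWrapperIndex(uint32_t canonical_sig_index, bool is_import) {
  return 2 * canonical_sig_index + (is_import ? 1 : 0);
}

// Mirrors MaxNumExportWrappers(): the table must cover the largest
// canonical index that occurs in the module, with both variants.
int MaxNumExportWrappers(const std::vector<uint32_t>& canonical_type_ids) {
  if (canonical_type_ids.empty()) return 0;
  uint32_t max_canonical_index =
      *std::max_element(canonical_type_ids.begin(), canonical_type_ids.end());
  return (max_canonical_index + 1) * 2;
}

int main() {
  // A module whose three types canonicalize to indices {0, 5, 2}.
  std::vector<uint32_t> ids = {0, 5, 2};
  assert(MaxNumExportWrappers(ids) == 12);  // slots 0..11
  // Import/export wrappers of one signature never collide...
  assert(GetExportWrapperIndex(5, false) == 10);
  assert(GetExportWrapperIndex(5, true) == 11);
  // ...and every computed slot fits in the table.
  for (uint32_t id : ids) {
    assert(GetExportWrapperIndex(id, true) < MaxNumExportWrappers(ids));
  }
  return 0;
}

This also makes the TODO in the hunk concrete: a module with one type of canonical index 1000 still allocates 2002 slots, which is the oversizing the comment warns about.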
diff --git a/src/wasm/wasm-objects.cc b/src/wasm/wasm-objects.cc index 8779adbfc6..79340c44ae 100644 --- a/src/wasm/wasm-objects.cc +++ b/src/wasm/wasm-objects.cc @@ -548,14 +548,16 @@ void WasmTableObject::UpdateDispatchTables( instance->module_object().native_module(); wasm::WasmImportWrapperCache* cache = native_module->import_wrapper_cache(); auto kind = compiler::WasmImportCallKind::kWasmToCapi; - wasm::WasmCode* wasm_code = - cache->MaybeGet(kind, &sig, param_count, wasm::kNoSuspend); + uint32_t canonical_type_index = + wasm::GetTypeCanonicalizer()->AddRecursiveGroup(&sig); + wasm::WasmCode* wasm_code = cache->MaybeGet(kind, canonical_type_index, + param_count, wasm::kNoSuspend); if (wasm_code == nullptr) { wasm::WasmCodeRefScope code_ref_scope; wasm::WasmImportWrapperCache::ModificationScope cache_scope(cache); wasm_code = compiler::CompileWasmCapiCallWrapper(native_module, &sig); - wasm::WasmImportWrapperCache::CacheKey key(kind, &sig, param_count, - wasm::kNoSuspend); + wasm::WasmImportWrapperCache::CacheKey key(kind, canonical_type_index, + param_count, wasm::kNoSuspend); cache_scope[key] = wasm_code; wasm_code->IncRef(); isolate->counters()->wasm_generated_code_size()->Increment( @@ -567,7 +569,9 @@ void WasmTableObject::UpdateDispatchTables( // not found; it will simply never match any check. // It is safe to use this even when v8_flags.wasm_type_canonicalization, as // the C API cannot refer to user-defined types. - auto sig_id = instance->module()->signature_map.Find(sig); + auto sig_id = v8_flags.wasm_type_canonicalization + ? canonical_type_index + : instance->module()->signature_map.Find(sig); instance->GetIndirectFunctionTable(isolate, table_index) ->Set(entry_index, sig_id, wasm_code->instruction_start(), WasmCapiFunctionData::cast( @@ -1379,10 +1383,10 @@ WasmInstanceObject::GetOrCreateWasmInternalFunction( Handle module_object(instance->module_object(), isolate); const WasmModule* module = module_object->module(); const WasmFunction& function = module->functions[function_index]; + uint32_t canonical_sig_index = + module->isorecursive_canonical_type_ids[function.sig_index]; int wrapper_index = - GetExportWrapperIndex(module, function.sig_index, function.imported); - DCHECK_EQ(wrapper_index, - GetExportWrapperIndex(module, function.sig, function.imported)); + GetExportWrapperIndex(module, canonical_sig_index, function.imported); Handle entry = FixedArray::get(module_object->export_wrappers(), wrapper_index, isolate); @@ -1395,7 +1399,8 @@ WasmInstanceObject::GetOrCreateWasmInternalFunction( // this signature. We compile it and store the wrapper in the module for // later use. wrapper = wasm::JSToWasmWrapperCompilationUnit::CompileJSToWasmWrapper( - isolate, function.sig, instance->module(), function.imported); + isolate, function.sig, canonical_sig_index, instance->module(), + function.imported); module_object->export_wrappers().set(wrapper_index, *wrapper); } auto external = Handle::cast(WasmExportedFunction::New( diff --git a/src/wasm/wasm-objects.tq b/src/wasm/wasm-objects.tq index 55a7e7458d..e6543b990b 100644 --- a/src/wasm/wasm-objects.tq +++ b/src/wasm/wasm-objects.tq @@ -77,11 +77,13 @@ extern class WasmExportedFunctionData extends WasmFunctionData { extern class WasmJSFunctionData extends WasmFunctionData { serialized_return_count: Smi; serialized_parameter_count: Smi; + // TODO(7748): Maybe store the canonical type index of the signature instead. 
serialized_signature: PodArrayOfWasmValueType; } extern class WasmCapiFunctionData extends WasmFunctionData { embedder_data: Foreign; // Managed + // TODO(7748): Maybe store the canonical type index of the signature instead. serialized_signature: PodArrayOfWasmValueType; } diff --git a/test/cctest/wasm/test-wasm-import-wrapper-cache.cc b/test/cctest/wasm/test-wasm-import-wrapper-cache.cc index b8a22392a8..9e11ab13e0 100644 --- a/test/cctest/wasm/test-wasm-import-wrapper-cache.cc +++ b/test/cctest/wasm/test-wasm-import-wrapper-cache.cc @@ -37,16 +37,19 @@ TEST(CacheHit) { auto kind = compiler::WasmImportCallKind::kJSFunctionArityMatch; auto sig = sigs.i_i(); + uint32_t canonical_type_index = + GetTypeCanonicalizer()->AddRecursiveGroup(sig); int expected_arity = static_cast(sig->parameter_count()); - WasmCode* c1 = - CompileImportWrapper(module.get(), isolate->counters(), kind, sig, - expected_arity, kNoSuspend, &cache_scope); + WasmCode* c1 = CompileImportWrapper(module.get(), isolate->counters(), kind, + sig, canonical_type_index, expected_arity, + kNoSuspend, &cache_scope); CHECK_NOT_NULL(c1); CHECK_EQ(WasmCode::Kind::kWasmToJsWrapper, c1->kind()); - WasmCode* c2 = cache_scope[{kind, sig, expected_arity, kNoSuspend}]; + WasmCode* c2 = + cache_scope[{kind, canonical_type_index, expected_arity, kNoSuspend}]; CHECK_NOT_NULL(c2); CHECK_EQ(c1, c2); @@ -63,17 +66,22 @@ TEST(CacheMissSig) { auto kind = compiler::WasmImportCallKind::kJSFunctionArityMatch; auto sig1 = sigs.i_i(); int expected_arity1 = static_cast(sig1->parameter_count()); + uint32_t canonical_type_index1 = + GetTypeCanonicalizer()->AddRecursiveGroup(sig1); auto sig2 = sigs.i_ii(); int expected_arity2 = static_cast(sig2->parameter_count()); + uint32_t canonical_type_index2 = + GetTypeCanonicalizer()->AddRecursiveGroup(sig2); - WasmCode* c1 = - CompileImportWrapper(module.get(), isolate->counters(), kind, sig1, - expected_arity1, kNoSuspend, &cache_scope); + WasmCode* c1 = CompileImportWrapper( + module.get(), isolate->counters(), kind, sig1, canonical_type_index1, + expected_arity1, kNoSuspend, &cache_scope); CHECK_NOT_NULL(c1); CHECK_EQ(WasmCode::Kind::kWasmToJsWrapper, c1->kind()); - WasmCode* c2 = cache_scope[{kind, sig2, expected_arity2, kNoSuspend}]; + WasmCode* c2 = + cache_scope[{kind, canonical_type_index2, expected_arity2, kNoSuspend}]; CHECK_NULL(c2); } @@ -90,15 +98,18 @@ TEST(CacheMissKind) { auto kind2 = compiler::WasmImportCallKind::kJSFunctionArityMismatch; auto sig = sigs.i_i(); int expected_arity = static_cast(sig->parameter_count()); + uint32_t canonical_type_index = + GetTypeCanonicalizer()->AddRecursiveGroup(sig); - WasmCode* c1 = - CompileImportWrapper(module.get(), isolate->counters(), kind1, sig, - expected_arity, kNoSuspend, &cache_scope); + WasmCode* c1 = CompileImportWrapper(module.get(), isolate->counters(), kind1, + sig, canonical_type_index, expected_arity, + kNoSuspend, &cache_scope); CHECK_NOT_NULL(c1); CHECK_EQ(WasmCode::Kind::kWasmToJsWrapper, c1->kind()); - WasmCode* c2 = cache_scope[{kind2, sig, expected_arity, kNoSuspend}]; + WasmCode* c2 = + cache_scope[{kind2, canonical_type_index, expected_arity, kNoSuspend}]; CHECK_NULL(c2); } @@ -114,31 +125,39 @@ TEST(CacheHitMissSig) { auto kind = compiler::WasmImportCallKind::kJSFunctionArityMatch; auto sig1 = sigs.i_i(); int expected_arity1 = static_cast(sig1->parameter_count()); + uint32_t canonical_type_index1 = + GetTypeCanonicalizer()->AddRecursiveGroup(sig1); auto sig2 = sigs.i_ii(); int expected_arity2 = static_cast(sig2->parameter_count()); + 
uint32_t canonical_type_index2 = + GetTypeCanonicalizer()->AddRecursiveGroup(sig2); - WasmCode* c1 = - CompileImportWrapper(module.get(), isolate->counters(), kind, sig1, - expected_arity1, kNoSuspend, &cache_scope); + WasmCode* c1 = CompileImportWrapper( + module.get(), isolate->counters(), kind, sig1, canonical_type_index1, + expected_arity1, kNoSuspend, &cache_scope); CHECK_NOT_NULL(c1); CHECK_EQ(WasmCode::Kind::kWasmToJsWrapper, c1->kind()); - WasmCode* c2 = cache_scope[{kind, sig2, expected_arity2, kNoSuspend}]; + WasmCode* c2 = + cache_scope[{kind, canonical_type_index2, expected_arity2, kNoSuspend}]; CHECK_NULL(c2); c2 = CompileImportWrapper(module.get(), isolate->counters(), kind, sig2, - expected_arity2, kNoSuspend, &cache_scope); + canonical_type_index2, expected_arity2, kNoSuspend, + &cache_scope); CHECK_NE(c1, c2); - WasmCode* c3 = cache_scope[{kind, sig1, expected_arity1, kNoSuspend}]; + WasmCode* c3 = + cache_scope[{kind, canonical_type_index1, expected_arity1, kNoSuspend}]; CHECK_NOT_NULL(c3); CHECK_EQ(c1, c3); - WasmCode* c4 = cache_scope[{kind, sig2, expected_arity2, kNoSuspend}]; + WasmCode* c4 = + cache_scope[{kind, canonical_type_index2, expected_arity2, kNoSuspend}]; CHECK_NOT_NULL(c4); CHECK_EQ(c2, c4); diff --git a/test/cctest/wasm/wasm-run-utils.cc b/test/cctest/wasm/wasm-run-utils.cc index d7e05141b1..fadb9739ff 100644 --- a/test/cctest/wasm/wasm-run-utils.cc +++ b/test/cctest/wasm/wasm-run-utils.cc @@ -82,14 +82,17 @@ TestingModuleBuilder::TestingModuleBuilder( Handle callable = resolved.callable; WasmImportWrapperCache::ModificationScope cache_scope( native_module_->import_wrapper_cache()); + uint32_t canonical_type_index = + GetTypeCanonicalizer()->AddRecursiveGroup(maybe_import->sig); WasmImportWrapperCache::CacheKey key( - kind, maybe_import->sig, + kind, canonical_type_index, static_cast(maybe_import->sig->parameter_count()), kNoSuspend); auto import_wrapper = cache_scope[key]; if (import_wrapper == nullptr) { CodeSpaceWriteScope write_scope(native_module_); import_wrapper = CompileImportWrapper( native_module_, isolate_->counters(), kind, maybe_import->sig, + canonical_type_index, static_cast(maybe_import->sig->parameter_count()), kNoSuspend, &cache_scope); } diff --git a/test/cctest/wasm/wasm-run-utils.h b/test/cctest/wasm/wasm-run-utils.h index 0ce398339c..f105e799da 100644 --- a/test/cctest/wasm/wasm-run-utils.h +++ b/test/cctest/wasm/wasm-run-utils.h @@ -138,8 +138,8 @@ class TestingModuleBuilder { DCHECK_EQ(test_module_->types.size(), test_module_->per_module_canonical_type_ids.size()); test_module_->add_signature(sig, kNoSuperType); + GetTypeCanonicalizer()->AddRecursiveGroup(test_module_.get(), 1); if (v8_flags.wasm_type_canonicalization) { - GetTypeCanonicalizer()->AddRecursiveGroup(test_module_.get(), 1); instance_object_->set_isorecursive_canonical_types( test_module_->isorecursive_canonical_type_ids.data()); } From 6b13d2589796da00e9bfda56a082376dd5ce1b83 Mon Sep 17 00:00:00 2001 From: Thibaud Michaud Date: Tue, 13 Sep 2022 11:23:15 +0200 Subject: [PATCH 0099/1772] [wasm] Fix inconsistent stack state During a stack switch, the stack state is temporarily inconsistent when the old stack is marked as "inactive" and the new stack is not yet marked as "active". Ensure that the WasmAllocateSuspender runtime function is not called in an inconsistent state. It can trigger a GC, and we need a consistent state to iterate the roots. 
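To see why the ordering matters: a GC root walk visits every stack whose jump buffer is marked active, so there must never be a window in which the currently running stack claims to be inactive. The following is a schematic sketch of that invariant only, with invented names and none of the real V8 types.

#include <cassert>

enum class StackState { Active, Inactive };

struct JumpBuffer { StackState state = StackState::Active; };

// Stand-in for the GC's root walk: at least one of the two stacks
// must still be Active, or live frames would be skipped.
void VisitRoots(const JumpBuffer& parent, const JumpBuffer& child) {
  assert(parent.state == StackState::Active ||
         child.state == StackState::Active);
}

// Sketch of the fixed ordering in the runtime function: do the work
// that may trigger a GC while the parent is still Active, and only
// retire the parent at the very end.
void AllocateSuspenderFixed(JumpBuffer& parent, JumpBuffer& child) {
  // Simulates the root walk a GC during allocation would perform;
  // the parent is still Active, so the state is consistent.
  VisitRoots(parent, child);
  // Only after any potential GC is the parent marked Inactive.
  parent.state = StackState::Inactive;
  child.state = StackState::Active;
}

int main() {
  JumpBuffer parent, child{StackState::Inactive};
  AllocateSuspenderFixed(parent, child);
  assert(parent.state == StackState::Inactive);
  assert(child.state == StackState::Active);
  return 0;
}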
Wait until the end of the function to mark the current stack as "inactive", so that it is still marked as "active" when it is potentially visited. R=clemensb@chromium.org Bug: v8:13272 Change-Id: I65fe76c3d222d9fa47d17b66069443ceabba47ad Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3890919 Reviewed-by: Clemens Backes Commit-Queue: Thibaud Michaud Cr-Commit-Position: refs/heads/main@{#83184} --- src/builtins/arm64/builtins-arm64.cc | 6 ++++-- src/builtins/x64/builtins-x64.cc | 6 ++++-- src/runtime/runtime-wasm.cc | 5 +++++ 3 files changed, 13 insertions(+), 4 deletions(-) diff --git a/src/builtins/arm64/builtins-arm64.cc b/src/builtins/arm64/builtins-arm64.cc index 41cd0225c5..00368d0da6 100644 --- a/src/builtins/arm64/builtins-arm64.cc +++ b/src/builtins/arm64/builtins-arm64.cc @@ -3106,8 +3106,6 @@ void FillJumpBuffer(MacroAssembler* masm, Register jmpbuf, Label* pc, __ Str(tmp, MemOperand(jmpbuf, wasm::kJmpBufStackLimitOffset)); __ Adr(tmp, pc); __ Str(tmp, MemOperand(jmpbuf, wasm::kJmpBufPcOffset)); - SwitchStackState(masm, jmpbuf, tmp, wasm::JumpBuffer::Active, - wasm::JumpBuffer::Inactive); } void LoadJumpBuffer(MacroAssembler* masm, Register jmpbuf, bool load_pc, @@ -4423,6 +4421,8 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) { FieldMemOperand(continuation, WasmContinuationObject::kJmpbufOffset), kWasmContinuationJmpbufTag); FillJumpBuffer(masm, jmpbuf, &resume, scratch); + SwitchStackState(masm, jmpbuf, scratch, wasm::JumpBuffer::Active, + wasm::JumpBuffer::Inactive); __ Move(scratch, Smi::FromInt(WasmSuspenderObject::kSuspended)); __ StoreTaggedField( scratch, @@ -4573,6 +4573,8 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) { WasmContinuationObject::kJmpbufOffset), kWasmContinuationJmpbufTag); FillJumpBuffer(masm, current_jmpbuf, &suspend, scratch); + SwitchStackState(masm, current_jmpbuf, scratch, wasm::JumpBuffer::Active, + wasm::JumpBuffer::Inactive); FREE_REG(current_jmpbuf); // ------------------------------------------- diff --git a/src/builtins/x64/builtins-x64.cc b/src/builtins/x64/builtins-x64.cc index d5aaedb2b3..03539e1caa 100644 --- a/src/builtins/x64/builtins-x64.cc +++ b/src/builtins/x64/builtins-x64.cc @@ -2889,8 +2889,6 @@ void FillJumpBuffer(MacroAssembler* masm, Register jmpbuf, Label* pc) { __ movq(MemOperand(jmpbuf, wasm::kJmpBufStackLimitOffset), kScratchRegister); __ leaq(kScratchRegister, MemOperand(pc, 0)); __ movq(MemOperand(jmpbuf, wasm::kJmpBufPcOffset), kScratchRegister); - SwitchStackState(masm, jmpbuf, wasm::JumpBuffer::Active, - wasm::JumpBuffer::Inactive); } void LoadJumpBuffer(MacroAssembler* masm, Register jmpbuf, bool load_pc) { @@ -3982,6 +3980,8 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* masm) { jmpbuf, FieldOperand(continuation, WasmContinuationObject::kJmpbufOffset), kWasmContinuationJmpbufTag, r8); FillJumpBuffer(masm, jmpbuf, &resume); + SwitchStackState(masm, jmpbuf, wasm::JumpBuffer::Active, + wasm::JumpBuffer::Inactive); __ StoreTaggedSignedField( FieldOperand(suspender, WasmSuspenderObject::kStateOffset), Smi::FromInt(WasmSuspenderObject::kSuspended)); @@ -4118,6 +4118,8 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, wasm::OnResume on_resume) { FieldOperand(active_continuation, WasmContinuationObject::kJmpbufOffset), kWasmContinuationJmpbufTag, rdx); FillJumpBuffer(masm, current_jmpbuf, &suspend); + SwitchStackState(masm, current_jmpbuf, wasm::JumpBuffer::Active, + wasm::JumpBuffer::Inactive); current_jmpbuf = no_reg; // 
------------------------------------------- diff --git a/src/runtime/runtime-wasm.cc b/src/runtime/runtime-wasm.cc index 4f9bd6a616..034234f8be 100644 --- a/src/runtime/runtime-wasm.cc +++ b/src/runtime/runtime-wasm.cc @@ -835,6 +835,11 @@ RUNTIME_FUNCTION(Runtime_WasmAllocateSuspender) { active_suspender_slot.store(*suspender); SyncStackLimit(isolate); + wasm::JumpBuffer* jmpbuf = reinterpret_cast( + parent->ReadExternalPointerField( + WasmContinuationObject::kJmpbufOffset, isolate)); + DCHECK_EQ(jmpbuf->state, wasm::JumpBuffer::Active); + jmpbuf->state = wasm::JumpBuffer::Inactive; return *suspender; } From 7fce6dec89e9aa0e3759543ee764d7669e8dfb72 Mon Sep 17 00:00:00 2001 From: Tobias Tebbi Date: Wed, 14 Sep 2022 00:28:22 +0200 Subject: [PATCH 0100/1772] [turboshaft] performance tweak: source positions only if needed Change-Id: I2a35ae0d07bcd5c570bcaae8ae6ef886a5b5e926 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3852484 Reviewed-by: Nico Hartmann Commit-Queue: Tobias Tebbi Cr-Commit-Position: refs/heads/main@{#83185} --- src/compiler/compiler-source-position-table.cc | 6 ++++++ src/compiler/compiler-source-position-table.h | 5 +++++ src/compiler/pipeline.cc | 4 +++- src/compiler/turboshaft/graph-builder.cc | 2 +- src/compiler/turboshaft/recreate-schedule.cc | 2 +- test/cctest/test-cpu-profiler.cc | 4 ++++ 6 files changed, 20 insertions(+), 3 deletions(-) diff --git a/src/compiler/compiler-source-position-table.cc b/src/compiler/compiler-source-position-table.cc index 92643bee1a..a8cbae0a9d 100644 --- a/src/compiler/compiler-source-position-table.cc +++ b/src/compiler/compiler-source-position-table.cc @@ -32,11 +32,16 @@ SourcePositionTable::SourcePositionTable(Graph* graph) void SourcePositionTable::AddDecorator() { DCHECK_NULL(decorator_); + if (!enabled_) return; decorator_ = graph_->zone()->New(this); graph_->AddDecorator(decorator_); } void SourcePositionTable::RemoveDecorator() { + if (!enabled_) { + DCHECK_NULL(decorator_); + return; + } DCHECK_NOT_NULL(decorator_); graph_->RemoveDecorator(decorator_); decorator_ = nullptr; @@ -51,6 +56,7 @@ SourcePosition SourcePositionTable::GetSourcePosition(NodeId id) const { void SourcePositionTable::SetSourcePosition(Node* node, SourcePosition position) { + DCHECK(IsEnabled()); table_.Set(node, position); } diff --git a/src/compiler/compiler-source-position-table.h b/src/compiler/compiler-source-position-table.h index 8e3305ca1c..3b7978eafb 100644 --- a/src/compiler/compiler-source-position-table.h +++ b/src/compiler/compiler-source-position-table.h @@ -57,6 +57,10 @@ class V8_EXPORT_PRIVATE SourcePositionTable final } SourcePosition GetCurrentPosition() const { return current_position_; } + void Disable() { enabled_ = false; } + + bool IsEnabled() const { return enabled_; } + void PrintJson(std::ostream& os) const; private: @@ -70,6 +74,7 @@ class V8_EXPORT_PRIVATE SourcePositionTable final Decorator* decorator_; SourcePosition current_position_; NodeAuxData table_; + bool enabled_ = true; }; } // namespace compiler diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc index 25867b2685..babd0e8dd0 100644 --- a/src/compiler/pipeline.cc +++ b/src/compiler/pipeline.cc @@ -2799,7 +2799,9 @@ void PipelineImpl::InitializeHeapBroker() { TurboCfgFile tcf(isolate()); tcf << AsC1VCompilation(info()); } - + if (data->info()->bytecode_array()->SourcePositionTable().DataSize() == 0) { + data->source_positions()->Disable(); + } data->source_positions()->AddDecorator(); if (data->info()->trace_turbo_json()) { 
data->node_origins()->AddDecorator(); diff --git a/src/compiler/turboshaft/graph-builder.cc b/src/compiler/turboshaft/graph-builder.cc index a68f9ceed8..3ff92abdcd 100644 --- a/src/compiler/turboshaft/graph-builder.cc +++ b/src/compiler/turboshaft/graph-builder.cc @@ -224,7 +224,7 @@ base::Optional GraphBuilder::Run() { DCHECK_NULL(assembler.current_block()); } - if (source_positions) { + if (source_positions->IsEnabled()) { for (OpIndex index : assembler.graph().AllOperationIndices()) { compiler::NodeId origin = assembler.graph().operation_origins()[index].DecodeTurbofanNodeId(); diff --git a/src/compiler/turboshaft/recreate-schedule.cc b/src/compiler/turboshaft/recreate-schedule.cc index 4f4deb4507..52ee5369ca 100644 --- a/src/compiler/turboshaft/recreate-schedule.cc +++ b/src/compiler/turboshaft/recreate-schedule.cc @@ -156,7 +156,7 @@ void ScheduleBuilder::ProcessOperation(const Operation& op) { OpIndex index = input_graph.Index(op); DCHECK_LT(index.id(), nodes.size()); nodes[index.id()] = node; - if (source_positions && node) { + if (source_positions->IsEnabled() && node) { source_positions->SetSourcePosition(node, input_graph.source_positions()[index]); } diff --git a/test/cctest/test-cpu-profiler.cc b/test/cctest/test-cpu-profiler.cc index 670afc0882..cf13e48735 100644 --- a/test/cctest/test-cpu-profiler.cc +++ b/test/cctest/test-cpu-profiler.cc @@ -1247,6 +1247,8 @@ static void TickLines(bool optimize) { i::Isolate* isolate = CcTest::i_isolate(); i::Factory* factory = isolate->factory(); i::HandleScope scope(isolate); + // Ensure that source positions are collected everywhere. + isolate->SetIsProfiling(true); base::EmbeddedVector script; base::EmbeddedVector prepare_opt; @@ -1850,6 +1852,8 @@ TEST(Inlining) { v8::Local env = CcTest::NewContext({PROFILER_EXTENSION_ID}); v8::Context::Scope context_scope(env); ProfilerHelper helper(env); + // Ensure that source positions are collected everywhere. + CcTest::i_isolate()->SetIsProfiling(true); CompileRun(inlining_test_source); v8::Local function = GetFunction(env, "start"); From 8e4e35090b897d44c3428ada4406c4a4746485ef Mon Sep 17 00:00:00 2001 From: Omer Katz Date: Wed, 14 Sep 2022 00:08:35 +0200 Subject: [PATCH 0101/1772] [heap] Fix tests for MinorMC MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit SimulateFullSpace starts with no LAB, iterates over pages and allocates all free space on each page. After the first page, the LAB is empty but is no longer null. Bug: v8:12612 Change-Id: I2c00b9ba68fdd5f60eda086ea940cb6e211a986e Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3891294 Commit-Queue: Omer Katz Reviewed-by: Dominik Inführ Cr-Commit-Position: refs/heads/main@{#83186} --- test/cctest/heap/heap-utils.cc | 5 +++-- test/cctest/heap/test-array-buffer-tracker.cc | 4 ++-- test/unittests/heap/heap-unittest.cc | 3 +-- test/unittests/heap/heap-utils.cc | 5 +++-- 4 files changed, 9 insertions(+), 8 deletions(-) diff --git a/test/cctest/heap/heap-utils.cc b/test/cctest/heap/heap-utils.cc index 3ff794a809..13f8daf5e9 100644 --- a/test/cctest/heap/heap-utils.cc +++ b/test/cctest/heap/heap-utils.cc @@ -139,8 +139,9 @@ void FillPageInPagedSpace(Page* page, std::vector>* out_handles) { DCHECK(page->SweepingDone()); PagedSpaceBase* paged_space = static_cast(page->owner()); - DCHECK_EQ(kNullAddress, paged_space->top()); - DCHECK(!page->Contains(paged_space->top())); + // Make sure the LAB is empty to guarantee that all free space is accounted + // for in the freelist. 
+ DCHECK_EQ(paged_space->limit(), paged_space->top()); for (Page* p : *paged_space) { if (p != page) paged_space->UnlinkFreeListCategories(p); diff --git a/test/cctest/heap/test-array-buffer-tracker.cc b/test/cctest/heap/test-array-buffer-tracker.cc index 95c9b4630e..bcdb141538 100644 --- a/test/cctest/heap/test-array-buffer-tracker.cc +++ b/test/cctest/heap/test-array-buffer-tracker.cc @@ -3,6 +3,7 @@ // found in the LICENSE file. #include "src/api/api-inl.h" +#include "src/common/globals.h" #include "src/execution/isolate.h" #include "src/heap/array-buffer-sweeper.h" #include "src/heap/heap-inl.h" @@ -455,8 +456,7 @@ TEST(ArrayBuffer_ExternalBackingStoreSizeIncreasesMarkCompact) { v8::ArrayBuffer::New(isolate, kArraybufferSize); Handle buf1 = v8::Utils::OpenHandle(*ab1); CHECK(IsTracked(heap, *buf1)); - heap::GcAndSweep(heap, NEW_SPACE); - heap::GcAndSweep(heap, NEW_SPACE); + heap::GcAndSweep(heap, OLD_SPACE); Page* page_before_gc = Page::FromHeapObject(*buf1); heap::ForceEvacuationCandidate(page_before_gc); diff --git a/test/unittests/heap/heap-unittest.cc b/test/unittests/heap/heap-unittest.cc index 7db368c292..300602ee5c 100644 --- a/test/unittests/heap/heap-unittest.cc +++ b/test/unittests/heap/heap-unittest.cc @@ -373,8 +373,7 @@ TEST_F(HeapTest, Regress978156) { Heap* heap = isolate()->heap(); // 1. Ensure that the new space is empty. - CollectGarbage(NEW_SPACE); - CollectGarbage(NEW_SPACE); + GcAndSweep(OLD_SPACE); // 2. Fill the new space with FixedArrays. std::vector> arrays; SimulateFullSpace(heap->new_space(), &arrays); diff --git a/test/unittests/heap/heap-utils.cc b/test/unittests/heap/heap-utils.cc index 27194ea6b9..ea7d2a8002 100644 --- a/test/unittests/heap/heap-utils.cc +++ b/test/unittests/heap/heap-utils.cc @@ -54,8 +54,9 @@ void FillPageInPagedSpace(Page* page, std::vector>* out_handles) { DCHECK(page->SweepingDone()); PagedSpaceBase* paged_space = static_cast(page->owner()); - DCHECK_EQ(kNullAddress, paged_space->top()); - DCHECK(!page->Contains(paged_space->top())); + // Make sure the LAB is empty to guarantee that all free space is accounted + // for in the freelist. + DCHECK_EQ(paged_space->limit(), paged_space->top()); for (Page* p : *paged_space) { if (p != page) paged_space->UnlinkFreeListCategories(p); From 6d342fa52c9205d98cecd8e08c36f70ab8afd720 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dominik=20Inf=C3=BChr?= Date: Wed, 14 Sep 2022 13:19:17 +0200 Subject: [PATCH 0102/1772] [heap] Use std::unique_ptr for space_ array MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Document ownership with using std::unique_ptr for the space_ array. 
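The pattern is one owning container plus non-owning typed aliases: the unique_ptr array controls lifetime, while raw pointers such as new_space_ and old_space_ survive purely as cached downcasts. A minimal sketch of that shape, with placeholder space types in place of the real heap classes:

#include <cassert>
#include <memory>

struct Space { virtual ~Space() = default; };
struct OldSpace : Space {};
struct CodeSpace : Space {};

enum SpaceId { OLD_SPACE, CODE_SPACE, LAST_SPACE = CODE_SPACE };

struct Heap {
  // The array owns every space; cleanup happens via unique_ptr
  // instead of a manual delete loop.
  std::unique_ptr<Space> space_[LAST_SPACE + 1];
  // Non-owning, type-correct shortcuts into the array.
  OldSpace* old_space_ = nullptr;
  CodeSpace* code_space_ = nullptr;

  void SetUpSpaces() {
    space_[OLD_SPACE] = std::make_unique<OldSpace>();
    old_space_ = static_cast<OldSpace*>(space_[OLD_SPACE].get());
    space_[CODE_SPACE] = std::make_unique<CodeSpace>();
    code_space_ = static_cast<CodeSpace*>(space_[CODE_SPACE].get());
  }

  void TearDown() {
    // Resetting the owners frees the spaces; the aliases merely
    // dangle afterwards and must not be used.
    for (int i = 0; i <= LAST_SPACE; i++) space_[i].reset();
    old_space_ = nullptr;
    code_space_ = nullptr;
  }
};

int main() {
  Heap heap;
  heap.SetUpSpaces();
  assert(heap.old_space_ == heap.space_[OLD_SPACE].get());
  heap.TearDown();
  return 0;
}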
Bug: v8:13267 Change-Id: I12861d97cd52d2a8cf9ceb43a2f90008be87b2a3 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3890913 Reviewed-by: Michael Lippautz Commit-Queue: Dominik Inführ Cr-Commit-Position: refs/heads/main@{#83187} --- src/heap/heap-inl.h | 4 +-- src/heap/heap.cc | 60 ++++++++++++++++++++++++++++----------------- src/heap/heap.h | 2 +- 3 files changed, 40 insertions(+), 26 deletions(-) diff --git a/src/heap/heap-inl.h b/src/heap/heap-inl.h index c58cc702ab..9050491aff 100644 --- a/src/heap/heap-inl.h +++ b/src/heap/heap-inl.h @@ -172,10 +172,10 @@ void Heap::SetPendingOptimizeForTestBytecode(Object hash_table) { PagedSpace* Heap::paged_space(int idx) { DCHECK(idx == OLD_SPACE || idx == CODE_SPACE || idx == MAP_SPACE); - return static_cast(space_[idx]); + return static_cast(space_[idx].get()); } -Space* Heap::space(int idx) { return space_[idx]; } +Space* Heap::space(int idx) { return space_[idx].get(); } Address* Heap::NewSpaceAllocationTopAddress() { return new_space_ ? new_space_->allocation_top_address() : nullptr; diff --git a/src/heap/heap.cc b/src/heap/heap.cc index fba1e099fb..b36bf9e45b 100644 --- a/src/heap/heap.cc +++ b/src/heap/heap.cc @@ -5368,10 +5368,6 @@ void Heap::SetUp(LocalHeap* main_thread_local_heap) { concurrent_marking_.reset(new ConcurrentMarking(this, nullptr)); } - for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) { - space_[i] = nullptr; - } - // Set up layout tracing callback. if (V8_UNLIKELY(v8_flags.trace_gc_heap_layout)) { v8::GCType gc_type = kGCTypeMarkSweepCompact; @@ -5390,7 +5386,7 @@ void Heap::SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap) { DCHECK_NOT_NULL(ro_heap); DCHECK_IMPLIES(read_only_space_ != nullptr, read_only_space_ == ro_heap->read_only_space()); - space_[RO_SPACE] = nullptr; + DCHECK_NULL(space_[RO_SPACE].get()); read_only_space_ = ro_heap->read_only_space(); heap_allocator_.SetReadOnlySpace(read_only_space_); } @@ -5433,30 +5429,49 @@ void Heap::SetUpSpaces(LinearAllocationArea& new_allocation_info, const bool has_young_gen = !v8_flags.single_generation && !IsShared(); if (has_young_gen) { if (v8_flags.minor_mc) { - space_[NEW_SPACE] = new_space_ = - new PagedNewSpace(this, initial_semispace_size_, max_semi_space_size_, - new_allocation_info); + space_[NEW_SPACE] = std::make_unique( + this, initial_semispace_size_, max_semi_space_size_, + new_allocation_info); } else { - space_[NEW_SPACE] = new_space_ = - new SemiSpaceNewSpace(this, initial_semispace_size_, - max_semi_space_size_, new_allocation_info); + space_[NEW_SPACE] = std::make_unique( + this, initial_semispace_size_, max_semi_space_size_, + new_allocation_info); } - space_[NEW_LO_SPACE] = new_lo_space_ = - new NewLargeObjectSpace(this, NewSpaceCapacity()); + new_space_ = static_cast(space_[NEW_SPACE].get()); + + space_[NEW_LO_SPACE] = + std::make_unique(this, NewSpaceCapacity()); + new_lo_space_ = + static_cast(space_[NEW_LO_SPACE].get()); } - space_[OLD_SPACE] = old_space_ = new OldSpace(this, old_allocation_info); - space_[CODE_SPACE] = code_space_ = new CodeSpace(this); + + space_[OLD_SPACE] = std::make_unique(this, old_allocation_info); + old_space_ = static_cast(space_[OLD_SPACE].get()); + + space_[CODE_SPACE] = std::make_unique(this); + code_space_ = static_cast(space_[CODE_SPACE].get()); + if (v8_flags.use_map_space) { - space_[MAP_SPACE] = map_space_ = new MapSpace(this); + space_[MAP_SPACE] = std::make_unique(this); + map_space_ = static_cast(space_[MAP_SPACE].get()); } + if (v8_flags.shared_space && isolate()->is_shared_space_isolate()) { - 
space_[SHARED_SPACE] = shared_space_ = new SharedSpace(this); + space_[SHARED_SPACE] = std::make_unique(this); + shared_space_ = static_cast(space_[SHARED_SPACE].get()); } - space_[LO_SPACE] = lo_space_ = new OldLargeObjectSpace(this); - space_[CODE_LO_SPACE] = code_lo_space_ = new CodeLargeObjectSpace(this); + + space_[LO_SPACE] = std::make_unique(this); + lo_space_ = static_cast(space_[LO_SPACE].get()); + + space_[CODE_LO_SPACE] = std::make_unique(this); + code_lo_space_ = + static_cast(space_[CODE_LO_SPACE].get()); + if (v8_flags.shared_space && isolate()->is_shared_space_isolate()) { - space_[SHARED_LO_SPACE] = shared_lo_space_ = - new SharedLargeObjectSpace(this); + space_[SHARED_LO_SPACE] = std::make_unique(this); + shared_lo_space_ = + static_cast(space_[SHARED_LO_SPACE].get()); } for (int i = 0; i < static_cast(v8::Isolate::kUseCounterFeatureCount); @@ -5863,8 +5878,7 @@ void Heap::TearDown() { "Deletion of CODE_SPACE and CODE_LO_SPACE requires write access to " "Code page headers"); for (int i = FIRST_MUTABLE_SPACE; i <= LAST_MUTABLE_SPACE; i++) { - delete space_[i]; - space_[i] = nullptr; + space_[i].reset(); } } diff --git a/src/heap/heap.h b/src/heap/heap.h index 429b2bb601..0a45bacd4f 100644 --- a/src/heap/heap.h +++ b/src/heap/heap.h @@ -2206,7 +2206,7 @@ class Heap { std::unique_ptr shared_map_allocator_; // Map from the space id to the space. - Space* space_[LAST_SPACE + 1]; + std::unique_ptr space_[LAST_SPACE + 1]; LocalHeap* main_thread_local_heap_ = nullptr; From fa5a13be8e0f1640a59f53f7d8939e9ef4769afe Mon Sep 17 00:00:00 2001 From: Michael Achenbach Date: Wed, 14 Sep 2022 12:31:01 +0200 Subject: [PATCH 0103/1772] [test] Fix a rare numfuzz hang up The numfuzz fuzzer.py has a loop to send a new test after receiving a result. When all test processors go into stopped state, attempts of sending new tests return False. That case wasn't handled here and we kept looping forever. 
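The shape of this bug is easy to reproduce outside the test runner: a drain loop that checks the stopped state only once, before iterating, can spin forever once every send is refused. The corrected control flow is rendered here in C++ to match the other sketches in this document; the actual fix is the Python hunk that follows, and all names in the sketch are invented.

#include <cassert>
#include <deque>

struct Runner {
  bool stopped = false;
  // Returns false when the test could not be enqueued, which is
  // permanent once the runner has stopped.
  bool Send(int /*test*/) { return !stopped; }
};

// Fixed loop: re-check the stopped state after each failed send,
// instead of only once up front.
bool TrySendNext(Runner& runner, std::deque<int>& pending) {
  while (!pending.empty()) {
    int test = pending.front();
    pending.pop_front();
    if (runner.Send(test)) return true;
    if (runner.stopped) return false;  // previously: loop forever
  }
  return false;
}

int main() {
  Runner runner;
  std::deque<int> pending = {1, 2, 3};
  runner.stopped = true;  // all test processors have stopped
  assert(!TrySendNext(runner, pending));  // terminates instead of hanging
  return 0;
}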
Bug: v8:13113 Change-Id: Ief2686614d9703fb590400ac3e73b6ac9008c8f6 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3891373 Reviewed-by: Alexander Schulze Commit-Queue: Michael Achenbach Cr-Commit-Position: refs/heads/main@{#83188} --- tools/testrunner/testproc/fuzzer.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tools/testrunner/testproc/fuzzer.py b/tools/testrunner/testproc/fuzzer.py index 6c552d83ff..1bbf8bc1be 100644 --- a/tools/testrunner/testproc/fuzzer.py +++ b/tools/testrunner/testproc/fuzzer.py @@ -245,10 +245,11 @@ class FuzzerProc(base.TestProcProducer): i += 1 def _try_send_next_test(self, test): - if not self.is_stopped: - for subtest in self._gens[test.procid]: - if self._send_test(subtest): - return True + for subtest in self._gens[test.procid]: + if self._send_test(subtest): + return True + elif self.is_stopped: + return False del self._gens[test.procid] return False From cc371918095c22257d1f8d311304e6736562cf21 Mon Sep 17 00:00:00 2001 From: Jose Dapena Paz Date: Wed, 7 Sep 2022 18:37:17 +0200 Subject: [PATCH 0104/1772] IWYU: missing include for std::unique_lock in wasm module-compiler.cc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix build error: ../../v8/src/wasm/module-compiler.cc:147:10: error: ‘unique_lock’ is not a member of ‘std’ 147 | std::unique_lock queues_guard{queues_mutex_}; | ^~~~~~~~~~~ Bug: chromium:957519 Change-Id: I0d14730d5b8dd76820fcc0a47d66ab0bd3e38f24 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3880498 Commit-Queue: Jakob Kummerow Reviewed-by: Jakob Kummerow Cr-Commit-Position: refs/heads/main@{#83189} --- src/wasm/module-compiler.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/src/wasm/module-compiler.cc b/src/wasm/module-compiler.cc index d3f56a9517..55806d5141 100644 --- a/src/wasm/module-compiler.cc +++ b/src/wasm/module-compiler.cc @@ -5,6 +5,7 @@ #include "src/wasm/module-compiler.h" #include +#include <mutex> // NOLINT(build/c++11) #include #include From 416d6308e07f31f8f00bf26aa6dceabd99a7c6e6 Mon Sep 17 00:00:00 2001 From: Omer Katz Date: Fri, 2 Sep 2022 20:33:15 +0200 Subject: [PATCH 0105/1772] [heap] Turn on fuzzing for MinorMC Based on bots and local testing, MinorMC has reached a stable state in terms of correctness. Enable fuzzing with MinorMC to flush out additional issues.
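The flag lists touched below all appear to feed the same kind of mechanism: each candidate flag carries an independent per-run probability, 0.1 for --minor-mc in each list, so roughly one fuzz run in ten picks it up. A toy model of that sampling, illustrative only and unrelated to the runner's actual code:

#include <cstdio>
#include <random>
#include <string>
#include <vector>

struct ExtraFlag {
  double probability;  // chance of being appended to one run
  std::string flag;
};

std::vector<std::string> SampleFlags(const std::vector<ExtraFlag>& flags,
                                     std::mt19937& rng) {
  std::uniform_real_distribution<double> coin(0.0, 1.0);
  std::vector<std::string> picked;
  for (const ExtraFlag& f : flags) {
    if (coin(rng) < f.probability) picked.push_back(f.flag);
  }
  return picked;
}

int main() {
  // Mirrors two entries from the JSON below; sampling is per run.
  std::vector<ExtraFlag> flags = {{0.25, "--no-use-map-space"},
                                  {0.1, "--minor-mc"}};
  std::mt19937 rng(42);
  for (const std::string& f : SampleFlags(flags, rng)) {
    std::printf("%s\n", f.c_str());
  }
  return 0;
}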
Bug: v8:12612 Change-Id: I9cf8c5791d7256ff63c777b295863506436ee165 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3872265 Reviewed-by: Almothana Athamneh Commit-Queue: Omer Katz Cr-Commit-Position: refs/heads/main@{#83190} --- tools/clusterfuzz/foozzie/v8_fuzz_flags.json | 3 ++- tools/clusterfuzz/trials/clusterfuzz_trials_config.json | 1 + tools/testrunner/testproc/fuzzer.py | 1 + 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/tools/clusterfuzz/foozzie/v8_fuzz_flags.json b/tools/clusterfuzz/foozzie/v8_fuzz_flags.json index e10bf11045..4099053f86 100644 --- a/tools/clusterfuzz/foozzie/v8_fuzz_flags.json +++ b/tools/clusterfuzz/foozzie/v8_fuzz_flags.json @@ -31,5 +31,6 @@ [0.1, "--no-wasm-generic-wrapper"], [0.1, "--turbo-force-mid-tier-regalloc"], [0.0001, "--simulate-errors"], - [0.25, "--no-use-map-space"] + [0.25, "--no-use-map-space"], + [0.1, "--minor-mc"] ] diff --git a/tools/clusterfuzz/trials/clusterfuzz_trials_config.json b/tools/clusterfuzz/trials/clusterfuzz_trials_config.json index e1acbb0d98..a6b90de8d9 100644 --- a/tools/clusterfuzz/trials/clusterfuzz_trials_config.json +++ b/tools/clusterfuzz/trials/clusterfuzz_trials_config.json @@ -7,6 +7,7 @@ {"app_args": "--interrupt-budget=1000", "app_name": "d8", "probability": 0.25}, {"app_args": "--jitless", "app_name": "d8", "probability": 0.1}, {"app_args": "--maglev", "app_name": "d8", "probability": 0.1, "contradicts": ["--jitless"]}, + {"app_args": "--minor-mc", "app_name": "d8", "probability": 0.1}, {"app_args": "--random-gc-interval=2000", "app_name": "d8", "probability": 0.05}, {"app_args": "--noanalyze-environment-liveness", "app_name": "d8", "probability": 0.1}, {"app_args": "--no-enable-avx", "app_name": "d8", "probability": 0.1}, diff --git a/tools/testrunner/testproc/fuzzer.py b/tools/testrunner/testproc/fuzzer.py index 1bbf8bc1be..2d7fab6653 100644 --- a/tools/testrunner/testproc/fuzzer.py +++ b/tools/testrunner/testproc/fuzzer.py @@ -21,6 +21,7 @@ EXTRA_FLAGS = [ # (0.1, '--interrupt-budget-for-maglev=100'), (0.1, '--liftoff'), (0.1, '--maglev'), + (0.1, '--minor-mc'), (0.2, '--no-analyze-environment-liveness'), # TODO(machenbach): Enable when it doesn't collide with crashing on missing # simd features. From 7096496aca295ee2ae0c49d5dc00e8524073a556 Mon Sep 17 00:00:00 2001 From: Omer Katz Date: Fri, 9 Sep 2022 11:13:56 +0000 Subject: [PATCH 0106/1772] Revert "[heap] Remove MinorMC variant from bots" This reverts commit 7c64e5b43b88c69543a40af7a76d38b218917540. Reason for revert: MinorMC passes all CQ bots again (crrev.com/c/3872266) Original change's description: > [heap] Remove MinorMC variant from bots > > As part of revising MinorMC, it would soon be broken and bots should > not be red because of it. 
> > Bug: v8:12612 > Change-Id: I0551d0a115ac2f4fa7fc32190458850f80b84cf5 > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3810353 > Commit-Queue: Omer Katz > Reviewed-by: Almothana Athamneh > Cr-Commit-Position: refs/heads/main@{#82197} Bug: v8:12612 Change-Id: I4a08f79efc3b5fc133a0a920a11d2af559b5bf4f Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3885890 Commit-Queue: Omer Katz Reviewed-by: Almothana Athamneh Cr-Commit-Position: refs/heads/main@{#83191} --- infra/testing/builders.pyl | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/infra/testing/builders.pyl b/infra/testing/builders.pyl index dffb14dfa8..ca2fab5eac 100644 --- a/infra/testing/builders.pyl +++ b/infra/testing/builders.pyl @@ -376,6 +376,7 @@ {'name': 'test262', 'variant': 'extra', 'shards': 9}, {'name': 'v8testing', 'shards': 5}, {'name': 'v8testing', 'variant': 'extra', 'shards': 5}, + {'name': 'v8testing', 'variant': 'minor_mc'}, {'name': 'v8testing', 'variant': 'no_lfa'}, {'name': 'v8testing', 'variant': 'stress_instruction_scheduling'}, {'name': 'v8testing', 'variant': 'stress_concurrent_allocation'}, @@ -592,6 +593,7 @@ {'name': 'v8initializers'}, {'name': 'v8testing', 'shards': 2}, {'name': 'v8testing', 'variant': 'extra', 'shards': 2}, + {'name': 'v8testing', 'variant': 'minor_mc'}, {'name': 'v8testing', 'variant': 'no_lfa'}, {'name': 'v8testing', 'variant': 'slow_path'}, {'name': 'v8testing', 'variant': 'stress_instruction_scheduling'}, @@ -1257,6 +1259,7 @@ {'name': 'v8testing'}, {'name': 'v8testing', 'variant': 'assert_types'}, {'name': 'v8testing', 'variant': 'extra'}, + {'name': 'v8testing', 'variant': 'minor_mc'}, {'name': 'v8testing', 'variant': 'no_lfa'}, {'name': 'v8testing', 'variant': 'stress_instruction_scheduling'}, # Maglev -- move to extra once more architectures are supported. @@ -1316,6 +1319,7 @@ {'name': 'test262', 'variant': 'extra', 'shards': 5}, {'name': 'v8testing', 'shards': 2}, {'name': 'v8testing', 'variant': 'extra', 'shards': 2}, + {'name': 'v8testing', 'variant': 'minor_mc'}, {'name': 'v8testing', 'variant': 'no_lfa'}, {'name': 'v8testing', 'variant': 'slow_path'}, {'name': 'v8testing', 'variant': 'stress_instruction_scheduling'}, From b4cd59c3831f70ffa38b7e00b20a75f0e3840c6d Mon Sep 17 00:00:00 2001 From: Michael Achenbach Date: Wed, 14 Sep 2022 13:50:24 +0000 Subject: [PATCH 0107/1772] Revert "[heap] Use std::unique_ptr for space_ array" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 6d342fa52c9205d98cecd8e08c36f70ab8afd720. Reason for revert: Needed to land: https://crrev.com/c/3892788 Original change's description: > [heap] Use std::unique_ptr for space_ array > > Document ownership with using std::unique_ptr for the space_ > array. 
> > Bug: v8:13267 > Change-Id: I12861d97cd52d2a8cf9ceb43a2f90008be87b2a3 > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3890913 > Reviewed-by: Michael Lippautz > Commit-Queue: Dominik Inführ > Cr-Commit-Position: refs/heads/main@{#83187} Bug: v8:13267 Change-Id: Ieeb29454e146ee763130c0031af3f7a48b4eec94 No-Presubmit: true No-Tree-Checks: true No-Try: true Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3895811 Commit-Queue: Michael Achenbach Owners-Override: Michael Achenbach Bot-Commit: Rubber Stamper Auto-Submit: Michael Achenbach Cr-Commit-Position: refs/heads/main@{#83192} --- src/heap/heap-inl.h | 4 +-- src/heap/heap.cc | 60 +++++++++++++++++---------------------------- src/heap/heap.h | 2 +- 3 files changed, 26 insertions(+), 40 deletions(-) diff --git a/src/heap/heap-inl.h b/src/heap/heap-inl.h index 9050491aff..c58cc702ab 100644 --- a/src/heap/heap-inl.h +++ b/src/heap/heap-inl.h @@ -172,10 +172,10 @@ void Heap::SetPendingOptimizeForTestBytecode(Object hash_table) { PagedSpace* Heap::paged_space(int idx) { DCHECK(idx == OLD_SPACE || idx == CODE_SPACE || idx == MAP_SPACE); - return static_cast(space_[idx].get()); + return static_cast(space_[idx]); } -Space* Heap::space(int idx) { return space_[idx].get(); } +Space* Heap::space(int idx) { return space_[idx]; } Address* Heap::NewSpaceAllocationTopAddress() { return new_space_ ? new_space_->allocation_top_address() : nullptr; diff --git a/src/heap/heap.cc b/src/heap/heap.cc index b36bf9e45b..fba1e099fb 100644 --- a/src/heap/heap.cc +++ b/src/heap/heap.cc @@ -5368,6 +5368,10 @@ void Heap::SetUp(LocalHeap* main_thread_local_heap) { concurrent_marking_.reset(new ConcurrentMarking(this, nullptr)); } + for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) { + space_[i] = nullptr; + } + // Set up layout tracing callback. 
if (V8_UNLIKELY(v8_flags.trace_gc_heap_layout)) { v8::GCType gc_type = kGCTypeMarkSweepCompact; @@ -5386,7 +5390,7 @@ void Heap::SetUpFromReadOnlyHeap(ReadOnlyHeap* ro_heap) { DCHECK_NOT_NULL(ro_heap); DCHECK_IMPLIES(read_only_space_ != nullptr, read_only_space_ == ro_heap->read_only_space()); - DCHECK_NULL(space_[RO_SPACE].get()); + space_[RO_SPACE] = nullptr; read_only_space_ = ro_heap->read_only_space(); heap_allocator_.SetReadOnlySpace(read_only_space_); } @@ -5429,49 +5433,30 @@ void Heap::SetUpSpaces(LinearAllocationArea& new_allocation_info, const bool has_young_gen = !v8_flags.single_generation && !IsShared(); if (has_young_gen) { if (v8_flags.minor_mc) { - space_[NEW_SPACE] = std::make_unique( - this, initial_semispace_size_, max_semi_space_size_, - new_allocation_info); + space_[NEW_SPACE] = new_space_ = + new PagedNewSpace(this, initial_semispace_size_, max_semi_space_size_, + new_allocation_info); } else { - space_[NEW_SPACE] = std::make_unique( - this, initial_semispace_size_, max_semi_space_size_, - new_allocation_info); + space_[NEW_SPACE] = new_space_ = + new SemiSpaceNewSpace(this, initial_semispace_size_, + max_semi_space_size_, new_allocation_info); } - new_space_ = static_cast(space_[NEW_SPACE].get()); - - space_[NEW_LO_SPACE] = - std::make_unique(this, NewSpaceCapacity()); - new_lo_space_ = - static_cast(space_[NEW_LO_SPACE].get()); + space_[NEW_LO_SPACE] = new_lo_space_ = + new NewLargeObjectSpace(this, NewSpaceCapacity()); } - - space_[OLD_SPACE] = std::make_unique(this, old_allocation_info); - old_space_ = static_cast(space_[OLD_SPACE].get()); - - space_[CODE_SPACE] = std::make_unique(this); - code_space_ = static_cast(space_[CODE_SPACE].get()); - + space_[OLD_SPACE] = old_space_ = new OldSpace(this, old_allocation_info); + space_[CODE_SPACE] = code_space_ = new CodeSpace(this); if (v8_flags.use_map_space) { - space_[MAP_SPACE] = std::make_unique(this); - map_space_ = static_cast(space_[MAP_SPACE].get()); + space_[MAP_SPACE] = map_space_ = new MapSpace(this); } - if (v8_flags.shared_space && isolate()->is_shared_space_isolate()) { - space_[SHARED_SPACE] = std::make_unique(this); - shared_space_ = static_cast(space_[SHARED_SPACE].get()); + space_[SHARED_SPACE] = shared_space_ = new SharedSpace(this); } - - space_[LO_SPACE] = std::make_unique(this); - lo_space_ = static_cast(space_[LO_SPACE].get()); - - space_[CODE_LO_SPACE] = std::make_unique(this); - code_lo_space_ = - static_cast(space_[CODE_LO_SPACE].get()); - + space_[LO_SPACE] = lo_space_ = new OldLargeObjectSpace(this); + space_[CODE_LO_SPACE] = code_lo_space_ = new CodeLargeObjectSpace(this); if (v8_flags.shared_space && isolate()->is_shared_space_isolate()) { - space_[SHARED_LO_SPACE] = std::make_unique(this); - shared_lo_space_ = - static_cast(space_[SHARED_LO_SPACE].get()); + space_[SHARED_LO_SPACE] = shared_lo_space_ = + new SharedLargeObjectSpace(this); } for (int i = 0; i < static_cast(v8::Isolate::kUseCounterFeatureCount); @@ -5878,7 +5863,8 @@ void Heap::TearDown() { "Deletion of CODE_SPACE and CODE_LO_SPACE requires write access to " "Code page headers"); for (int i = FIRST_MUTABLE_SPACE; i <= LAST_MUTABLE_SPACE; i++) { - space_[i].reset(); + delete space_[i]; + space_[i] = nullptr; } } diff --git a/src/heap/heap.h b/src/heap/heap.h index 0a45bacd4f..429b2bb601 100644 --- a/src/heap/heap.h +++ b/src/heap/heap.h @@ -2206,7 +2206,7 @@ class Heap { std::unique_ptr shared_map_allocator_; // Map from the space id to the space. 
- std::unique_ptr space_[LAST_SPACE + 1]; + Space* space_[LAST_SPACE + 1]; LocalHeap* main_thread_local_heap_ = nullptr; From 7e4ee6862e122379703ce40c63e85e66a73b85c1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Samuel=20Gro=C3=9F?= Date: Wed, 14 Sep 2022 11:51:05 +0000 Subject: [PATCH 0108/1772] [sandbox] Introduce ExternalPointerTable::Freelist MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This struct represents the freelist of an ExternalPointerTable and contains both the size and the head of the freelist. It is encoded and stored as a single Atomic64 field (freelist_) inside the ExternalPointerTable class. This ensures that the freelist head and size are always synchronized. Previously, the freelist size was encoded in freelist entries in the top bits. This only works as long as the maximum table size is relatively small however, as it requires both the freelist size and the index of the next entry on the list to fit into 24 bits. To allow for bigger maximum table sizes in the future, this CL moves the freelist size directly into the table as part of the freelist_ field. Bug: v8:10391 Change-Id: Id09c9b28d09d79b704ac47e6566029cfb209ecd1 Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3891256 Commit-Queue: Samuel Groß Reviewed-by: Igor Sheludko Cr-Commit-Position: refs/heads/main@{#83193} --- include/v8-internal.h | 4 - src/execution/isolate.cc | 4 - src/sandbox/external-pointer-table-inl.h | 75 ++++++++------- src/sandbox/external-pointer-table.cc | 45 ++++----- src/sandbox/external-pointer-table.h | 116 +++++++++++++++-------- 5 files changed, 139 insertions(+), 105 deletions(-) diff --git a/include/v8-internal.h b/include/v8-internal.h index 2009db9cbb..c0a6542c5d 100644 --- a/include/v8-internal.h +++ b/include/v8-internal.h @@ -520,10 +520,6 @@ class Internals { // ExternalPointerTable layout guarantees. static const int kExternalPointerTableBufferOffset = 0; - static const int kExternalPointerTableCapacityOffset = - kExternalPointerTableBufferOffset + kApiSystemPointerSize; - static const int kExternalPointerTableFreelistHeadOffset = - kExternalPointerTableCapacityOffset + kApiInt32Size; static const int kExternalPointerTableSize = 4 * kApiSystemPointerSize; // IsolateData layout guarantees. 
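The heart of the change is the encoding: head and size share one 64-bit word, so a single atomic load, store, or compare-and-swap always observes the pair consistently. A standalone sketch of that encoding, mirroring the Freelist class added later in this patch but without the atomics around it:

#include <cassert>
#include <cstdint>

// Head in the low 32 bits, size in the high 32 bits; a freelist is
// empty exactly when the whole word is zero.
class Freelist {
 public:
  Freelist() : encoded_(0) {}
  Freelist(uint32_t head, uint32_t size)
      : encoded_((static_cast<uint64_t>(size) << 32) | head) {}

  uint32_t Head() const { return static_cast<uint32_t>(encoded_); }
  uint32_t Size() const { return static_cast<uint32_t>(encoded_ >> 32); }
  bool IsEmpty() const { return encoded_ == 0; }

  uint64_t Encode() const { return encoded_; }
  static Freelist Decode(uint64_t word) { return Freelist(word); }

 private:
  explicit Freelist(uint64_t word) : encoded_(word) {}
  uint64_t encoded_;
};

int main() {
  Freelist f(/*head=*/17, /*size=*/1024);
  assert(f.Head() == 17 && f.Size() == 1024 && !f.IsEmpty());
  // Round-trips through the single word that lives in freelist_.
  assert(Freelist::Decode(f.Encode()).Head() == 17);
  assert(Freelist().IsEmpty());
  return 0;
}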
diff --git a/src/execution/isolate.cc b/src/execution/isolate.cc index 6061ae5868..44e516dc61 100644 --- a/src/execution/isolate.cc +++ b/src/execution/isolate.cc @@ -3469,10 +3469,6 @@ void Isolate::CheckIsolateLayout() { #ifdef V8_ENABLE_SANDBOX CHECK_EQ(static_cast(OFFSET_OF(ExternalPointerTable, buffer_)), Internals::kExternalPointerTableBufferOffset); - CHECK_EQ(static_cast(OFFSET_OF(ExternalPointerTable, capacity_)), - Internals::kExternalPointerTableCapacityOffset); - CHECK_EQ(static_cast(OFFSET_OF(ExternalPointerTable, freelist_head_)), - Internals::kExternalPointerTableFreelistHeadOffset); CHECK_EQ(static_cast(sizeof(ExternalPointerTable)), Internals::kExternalPointerTableSize); CHECK_EQ(static_cast(sizeof(ExternalPointerTable)), diff --git a/src/sandbox/external-pointer-table-inl.h b/src/sandbox/external-pointer-table-inl.h index 5c47ffeb6c..1e4ff34e61 100644 --- a/src/sandbox/external-pointer-table-inl.h +++ b/src/sandbox/external-pointer-table-inl.h @@ -47,22 +47,27 @@ Address ExternalPointerTable::Exchange(ExternalPointerHandle handle, return old_entry.Untag(tag); } -bool ExternalPointerTable::TryAllocateEntryFromFreelist( - uint32_t freelist_head) { - DCHECK(freelist_head); - DCHECK_LT(freelist_head, capacity()); +bool ExternalPointerTable::TryAllocateEntryFromFreelist(Freelist freelist) { + DCHECK(!freelist.IsEmpty()); + DCHECK_LT(freelist.Head(), capacity()); + DCHECK_LT(freelist.Size(), capacity()); - Entry entry = RelaxedLoad(freelist_head); + Entry entry = RelaxedLoad(freelist.Head()); uint32_t new_freelist_head = entry.ExtractNextFreelistEntry(); - uint32_t old_val = base::Relaxed_CompareAndSwap( - &freelist_head_, freelist_head, new_freelist_head); - bool success = old_val == freelist_head; + Freelist new_freelist(new_freelist_head, freelist.Size() - 1); + bool success = Relaxed_CompareAndSwapFreelist(freelist, new_freelist); // When the CAS succeeded, the entry must've been a freelist entry. // Otherwise, this is not guaranteed as another thread may have allocated // the same entry in the meantime. - DCHECK(!success || entry.IsFreelistEntry()); + if (success) { + DCHECK(entry.IsFreelistEntry()); + DCHECK_LT(new_freelist.Head(), capacity()); + DCHECK_LT(new_freelist.Size(), capacity()); + DCHECK_IMPLIES(freelist.Size() > 1, !new_freelist.IsEmpty()); + DCHECK_IMPLIES(freelist.Size() == 1, new_freelist.IsEmpty()); + } return success; } @@ -70,7 +75,7 @@ ExternalPointerHandle ExternalPointerTable::AllocateAndInitializeEntry( Isolate* isolate, Address initial_value, ExternalPointerTag tag) { DCHECK(is_initialized()); - uint32_t freelist_head; + Freelist freelist; bool success = false; while (!success) { // This is essentially DCLP (see @@ -78,58 +83,62 @@ ExternalPointerHandle ExternalPointerTable::AllocateAndInitializeEntry( // and so requires an acquire load as well as a release store in Grow() to // prevent reordering of memory accesses, which could for example cause one // thread to read a freelist entry before it has been properly initialized. - freelist_head = base::Acquire_Load(&freelist_head_); - if (!freelist_head) { + freelist = Acquire_GetFreelist(); + if (freelist.IsEmpty()) { // Freelist is empty. Need to take the lock, then attempt to grow the // table if no other thread has done it in the meantime. base::MutexGuard guard(mutex_); // Reload freelist head in case another thread already grew the table. 
- freelist_head = base::Relaxed_Load(&freelist_head_); + freelist = Relaxed_GetFreelist(); - if (!freelist_head) { + if (freelist.IsEmpty()) { // Freelist is (still) empty so grow the table. - freelist_head = Grow(isolate); + freelist = Grow(isolate); + // Grow() adds one block to the table and so to the freelist. + DCHECK_EQ(freelist.Size(), kEntriesPerBlock); } } - success = TryAllocateEntryFromFreelist(freelist_head); + success = TryAllocateEntryFromFreelist(freelist); } - Entry entry = Entry::MakeRegularEntry(initial_value, tag); - RelaxedStore(freelist_head, entry); + DCHECK_NE(freelist.Head(), 0); + DCHECK_LT(freelist.Head(), capacity()); - return IndexToHandle(freelist_head); + uint32_t entry_index = freelist.Head(); + Entry entry = Entry::MakeRegularEntry(initial_value, tag); + RelaxedStore(entry_index, entry); + + return IndexToHandle(entry_index); } ExternalPointerHandle ExternalPointerTable::AllocateEvacuationEntry( uint32_t start_of_evacuation_area) { DCHECK(is_initialized()); + DCHECK_LT(start_of_evacuation_area, capacity()); - uint32_t freelist_head; + Freelist freelist; bool success = false; while (!success) { - freelist_head = base::Acquire_Load(&freelist_head_); + freelist = Acquire_GetFreelist(); // Check that the next free entry is below the start of the evacuation area. - if (!freelist_head || freelist_head >= start_of_evacuation_area) + if (freelist.IsEmpty() || freelist.Head() >= start_of_evacuation_area) return kNullExternalPointerHandle; - success = TryAllocateEntryFromFreelist(freelist_head); + success = TryAllocateEntryFromFreelist(freelist); } - return IndexToHandle(freelist_head); + DCHECK_NE(freelist.Head(), 0); + DCHECK_LT(freelist.Head(), start_of_evacuation_area); + + return IndexToHandle(freelist.Head()); } uint32_t ExternalPointerTable::FreelistSize() { - Entry entry; - do { - uint32_t freelist_head = base::Relaxed_Load(&freelist_head_); - if (!freelist_head) return 0; - entry = RelaxedLoad(freelist_head); - } while (!entry.IsFreelistEntry()); - uint32_t freelist_size = entry.ExtractFreelistSize(); - DCHECK_LE(freelist_size, capacity()); - return freelist_size; + Freelist freelist = Relaxed_GetFreelist(); + DCHECK_LE(freelist.Size(), capacity()); + return freelist.Size(); } void ExternalPointerTable::Mark(ExternalPointerHandle handle, diff --git a/src/sandbox/external-pointer-table.cc b/src/sandbox/external-pointer-table.cc index 3f57b87569..c9599e571b 100644 --- a/src/sandbox/external-pointer-table.cc +++ b/src/sandbox/external-pointer-table.cc @@ -82,16 +82,16 @@ void ExternalPointerTable::TearDown() { buffer_ = kNullAddress; capacity_ = 0; - freelist_head_ = 0; + freelist_ = 0; mutex_ = nullptr; } uint32_t ExternalPointerTable::SweepAndCompact(Isolate* isolate) { // There must not be any entry allocations while the table is being swept as - // that would not be safe. Set the freelist head to this special marker value - // to better catch any violation of this requirement. - uint32_t old_freelist_head = base::Relaxed_Load(&freelist_head_); - base::Release_Store(&freelist_head_, kTableIsCurrentlySweepingMarker); + // that would not be safe. Set the freelist to this special marker value to + // better catch any violation of this requirement. + Freelist old_freelist = Relaxed_GetFreelist(); + base::Release_Store(&freelist_, kTableIsCurrentlySweepingMarker); // Keep track of the last block (identified by the index of its first entry) // that has live entries. Used to decommit empty blocks at the end. 
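Allocation is then a lock-free pop: read the (head, size) word, follow the head entry to find the next free index, and compare-and-swap the whole word. A racing allocation can only make the CAS fail and force a retry; it can never leave head and size out of sync. A self-contained sketch using std::atomic in place of V8's base atomics, with a plain array standing in for the table:

#include <atomic>
#include <cassert>
#include <cstdint>

// Each free table entry stores the index of the next free entry.
uint64_t table[64];
std::atomic<uint64_t> freelist{0};  // (size << 32) | head

bool TryAllocateFromFreelist(uint64_t expected, uint32_t* out_index) {
  uint32_t head = static_cast<uint32_t>(expected);
  uint32_t size = static_cast<uint32_t>(expected >> 32);
  if (head == 0) return false;  // empty freelist
  uint32_t next = static_cast<uint32_t>(table[head]);
  uint64_t desired = (static_cast<uint64_t>(size - 1) << 32) | next;
  // On success the popped entry is ours; on failure another thread
  // won the race and the caller re-reads the freelist word.
  if (!freelist.compare_exchange_strong(expected, desired)) return false;
  *out_index = head;
  return true;
}

int main() {
  // Build a three-entry freelist: 1 -> 2 -> 3 -> end.
  table[1] = 2; table[2] = 3; table[3] = 0;
  freelist.store((3ull << 32) | 1);

  uint32_t index = 0;
  while (!TryAllocateFromFreelist(freelist.load(), &index)) {}
  assert(index == 1);
  assert(static_cast<uint32_t>(freelist.load() >> 32) == 2);  // size
  return 0;
}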
@@ -112,14 +112,14 @@ uint32_t ExternalPointerTable::SweepAndCompact(Isolate* isolate) { // Extract the original start_of_evacuation_area value so that the // DCHECKs below work correctly. first_block_of_evacuation_area &= ~kCompactionAbortedMarker; - } else if (!old_freelist_head || - old_freelist_head > first_block_of_evacuation_area) { + } else if (old_freelist.IsEmpty() || + old_freelist.Head() > first_block_of_evacuation_area) { // In this case, marking finished successfully, but the application // afterwards allocated entries inside the area that is being compacted. // In this case, we can still compute how many blocks at the end of the // table are now empty. - if (old_freelist_head) { - last_in_use_block = RoundDown(old_freelist_head, kEntriesPerBlock); + if (!old_freelist.IsEmpty()) { + last_in_use_block = RoundDown(old_freelist.Head(), kEntriesPerBlock); } outcome = TableCompactionOutcome::kPartialSuccess; } else { @@ -206,8 +206,7 @@ uint32_t ExternalPointerTable::SweepAndCompact(Isolate* isolate) { // compaction was already aborted during marking. } else if (!entry.IsMarked()) { current_freelist_size++; - Entry entry = Entry::MakeFreelistEntry(current_freelist_head, - current_freelist_size); + Entry entry = Entry::MakeFreelistEntry(current_freelist_head); Store(i, entry); current_freelist_head = i; } else { @@ -247,7 +246,8 @@ uint32_t ExternalPointerTable::SweepAndCompact(Isolate* isolate) { StopCompacting(); } - base::Release_Store(&freelist_head_, current_freelist_head); + Freelist new_freelist(current_freelist_head, current_freelist_size); + Release_SetFreelist(new_freelist); uint32_t num_active_entries = capacity() - current_freelist_size; isolate->counters()->external_pointers_count()->AddSample(num_active_entries); @@ -285,9 +285,9 @@ void ExternalPointerTable::StopCompacting() { set_start_of_evacuation_area(kNotCompactingMarker); } -uint32_t ExternalPointerTable::Grow(Isolate* isolate) { - // Freelist should be empty. - DCHECK_EQ(0, freelist_head_); +ExternalPointerTable::Freelist ExternalPointerTable::Grow(Isolate* isolate) { + // Freelist should be empty when calling this method. + DCHECK(Relaxed_GetFreelist().IsEmpty()); // Mutex must be held when calling this method. mutex_->AssertHeld(); @@ -324,18 +324,19 @@ uint32_t ExternalPointerTable::Grow(Isolate* isolate) { // Build freelist bottom to top, which might be more cache friendly. uint32_t start = std::max(old_capacity, 1); // Skip entry zero uint32_t last = new_capacity - 1; - uint32_t current_freelist_size = 1; for (uint32_t i = start; i < last; i++) { - uint32_t next_entry = i + 1; - Store(i, Entry::MakeFreelistEntry(next_entry, current_freelist_size++)); + uint32_t next_free_entry = i + 1; + Store(i, Entry::MakeFreelistEntry(next_free_entry)); } - Store(last, Entry::MakeFreelistEntry(0, current_freelist_size)); + Store(last, Entry::MakeFreelistEntry(0)); // This must be a release store to prevent reordering of the preceeding // stores to the freelist from being reordered past this store. See - // Allocate() for more details. - base::Release_Store(&freelist_head_, start); - return start; + // AllocateAndInitializeEntry() for more details. 
+  Freelist new_freelist(start, last - start + 1);
+  Release_SetFreelist(new_freelist);
+
+  return new_freelist;
 }
 
 }  // namespace internal
diff --git a/src/sandbox/external-pointer-table.h b/src/sandbox/external-pointer-table.h
index 59445971a5..eb76c35e54 100644
--- a/src/sandbox/external-pointer-table.h
+++ b/src/sandbox/external-pointer-table.h
@@ -206,12 +206,12 @@ class V8_EXPORT_PRIVATE ExternalPointerTable {
 #endif
   static constexpr size_t kEntriesPerBlock = kBlockSize / kSystemPointerSize;
 
-  // When the table is swept, it first sets the freelist head to this special
-  // value to better catch any violation of the "don't-alloc-while-sweeping"
-  // requirement (see SweepAndCompact()). This value is chosen so it points to
-  // the last entry in the table, which should usually be inaccessible.
-  static constexpr uint32_t kTableIsCurrentlySweepingMarker =
-      (kExternalPointerTableReservationSize / kSystemPointerSize) - 1;
+  // When the table is swept, it first sets the freelist_ to this special value
+  // to better catch any violation of the "don't-alloc-while-sweeping"
+  // requirement (see SweepAndCompact()). This value should never occur as
+  // freelist_ value during normal operations and should be easy to recognize.
+  static constexpr uint64_t kTableIsCurrentlySweepingMarker =
+      static_cast<uint64_t>(-1);
 
   // This value is used for start_of_evacuation_area to indicate that the table
   // is not currently being compacted. It is set to uint32_t max so that
@@ -265,6 +265,55 @@ class V8_EXPORT_PRIVATE ExternalPointerTable {
     base::Relaxed_Store(&start_of_evacuation_area_, value);
   }
 
+  // Struct to represent the freelist of a table.
+  // In its encoded form, this is stored in the freelist_ member of the table.
+  class Freelist {
+   public:
+    Freelist() : encoded_(0) {}
+    Freelist(uint32_t head, uint32_t size)
+        : encoded_((static_cast<uint64_t>(size) << 32) | head) {}
+
+    uint32_t Head() const { return static_cast<uint32_t>(encoded_); }
+    uint32_t Size() const { return static_cast<uint32_t>(encoded_ >> 32); }
+
+    bool IsEmpty() const {
+      DCHECK_EQ(Head() == 0, Size() == 0);
+      return encoded_ == 0;
+    }
+
+    uint64_t Encode() const { return encoded_; }
+
+    static Freelist Decode(uint64_t encoded_form) {
+      DCHECK_NE(encoded_form, kTableIsCurrentlySweepingMarker);
+      return Freelist(encoded_form);
+    }
+
+   private:
+    explicit Freelist(uint64_t encoded_form) : encoded_(encoded_form) {}
+
+    uint64_t encoded_;
+  };
+
+  // Freelist accessors.
+  Freelist Relaxed_GetFreelist() {
+    return Freelist::Decode(base::Relaxed_Load(&freelist_));
+  }
+  Freelist Acquire_GetFreelist() {
+    return Freelist::Decode(base::Acquire_Load(&freelist_));
+  }
+  void Relaxed_SetFreelist(Freelist new_freelist) {
+    base::Relaxed_Store(&freelist_, new_freelist.Encode());
+  }
+  void Release_SetFreelist(Freelist new_freelist) {
+    base::Release_Store(&freelist_, new_freelist.Encode());
+  }
+  bool Relaxed_CompareAndSwapFreelist(Freelist old_freelist,
+                                      Freelist new_freelist) {
+    uint64_t old_val = base::Relaxed_CompareAndSwap(
+        &freelist_, old_freelist.Encode(), new_freelist.Encode());
+    return old_val == old_freelist.Encode();
+  }
+
   // Allocate an entry suitable as evacuation entry during table compaction.
   //
   // This method will always return an entry before the start of the evacuation
@@ -285,14 +334,15 @@ class V8_EXPORT_PRIVATE ExternalPointerTable {
   // This method is mostly a wrapper around an atomic compare-and-swap which
   // replaces the current freelist_head with the next entry in the freelist,
   // thereby allocating the entry at the start of the freelist.
-  inline bool TryAllocateEntryFromFreelist(uint32_t freelist_head);
+  inline bool TryAllocateEntryFromFreelist(Freelist freelist);
 
   // Extends the table and adds newly created entries to the freelist. Returns
-  // the new freelist head. When calling this method, mutex_ must be locked.
+  // the new freelist.
+  // When calling this method, mutex_ must be locked.
   // If the table cannot be grown, either because it is already at its maximum
   // size or because the memory for it could not be allocated, this method will
   // fail with an OOM crash.
-  uint32_t Grow(Isolate* isolate);
+  Freelist Grow(Isolate* isolate);
 
   // Stop compacting at the end of sweeping.
   void StopCompacting();
@@ -359,8 +409,7 @@ class V8_EXPORT_PRIVATE ExternalPointerTable {
     void SetMarkBit() { value_ |= kExternalPointerMarkBit; }
     void ClearMarkBit() { value_ &= ~kExternalPointerMarkBit; }
 
-    // Returns true if this entry is part of the freelist, in which case
-    // ExtractNextFreelistEntry and ExtractFreelistSize may be used.
+    // Returns true if this entry is part of the freelist.
     bool IsFreelistEntry() const {
       return HasTag(kExternalPointerFreeEntryTag);
     }
@@ -381,14 +430,7 @@ class V8_EXPORT_PRIVATE ExternalPointerTable {
     // is only valid if this is a freelist entry. This behaviour is required
     // for efficient entry allocation, see TryAllocateEntryFromFreelist.
     uint32_t ExtractNextFreelistEntry() const {
-      return static_cast<uint32_t>(value_) & 0x00ffffff;
-    }
-
-    // Extract the size of the freelist following this entry. Must only be
-    // called if this is a freelist entry. See also MakeFreelistEntry.
-    uint32_t ExtractFreelistSize() const {
-      DCHECK(IsFreelistEntry());
-      return static_cast<uint32_t>(value_ >> 24) & 0x00ffffff;
+      return static_cast<uint32_t>(value_);
     }
 
     // An evacuation entry contains the address of the Handle to a (regular)
@@ -410,26 +452,11 @@ class V8_EXPORT_PRIVATE ExternalPointerTable {
     }
 
     // Constructs a freelist entry given the current freelist head and size.
-    static Entry MakeFreelistEntry(uint32_t current_freelist_head,
-                                   uint32_t current_freelist_size) {
-      // The next freelist entry is stored in the lower 24 bits of the entry.
-      // The freelist size is stored in the next 24 bits. If we ever need larger
-      // tables, and therefore larger indices to encode the next free entry, we
-      // can make the freelist size an approximation and drop some of the bottom
-      // bits of the value when encoding it.
-      // We could also keep the freelist size as an additional uint32_t member,
-      // but encoding it in this way saves one atomic compare-exchange on every
-      // entry allocation.
-      static_assert(kMaxExternalPointers <= (1ULL << 24));
-      static_assert(kExternalPointerFreeEntryTag >= (1ULL << 48));
-      DCHECK_LT(current_freelist_head, kMaxExternalPointers);
-      DCHECK_LT(current_freelist_size, kMaxExternalPointers);
-
-      Address value = current_freelist_size;
-      value <<= 24;
-      value |= current_freelist_head;
-      value |= kExternalPointerFreeEntryTag;
-      return Entry(value);
+    static Entry MakeFreelistEntry(uint32_t next_freelist_entry) {
+      // The next freelist entry is stored in the lower bits of the entry.
+      static_assert(kMaxExternalPointers < (1ULL << kExternalPointerTagShift));
+      DCHECK_LT(next_freelist_entry, kMaxExternalPointers);
+      return Entry(next_freelist_entry | kExternalPointerFreeEntryTag);
     }
 
     // Constructs an evacuation entry containing the given handle location.
@@ -538,9 +565,6 @@ class V8_EXPORT_PRIVATE ExternalPointerTable {
   // The current capacity of this table, which is the number of usable entries.
  base::Atomic32 capacity_ = 0;
 
-  // The index of the first entry on the freelist or zero if the list is empty.
-  base::Atomic32 freelist_head_ = 0;
-
   // When compacting the table, this value contains the index of the first
   // entry in the evacuation area. The evacuation area is the region at the end
   // of the table from which entries are moved out of so that the underlying
   // memory pages can be freed after sweeping.
@@ -556,6 +580,14 @@ class V8_EXPORT_PRIVATE ExternalPointerTable {
   // background threads during GC marking (for example to abort compaction).
   base::Atomic32 start_of_evacuation_area_ = kNotCompactingMarker;
 
+  // The freelist used by this table.
+  // This field stores an (encoded) Freelist struct, i.e. the index of the
+  // current head of the freelist and the current size of the freelist. These
+  // two values need to be updated together (in a single atomic word) so they
+  // stay correctly synchronized when entries are allocated from the freelist
+  // from multiple threads.
+  base::Atomic64 freelist_ = 0;
+
   // Lock protecting the slow path for entry allocation, in particular Grow().
   // As the size of this structure must be predictable (it's part of
   // IsolateData), it cannot directly contain a Mutex and so instead contains a

From 67cbe057dae41a6f4e7567ed7fa8665a881ca2f0 Mon Sep 17 00:00:00 2001
From: Michael Achenbach
Date: Wed, 14 Sep 2022 13:47:46 +0000
Subject: [PATCH 0109/1772] Revert "[heap] Add shared spaces for --shared-space"
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This reverts commit 10756bea834a35ddc604e21e077487ecf0aa3de9.

Reason for revert: Test failures on GPU and other Chromium bots:
https://ci.chromium.org/ui/p/v8/builders/ci/Linux%20V8%20FYI%20Release%20(NVIDIA)/21271/overview
https://luci-milo.appspot.com/ui/inv/build-8803047917676096065/test-results?q=V8MemoryDumpProviderTest.DumpGlobalHandlesSize

Original change's description:
> [heap] Add shared spaces for --shared-space
>
> This CL adds shared spaces for regular and large objects in the shared
> space isolate. Spaces aren't used for allocation yet.
>
> Bug: v8:13267
> Change-Id: If508144530f4c9a1b3c0567570165955b64cc200
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3876824
> Reviewed-by: Jakob Linke
> Commit-Queue: Dominik Inführ
> Reviewed-by: Michael Lippautz
> Cr-Commit-Position: refs/heads/main@{#83178}

Bug: v8:13267
Change-Id: Iefa01243ae8bebaba5cda8426a5aa0f4fd306bf3
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3892788
Owners-Override: Michael Achenbach
Auto-Submit: Michael Achenbach
Commit-Queue: Michael Achenbach
Bot-Commit: Rubber Stamper
Cr-Commit-Position: refs/heads/main@{#83194}
---
 src/common/globals.h         | 24 +++++++++----------
 src/flags/flag-definitions.h |  2 --
 src/heap/base-space.cc       |  4 ----
 src/heap/heap-allocator.cc   |  2 +-
 src/heap/heap-inl.h          |  2 --
 src/heap/heap.cc             | 45 ++++++++++--------------------------
 src/heap/heap.h              | 13 ++++-------
 src/heap/large-spaces.cc     | 10 --------
 src/heap/large-spaces.h      |  8 -------
 src/heap/paged-spaces.h      | 26 ---------------------
 src/snapshot/serializer.cc   |  2 --
 11 files changed, 29 insertions(+), 109 deletions(-)

diff --git a/src/common/globals.h b/src/common/globals.h
index 34a58f5201..68634d7d1c 100644
--- a/src/common/globals.h
+++ b/src/common/globals.h
@@ -969,22 +969,20 @@ using WeakSlotCallbackWithHeap = bool (*)(Heap* heap, FullObjectSlot pointer);
 
 // NOTE: SpaceIterator depends on AllocationSpace enumeration values being
 // consecutive.
enum AllocationSpace { - RO_SPACE, // Immortal, immovable and immutable objects, - OLD_SPACE, // Old generation regular object space. - CODE_SPACE, // Old generation code object space, marked executable. - MAP_SPACE, // Old generation map object space, non-movable. - NEW_SPACE, // Young generation space for regular objects collected - // with Scavenger/MinorMC. - SHARED_SPACE, // Space shared between multiple isolates. Optional. - LO_SPACE, // Old generation large object space. - CODE_LO_SPACE, // Old generation large code object space. - NEW_LO_SPACE, // Young generation large object space. - SHARED_LO_SPACE, // Space shared between multiple isolates. Optional. + RO_SPACE, // Immortal, immovable and immutable objects, + OLD_SPACE, // Old generation regular object space. + CODE_SPACE, // Old generation code object space, marked executable. + MAP_SPACE, // Old generation map object space, non-movable. + NEW_SPACE, // Young generation space for regular objects collected + // with Scavenger/MinorMC. + LO_SPACE, // Old generation large object space. + CODE_LO_SPACE, // Old generation large code object space. + NEW_LO_SPACE, // Young generation large object space. FIRST_SPACE = RO_SPACE, - LAST_SPACE = SHARED_LO_SPACE, + LAST_SPACE = NEW_LO_SPACE, FIRST_MUTABLE_SPACE = OLD_SPACE, - LAST_MUTABLE_SPACE = SHARED_LO_SPACE, + LAST_MUTABLE_SPACE = NEW_LO_SPACE, FIRST_GROWABLE_PAGED_SPACE = OLD_SPACE, LAST_GROWABLE_PAGED_SPACE = MAP_SPACE, FIRST_SWEEPABLE_SPACE = OLD_SPACE, diff --git a/src/flags/flag-definitions.h b/src/flags/flag-definitions.h index 113d4f6ec0..16f796d43d 100644 --- a/src/flags/flag-definitions.h +++ b/src/flags/flag-definitions.h @@ -1224,8 +1224,6 @@ DEFINE_BOOL(global_gc_scheduling, true, DEFINE_BOOL(gc_global, false, "always perform global GCs") DEFINE_BOOL(shared_space, false, "Implement shared heap as shared space on a main isolate.") -// Don't use a map space with --shared-space in order to avoid shared map space. -DEFINE_NEG_IMPLICATION(shared_space, use_map_space) // TODO(12950): The next two flags only have an effect if // V8_ENABLE_ALLOCATION_TIMEOUT is set, so we should only define them in that diff --git a/src/heap/base-space.cc b/src/heap/base-space.cc index bfcacbcee3..aabbeaebf5 100644 --- a/src/heap/base-space.cc +++ b/src/heap/base-space.cc @@ -17,16 +17,12 @@ const char* BaseSpace::GetSpaceName(AllocationSpace space) { return "map_space"; case CODE_SPACE: return "code_space"; - case SHARED_SPACE: - return "shared_space"; case LO_SPACE: return "large_object_space"; case NEW_LO_SPACE: return "new_large_object_space"; case CODE_LO_SPACE: return "code_large_object_space"; - case SHARED_LO_SPACE: - return "shared_lo_space"; case RO_SPACE: return "read_only_space"; } diff --git a/src/heap/heap-allocator.cc b/src/heap/heap-allocator.cc index be23977973..c78098ef28 100644 --- a/src/heap/heap-allocator.cc +++ b/src/heap/heap-allocator.cc @@ -31,7 +31,7 @@ void HeapAllocator::Setup() { shared_map_allocator_ = heap_->shared_map_allocator_ ? 
                              heap_->shared_map_allocator_.get()
                              : shared_old_allocator_;
-  shared_lo_space_ = heap_->shared_isolate_lo_space_;
+  shared_lo_space_ = heap_->shared_lo_space();
 }
 
 void HeapAllocator::SetReadOnlySpace(ReadOnlySpace* read_only_space) {
diff --git a/src/heap/heap-inl.h b/src/heap/heap-inl.h
index c58cc702ab..6991a6dca5 100644
--- a/src/heap/heap-inl.h
+++ b/src/heap/heap-inl.h
@@ -485,8 +485,6 @@ bool Heap::IsPendingAllocationInternal(HeapObject object) {
       return addr == large_space->pending_object();
     }
 
-    case SHARED_SPACE:
-    case SHARED_LO_SPACE:
     case RO_SPACE:
       UNREACHABLE();
   }
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index fba1e099fb..5a307ff9e1 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -4317,10 +4317,9 @@ bool Heap::ContainsCode(HeapObject value) const {
 }
 
 bool Heap::SharedHeapContains(HeapObject value) const {
-  if (shared_isolate_old_space_)
-    return shared_isolate_old_space_->Contains(value) ||
-           (shared_isolate_map_space_ &&
-            shared_isolate_map_space_->Contains(value));
+  if (shared_old_space_)
+    return shared_old_space_->Contains(value) ||
+           (shared_map_space_ && shared_map_space_->Contains(value));
   return false;
 }
 
@@ -4351,16 +4350,12 @@ bool Heap::InSpace(HeapObject value, AllocationSpace space) const {
     case MAP_SPACE:
       DCHECK(map_space_);
       return map_space_->Contains(value);
-    case SHARED_SPACE:
-      return shared_space_->Contains(value);
     case LO_SPACE:
       return lo_space_->Contains(value);
     case CODE_LO_SPACE:
       return code_lo_space_->Contains(value);
    case NEW_LO_SPACE:
       return new_lo_space_->Contains(value);
-    case SHARED_LO_SPACE:
-      return shared_lo_space_->Contains(value);
     case RO_SPACE:
       return ReadOnlyHeap::Contains(value);
   }
@@ -4385,16 +4380,12 @@ bool Heap::InSpaceSlow(Address addr, AllocationSpace space) const {
     case MAP_SPACE:
       DCHECK(map_space_);
       return map_space_->ContainsSlow(addr);
-    case SHARED_SPACE:
-      return shared_space_->ContainsSlow(addr);
     case LO_SPACE:
       return lo_space_->ContainsSlow(addr);
     case CODE_LO_SPACE:
       return code_lo_space_->ContainsSlow(addr);
     case NEW_LO_SPACE:
       return new_lo_space_->ContainsSlow(addr);
-    case SHARED_LO_SPACE:
-      return shared_lo_space_->ContainsSlow(addr);
     case RO_SPACE:
       return read_only_space_->ContainsSlow(addr);
   }
@@ -4407,11 +4398,9 @@ bool Heap::IsValidAllocationSpace(AllocationSpace space) {
     case OLD_SPACE:
     case CODE_SPACE:
     case MAP_SPACE:
-    case SHARED_SPACE:
     case LO_SPACE:
     case NEW_LO_SPACE:
     case CODE_LO_SPACE:
-    case SHARED_LO_SPACE:
     case RO_SPACE:
       return true;
     default:
@@ -5449,15 +5438,8 @@ void Heap::SetUpSpaces(LinearAllocationArea& new_allocation_info,
   if (v8_flags.use_map_space) {
     space_[MAP_SPACE] = map_space_ = new MapSpace(this);
   }
-  if (v8_flags.shared_space && isolate()->is_shared_space_isolate()) {
-    space_[SHARED_SPACE] = shared_space_ = new SharedSpace(this);
-  }
   space_[LO_SPACE] = lo_space_ = new OldLargeObjectSpace(this);
   space_[CODE_LO_SPACE] = code_lo_space_ = new CodeLargeObjectSpace(this);
-  if (v8_flags.shared_space && isolate()->is_shared_space_isolate()) {
-    space_[SHARED_LO_SPACE] = shared_lo_space_ =
-        new SharedLargeObjectSpace(this);
-  }
 
   for (int i = 0; i < static_cast<int>(v8::Isolate::kUseCounterFeatureCount);
        i++) {
@@ -5535,15 +5517,15 @@ void Heap::SetUpSpaces(LinearAllocationArea& new_allocation_info,
 
   if (isolate()->shared_isolate()) {
     Heap* shared_heap = isolate()->shared_isolate()->heap();
 
-    shared_isolate_old_space_ = shared_heap->old_space();
-    shared_isolate_lo_space_ = shared_heap->lo_space();
-    shared_old_allocator_.reset(new ConcurrentAllocator(
-        main_thread_local_heap(), shared_isolate_old_space_));
+    shared_old_space_ = shared_heap->old_space();
+    shared_lo_space_ = shared_heap->lo_space();
+    shared_old_allocator_.reset(
+        new ConcurrentAllocator(main_thread_local_heap(), shared_old_space_));
 
     if (shared_heap->map_space()) {
-      shared_isolate_map_space_ = shared_heap->map_space();
-      shared_map_allocator_.reset(new ConcurrentAllocator(
-          main_thread_local_heap(), shared_isolate_map_space_));
+      shared_map_space_ = shared_heap->map_space();
+      shared_map_allocator_.reset(
+          new ConcurrentAllocator(main_thread_local_heap(), shared_map_space_));
     }
   }
 
@@ -5852,10 +5834,10 @@ void Heap::TearDown() {
 
   allocation_sites_to_pretenure_.reset();
 
-  shared_isolate_old_space_ = nullptr;
+  shared_old_space_ = nullptr;
   shared_old_allocator_.reset();
 
-  shared_isolate_map_space_ = nullptr;
+  shared_map_space_ = nullptr;
   shared_map_allocator_.reset();
 
   {
@@ -6789,12 +6771,9 @@ bool Heap::AllowedToBeMigrated(Map map, HeapObject obj, AllocationSpace dst) {
       return dst == CODE_SPACE && type == CODE_TYPE;
     case MAP_SPACE:
       return dst == MAP_SPACE && type == MAP_TYPE;
-    case SHARED_SPACE:
-      return dst == SHARED_SPACE;
     case LO_SPACE:
     case CODE_LO_SPACE:
    case NEW_LO_SPACE:
-    case SHARED_LO_SPACE:
     case RO_SPACE:
       return false;
   }
diff --git a/src/heap/heap.h b/src/heap/heap.h
index 429b2bb601..daca783901 100644
--- a/src/heap/heap.h
+++ b/src/heap/heap.h
@@ -127,9 +127,7 @@ class SafepointScope;
 class ScavengeJob;
 class Scavenger;
 class ScavengerCollector;
-class SharedLargeObjectSpace;
 class SharedReadOnlySpace;
-class SharedSpace;
 class Space;
 class StressScavengeObserver;
 class TimedHistogram;
@@ -878,11 +876,12 @@ class Heap {
   NewSpace* new_space() const { return new_space_; }
   inline PagedNewSpace* paged_new_space() const;
   OldSpace* old_space() const { return old_space_; }
-  OldSpace* shared_old_space() const { return shared_isolate_old_space_; }
+  OldSpace* shared_old_space() const { return shared_old_space_; }
   CodeSpace* code_space() const { return code_space_; }
   MapSpace* map_space() const { return map_space_; }
   inline PagedSpace* space_for_maps();
   OldLargeObjectSpace* lo_space() const { return lo_space_; }
+  OldLargeObjectSpace* shared_lo_space() const { return shared_lo_space_; }
   CodeLargeObjectSpace* code_lo_space() const { return code_lo_space_; }
   NewLargeObjectSpace* new_lo_space() const { return new_lo_space_; }
   ReadOnlySpace* read_only_space() const { return read_only_space_; }
@@ -2191,16 +2190,14 @@ class Heap {
   OldSpace* old_space_ = nullptr;
   CodeSpace* code_space_ = nullptr;
   MapSpace* map_space_ = nullptr;
-  SharedSpace* shared_space_ = nullptr;
   OldLargeObjectSpace* lo_space_ = nullptr;
   CodeLargeObjectSpace* code_lo_space_ = nullptr;
   NewLargeObjectSpace* new_lo_space_ = nullptr;
-  SharedLargeObjectSpace* shared_lo_space_ = nullptr;
   ReadOnlySpace* read_only_space_ = nullptr;
 
-  OldSpace* shared_isolate_old_space_ = nullptr;
-  OldLargeObjectSpace* shared_isolate_lo_space_ = nullptr;
-  MapSpace* shared_isolate_map_space_ = nullptr;
+  OldSpace* shared_old_space_ = nullptr;
+  OldLargeObjectSpace* shared_lo_space_ = nullptr;
+  MapSpace* shared_map_space_ = nullptr;
 
   std::unique_ptr<ConcurrentAllocator> shared_old_allocator_;
   std::unique_ptr<ConcurrentAllocator> shared_map_allocator_;
 
diff --git a/src/heap/large-spaces.cc b/src/heap/large-spaces.cc
index 2baed404a2..74c621e81f 100644
--- a/src/heap/large-spaces.cc
+++ b/src/heap/large-spaces.cc
@@ -582,15 +582,5 @@ void CodeLargeObjectSpace::RemovePage(LargePage* page) {
   OldLargeObjectSpace::RemovePage(page);
 }
 
-SharedLargeObjectSpace::SharedLargeObjectSpace(Heap* heap)
-    : OldLargeObjectSpace(heap, SHARED_LO_SPACE) {}
-
-AllocationResult SharedLargeObjectSpace::AllocateRawBackground(
-    LocalHeap* local_heap, int object_size) {
-  DCHECK(!v8_flags.enable_third_party_heap);
-  return OldLargeObjectSpace::AllocateRawBackground(local_heap, object_size,
-                                                    NOT_EXECUTABLE);
-}
-
 }  // namespace internal
 }  // namespace v8
diff --git a/src/heap/large-spaces.h b/src/heap/large-spaces.h
index 576c672fff..70c55833e1 100644
--- a/src/heap/large-spaces.h
+++ b/src/heap/large-spaces.h
@@ -190,14 +190,6 @@ class OldLargeObjectSpace : public LargeObjectSpace {
       LocalHeap* local_heap, int object_size, Executability executable);
 };
 
-class SharedLargeObjectSpace : public OldLargeObjectSpace {
- public:
-  explicit SharedLargeObjectSpace(Heap* heap);
-
-  V8_EXPORT_PRIVATE V8_WARN_UNUSED_RESULT AllocationResult
-  AllocateRawBackground(LocalHeap* local_heap, int object_size);
-};
-
 class NewLargeObjectSpace : public LargeObjectSpace {
  public:
   NewLargeObjectSpace(Heap* heap, size_t capacity);
diff --git a/src/heap/paged-spaces.h b/src/heap/paged-spaces.h
index 986aed3a31..7241a29b0e 100644
--- a/src/heap/paged-spaces.h
+++ b/src/heap/paged-spaces.h
@@ -571,32 +571,6 @@ class MapSpace final : public PagedSpace {
   LinearAllocationArea paged_allocation_info_;
 };
 
-// -----------------------------------------------------------------------------
-// Shared space regular object space.
-
-class SharedSpace final : public PagedSpace {
- public:
-  // Creates an old space object. The constructor does not allocate pages
-  // from OS.
-  explicit SharedSpace(Heap* heap)
-      : PagedSpace(heap, SHARED_SPACE, NOT_EXECUTABLE,
-                   FreeList::CreateFreeList(), allocation_info) {}
-
-  static bool IsAtPageStart(Address addr) {
-    return static_cast<intptr_t>(addr & kPageAlignmentMask) ==
-           MemoryChunkLayout::ObjectStartOffsetInDataPage();
-  }
-
-  size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final {
-    if (type == ExternalBackingStoreType::kArrayBuffer) return 0;
-    DCHECK_EQ(type, ExternalBackingStoreType::kExternalString);
-    return external_backing_store_bytes_[type];
-  }
-
- private:
-  LinearAllocationArea allocation_info;
-};
-
 // Iterates over the chunks (pages and large object pages) that can contain
 // pointers to new space or to evacuation candidates.
 class OldGenerationMemoryChunkIterator {
diff --git a/src/snapshot/serializer.cc b/src/snapshot/serializer.cc
index e9971705ec..4410790f19 100644
--- a/src/snapshot/serializer.cc
+++ b/src/snapshot/serializer.cc
@@ -788,8 +788,6 @@ SnapshotSpace GetSnapshotSpace(HeapObject object) {
       return SnapshotSpace::kCode;
     case MAP_SPACE:
       return SnapshotSpace::kMap;
-    case SHARED_SPACE:
-    case SHARED_LO_SPACE:
     case CODE_LO_SPACE:
     case RO_SPACE:
       UNREACHABLE();

From f3a0e8bccf6462c58ab7ea60ef83c7defcd98f9f Mon Sep 17 00:00:00 2001
From: Leszek Swirski
Date: Wed, 14 Sep 2022 15:54:14 +0200
Subject: [PATCH 0110/1772] [runtime] Key template object cache on Script

Use Script as the key for the template object cache, instead of the
SharedFunctionInfo. This is because SharedFunctionInfos can be garbage
collected and then later recompiled, which would mean that we break the
spec's expectation that the template object stays constant.

Now the association of cached template object with SharedFunctionInfo
is via the function_literal_id of the SharedFunctionInfo, stored on the
CachedTemplateObject. These are linearly searched, similar to the
linear search over slot ids.
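To make the keying scheme concrete, here is a hedged sketch of the idea only (not the actual weakmap-based V8 implementation; all names below are illustrative): the cache key is derived from the Script plus the stable function_literal_id and slot id, so a SharedFunctionInfo being collected and recompiled does not change which template object a call site gets.

    #include <map>
    #include <tuple>

    struct TemplateObject {};  // Stand-in for the frozen strings array.

    class TemplateCache {
     public:
      // script_id and function_literal_id are stable across recompilation,
      // so the same tagged-template site always maps to the same object.
      TemplateObject* LookupOrInsert(int script_id, int function_literal_id,
                                     int slot_id) {
        Key key{script_id, function_literal_id, slot_id};
        auto it = cache_.find(key);
        if (it == cache_.end()) {
          it = cache_.emplace(key, TemplateObject{}).first;
        }
        return &it->second;
      }

     private:
      using Key = std::tuple<int, int, int>;
      std::map<Key, TemplateObject> cache_;
    };

The design point is that a Script outlives its SharedFunctionInfos: the literal id identifies the same function across recompilation, which is what preserves the spec's identical-object guarantee for tagged templates.
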
Bug: v8:13190
Change-Id: I3f67811c16ea4cd39c99b2fa034aa7e1f03c171e
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3892787
Reviewed-by: Camillo Bruni
Commit-Queue: Camillo Bruni
Auto-Submit: Leszek Swirski
Commit-Queue: Leszek Swirski
Cr-Commit-Position: refs/heads/main@{#83195}
---
 src/objects/objects-inl.h                |  3 ++
 src/objects/template-objects.cc          | 24 +++++++-----
 src/objects/template-objects.h           |  3 +-
 src/objects/template-objects.tq          |  1 +
 src/runtime/runtime-test.cc              | 17 +++++++++
 src/runtime/runtime.h                    |  2 +
 src/snapshot/context-serializer.cc       | 11 +++---
 test/mjsunit/mjsunit.js                  | 11 ++++++
 test/mjsunit/regress/regress-v8-13190.js | 48 ++++++++++++++++++++++++
 9 files changed, 104 insertions(+), 16 deletions(-)
 create mode 100644 test/mjsunit/regress/regress-v8-13190.js

diff --git a/src/objects/objects-inl.h b/src/objects/objects-inl.h
index 4ba77c3009..a6262d06ea 100644
--- a/src/objects/objects-inl.h
+++ b/src/objects/objects-inl.h
@@ -1135,6 +1135,9 @@ Object Object::GetSimpleHash(Object object) {
   } else if (InstanceTypeChecker::IsSharedFunctionInfo(instance_type)) {
     uint32_t hash = SharedFunctionInfo::cast(object).Hash();
     return Smi::FromInt(hash & Smi::kMaxValue);
+  } else if (InstanceTypeChecker::IsScript(instance_type)) {
+    int id = Script::cast(object).id();
+    return Smi::FromInt(ComputeUnseededHash(id) & Smi::kMaxValue);
   }
   DCHECK(object.IsJSReceiver());
   return object;
diff --git a/src/objects/template-objects.cc b/src/objects/template-objects.cc
index 0bcca301d9..340a9da14a 100644
--- a/src/objects/template-objects.cc
+++ b/src/objects/template-objects.cc
@@ -23,6 +23,7 @@ Handle<JSArray> TemplateObjectDescription::GetTemplateObject(
   // Check the template weakmap to see if the template object already exists.
   Handle<EphemeronHashTable> template_weakmap;
+  Handle