Merge v11.2. Last aurora commit: 053e54e7

# Conflicts:
#	include/v8-cppgc.h
(Reverted the upstream v8 changes, keeping our fix. As before, this doesn't
 resolve the MSVC build issues and is not a proper fix. xref: 4c7c7d1a)

#	src/codegen/cpu-features.h
(...did something change?)

#	src/flags/flag-definitions.h
(xref: 053e54e7)

#	src/heap/safepoint.cc
[-] (USE-AFTER-FREE) Quick hack: removed a mutex guard on the shared RemoveClient to temporarily mitigate a crash during deinit.

Well, this is dumb. It looks like someone at Google also figured out this can lead to crashes; their solution was to remove the mutex in favor of a call to AssertActive.
Given that my issue involved a dead context while everything else was still alive, I don't want to find out what that AssertActive does in that state. Reverting the v8 change.
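
For context, a minimal C++ sketch of the tradeoff described above. The names and structure here are hypothetical, not the actual code in src/heap/safepoint.cc; it only illustrates why a lock and an assertion are not interchangeable during teardown:

    #include <cassert>
    #include <mutex>
    #include <unordered_set>

    struct Client {};

    class SharedSafepoint {
     public:
      // Variant A (kept here): the lock serializes client removal against
      // concurrent safepoint iteration, so removal during deinit cannot race.
      void RemoveClientLocked(Client* client) {
        std::lock_guard<std::mutex> guard(clients_mutex_);
        clients_.erase(client);
      }

      // Variant B (upstream-style): no lock; an assertion merely documents
      // the invariant that exclusive access is already guaranteed. If
      // teardown runs against a dead context, nothing enforces exclusion.
      void RemoveClientAsserted(Client* client) {
        assert(active_);  // hypothetical stand-in for AssertActive()
        clients_.erase(client);
      }

     private:
      bool active_ = false;
      std::mutex clients_mutex_;
      std::unordered_set<Client*> clients_;
    };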

#	src/objects/fixed-array-inl.h
(who cares)
Commit 7c5992067e by Reece Wilson, 2023-02-02 03:02:48 +00:00
1313 changed files with 45157 additions and 36785 deletions

.git-blame-ignore-revs

@ -18,11 +18,106 @@
# - Because you must use a hash, you need to append to this list in a follow-up
# CL to the actual reformatting CL that you are trying to ignore.
# objects.h splitting
0604031eb1d01c52b6c1c9ae3012d80b23d74a68
09e405453359000d66cc0faaa102854e626bebeb
766ef168fbcac6bd0728cc2c9bb3ae7cbd74278a
b5a2839b927be04bdb50a236071c11764e4d6400
c911f91b5b6219e038c0117b05a8375bdf3db0b0
1bb48bf91ba8c887884a0fbd674c91f64964d8a5
19da9f24df7b59fec72b9dd8a979ad0ce5639c87
b090d7e7468236ffce0afdb55bb496bf0073f2ee
f40638d148b7a435522d5b714993908061e3b10d
e8a1c25f6afae9b77921abb70fad49da252eb6f0
6fa8283d0e031c9585d190f751907ed45bf85de0
9aa861c4bcfed612039259f93c2cd2b01337e99e
8175648018bd9f70af866f9fa433f1d79644d86b
c7b1ceb801ec7f639a093468d8e6424212cc197c
e39d2cbe1b1baa6513ddce2d73c981e335cc34fb
eda00a5c499b7a83479115eb275a816b8a2ed104
68deca9b418976ca8b3375e81058a9e0a815357f
0525e17847f39f80e3fd163021a58f68d8fcaf06
81a3c699d6eef936452ac3d10c7c59a2c1e38c0c
01452bedfca2b5447a7f62bda87edbbb76259a6e
1baf1050113a5418696839c273e05ea5ad1b5c4d
4b39fe3d608916b1cfea015de287511a1623fc7f
c6effdbba9b301244475553538f6eb1b3d9670b9
71e4c573199466ea4541e3d6b307c9b33d7bb785
efc92f0d4aa77bb90f5b56606b6f0d0819fba4af
a9db2c74b5bae2345ac52be404748954a3b5050d
0a01b6202226bbe99c0b83acf6c5a80344f5fb6a
a6c44361c8f2dc07b935e3f2bb3e0d3ad4f4a383
10d8aab1de430695a69e9d75af6ea42c2cdc9d6d
dd3c4fca2f0a2761b8b95cd47fcd62836d714890
e9c932233980866074025e65051003d1f298516c
2b1f79881c3f0b69bfb9274bda57ea50f7304982
7f031160d71a3d836667dc98288eaff4c94e6f56
490fabb4578f8a3c4096fdccff688c17ed5ed00d
d953b2ab726acca0b3abe90ce090a16d7ccc2ae3
bb514c426b9438cfb1149d219ac4ec2d8d1c8458
dfb453d713d8a05e76f720a6aae2871eec210276
b490fd66b873c89fca37b21eab58502b6367a864
9a71683d9c8ff9470eda6be5b2b11babac7b9863
37945f731c4d800ef788e3c32f8663773a93450e
b90c98fc29a8d896354de4a22c055f6d98376171
35f3e9d0e654e84646a0b98f29e4a2786cdca4b1
260eb5bb9b62ea3d5fa6ad0b0e8c2de75d48bad4
cc2c11441ce352360acce8638a19f58edf361f7d
7be0159e4b1e0b064e215ae4ced34d649cb2552e
95a7cfe0eaabbcff0f730ed60e1805779f6cfe41
8f54d18ba4ad10770e9537a2803459feccfe79a3
f44759d9ff52a3e5563e5f2bb23ee2c08222fcfd
09050c8a967f5f2956305e5d016b304d7bf5e669
c769745d5856a7eb3a0dbe6af5376c7638944364
a1547aa914aeedd7862f74124c18d2bbaf432c36
5f950698c0dc7c36b855961feb929022f74102fb
4aedeb1bd50c12ebcd6cf954c4cbef1205fff5ac
7366d8954cb1bd277d3283241da2fae62b886c48
bc35251f5e55a65c3a4acf7cba52cee505c86a46
4fb60b215801db70c694a799e735b64bfead59bb
03762b8488de0e393077e3f40fe7b63e675b3af3
a8a45d875f0a98b192cf0063ceda12aaf75ddfaf
a48e5ab8804e9e97b5ea577d6f2667bacee92eb2
# Update of quotations in DEPS file.
e50b49a0e38b34e2b28e026f4d1c7e0da0c7bb1a
# Rewrite code base to use "." instead of "->" to access Object members.
878ccb33bd3cf0e6dc018ff8d15843f585ac07be
# Splitting src/ into subfolders
632239011db501e76475d82ff6492f37fa8c1edc
f455f86d899716df3b9550950ce172f5b867619a
24a51e1eee4e286165dd0bba6afb4c35e8177a25
f9a88acbc928f0fc5e9a3acbcd3b4ece52355f3d
dec3298d9cfbe95759774a0e00302a08836b5f3d
a0c3797461810e3159662851e64946e17654236e
b72941e8b0d2843adf768442024d8950da798db1
4c986c625f19e35c95f3492c662822f4695218b4
0fa243af7096ee5b748b194476be2e4efecaec59
786ce26341b7ab11b4d42f1c77202530d5138ad2
a6eeea35cb7ff0c29b6cfdd1c786f382110241ce
be014256adea1552d4a044ef80616cdab6a7d549
93d3b7173fec7d010539057cdbd78d497f09fa9b
5bfe84a0dab60289b3470c080908ce83ac2212d4
a7695520556665ba73ab02c497ab73b162a5fb13
61523c45a335fe3be76498e0b16bf8e7aec0d058
bf372a73d8a5f4029fc9f4f69b675ef0cad80ada
8ad6b335376c6275ffb3361c662a1a45c853f4fc
06bf8261cf2c94fc071652652600b5790f719c05
81a0102fe8586071cc68e9595b26c5c1207ee5b3
5f28539599f6a6a265e18b8c897cc96ccbeec9c4
3253767622a784866dc34aeb7b5d0f02ebdff61e
9ac8b20086f95f1158a1901eefe12e25fd0333e4
3cb560adfe26edb586a0e6e655e5a7c4755cad1a
7bbd0bfe5161d57bcf268716ce4d1ce14d6786e6
c39cabbcbea26891558b81fd2236c38a7aeada08
a3187716d31a0ab9d7051adde6be9bd2b2c6fec1
# Move test/mjsunit/regress-*.js => test/mjsunit/regress/
cb67be1a3842fcf6a0da18aee444e3b7ea789e04
# [include] Split out v8.h
d1b27019d3bf86360ea838c317f8505fac6d3a7e
44fe02ced6e4c6b49d627807e3b3fd0edbbeb36e
ec06bb6ce5641cf65e400ec55b7421f87d04b999

.gitignore (1 change)

@ -28,6 +28,7 @@
.clangd
.cpplint-cache
.cproject
.DS_Store
.gclient_entries
.gdb_history
.idea

AUTHORS

@ -153,6 +153,8 @@ Jiawen Geng <technicalcute@gmail.com>
Jiaxun Yang <jiaxun.yang@flygoat.com>
Joel Stanley <joel@jms.id.au>
Johan Bergström <johan@bergstroem.nu>
Johan Levin <johan13@gmail.com>
John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
Jonathan Liu <net147@gmail.com>
Juan Arboleda <soyjuanarbol@gmail.com>
Julien Brianceau <jbriance@cisco.com>

BUILD.bazel

@ -16,7 +16,8 @@ load(
"v8_library",
"v8_mksnapshot",
"v8_string",
"v8_torque",
"v8_torque_definitions",
"v8_torque_initializers",
)
load(":bazel/v8-non-pointer-compression.bzl", "v8_binary_non_pointer_compression")
@ -68,6 +69,7 @@ load(":bazel/v8-non-pointer-compression.bzl", "v8_binary_non_pointer_compression
# v8_enable_sandbox
# cppgc_enable_caged_heap
# cppgc_enable_check_assignments_in_prefinalizers
# cppgc_enable_slim_write_barrier
# cppgc_enable_object_names
# cppgc_enable_pointer_compression
# cppgc_enable_verify_heap
@ -146,6 +148,11 @@ v8_flag(name = "v8_enable_verify_predictable")
v8_flag(name = "v8_enable_test_features")
v8_flag(
name = "v8_enable_turbofan",
default = True,
)
v8_flag(
name = "v8_enable_webassembly",
default = True,
@ -320,6 +327,7 @@ v8_config(
"v8_enable_snapshot_native_code_counters": "V8_SNAPSHOT_NATIVE_CODE_COUNTERS",
"v8_enable_static_roots": "V8_STATIC_ROOTS",
"v8_enable_trace_maps": "V8_TRACE_MAPS",
"v8_enable_turbofan": "V8_ENABLE_TURBOFAN",
"v8_enable_v8_checks": "V8_ENABLE_CHECKS",
"v8_enable_verify_csa": "ENABLE_VERIFY_CSA",
"v8_enable_verify_heap": "VERIFY_HEAP",
@ -861,6 +869,7 @@ filegroup(
"src/builtins/string-html.tq",
"src/builtins/string-includes.tq",
"src/builtins/string-indexof.tq",
"src/builtins/string-iswellformed.tq",
"src/builtins/string-iterator.tq",
"src/builtins/string-match-search.tq",
"src/builtins/string-pad.tq",
@ -870,6 +879,7 @@ filegroup(
"src/builtins/string-startswith.tq",
"src/builtins/string-substr.tq",
"src/builtins/string-substring.tq",
"src/builtins/string-towellformed.tq",
"src/builtins/string-trim.tq",
"src/builtins/symbol.tq",
"src/builtins/torque-internal.tq",
@ -1155,7 +1165,6 @@ filegroup(
"src/builtins/builtins-utils-inl.h",
"src/builtins/builtins-utils.h",
"src/builtins/builtins-weak-refs.cc",
"src/builtins/builtins-web-snapshots.cc",
"src/builtins/builtins.cc",
"src/builtins/builtins.h",
"src/builtins/constants-table-builder.cc",
@ -1254,6 +1263,7 @@ filegroup(
"src/compiler-dispatcher/lazy-compile-dispatcher.h",
"src/compiler-dispatcher/optimizing-compile-dispatcher.cc",
"src/compiler-dispatcher/optimizing-compile-dispatcher.h",
"src/compiler/turbofan.h",
"src/date/date.cc",
"src/date/date.h",
"src/date/dateparser-inl.h",
@ -1442,9 +1452,8 @@ filegroup(
"src/heap/cppgc-js/unified-heap-marking-verifier.h",
"src/heap/cppgc-js/unified-heap-marking-visitor.cc",
"src/heap/cppgc-js/unified-heap-marking-visitor.h",
"src/heap/embedder-tracing.cc",
"src/heap/embedder-tracing.h",
"src/heap/embedder-tracing-inl.h",
"src/heap/cppgc-js/wrappable-info.h",
"src/heap/cppgc-js/wrappable-info-inl.h",
"src/heap/evacuation-verifier.cc",
"src/heap/evacuation-verifier.h",
"src/heap/evacuation-verifier-inl.h",
@ -1668,6 +1677,8 @@ filegroup(
"src/numbers/conversions.cc",
"src/numbers/conversions.h",
"src/numbers/hash-seed-inl.h",
"src/numbers/integer-literal-inl.h",
"src/numbers/integer-literal.h",
"src/numbers/math-random.cc",
"src/numbers/math-random.h",
"src/objects/all-objects-inl.h",
@ -1783,6 +1794,7 @@ filegroup(
"src/objects/js-shadow-realm-inl.h",
"src/objects/js-shared-array.h",
"src/objects/js-shared-array-inl.h",
"src/objects/js-struct.cc",
"src/objects/js-struct.h",
"src/objects/js-struct-inl.h",
"src/objects/js-temporal-objects.h",
@ -2199,8 +2211,6 @@ filegroup(
"src/utils/utils.h",
"src/utils/version.cc",
"src/utils/version.h",
"src/web-snapshot/web-snapshot.h",
"src/web-snapshot/web-snapshot.cc",
"src/zone/accounting-allocator.cc",
"src/zone/accounting-allocator.h",
"src/zone/compressed-zone-ptr.h",
@ -2242,10 +2252,6 @@ filegroup(
"src/codegen/ia32/macro-assembler-ia32.h",
"src/codegen/ia32/register-ia32.h",
"src/codegen/ia32/reglist-ia32.h",
"src/compiler/backend/ia32/code-generator-ia32.cc",
"src/compiler/backend/ia32/instruction-codes-ia32.h",
"src/compiler/backend/ia32/instruction-scheduler-ia32.cc",
"src/compiler/backend/ia32/instruction-selector-ia32.cc",
"src/deoptimizer/ia32/deoptimizer-ia32.cc",
"src/diagnostics/ia32/disasm-ia32.cc",
"src/diagnostics/ia32/unwinder-ia32.cc",
@ -2272,12 +2278,6 @@ filegroup(
"src/codegen/x64/macro-assembler-x64.h",
"src/codegen/x64/register-x64.h",
"src/codegen/x64/reglist-x64.h",
"src/compiler/backend/x64/code-generator-x64.cc",
"src/compiler/backend/x64/instruction-codes-x64.h",
"src/compiler/backend/x64/instruction-scheduler-x64.cc",
"src/compiler/backend/x64/instruction-selector-x64.cc",
"src/compiler/backend/x64/unwinding-info-writer-x64.cc",
"src/compiler/backend/x64/unwinding-info-writer-x64.h",
"src/deoptimizer/x64/deoptimizer-x64.cc",
"src/diagnostics/x64/disasm-x64.cc",
"src/diagnostics/x64/eh-frame-x64.cc",
@ -2302,12 +2302,6 @@ filegroup(
"src/codegen/arm/macro-assembler-arm.h",
"src/codegen/arm/register-arm.h",
"src/codegen/arm/reglist-arm.h",
"src/compiler/backend/arm/code-generator-arm.cc",
"src/compiler/backend/arm/instruction-codes-arm.h",
"src/compiler/backend/arm/instruction-scheduler-arm.cc",
"src/compiler/backend/arm/instruction-selector-arm.cc",
"src/compiler/backend/arm/unwinding-info-writer-arm.cc",
"src/compiler/backend/arm/unwinding-info-writer-arm.h",
"src/deoptimizer/arm/deoptimizer-arm.cc",
"src/diagnostics/arm/disasm-arm.cc",
"src/diagnostics/arm/eh-frame-arm.cc",
@ -2343,12 +2337,6 @@ filegroup(
"src/codegen/arm64/reglist-arm64.h",
"src/codegen/arm64/utils-arm64.cc",
"src/codegen/arm64/utils-arm64.h",
"src/compiler/backend/arm64/code-generator-arm64.cc",
"src/compiler/backend/arm64/instruction-codes-arm64.h",
"src/compiler/backend/arm64/instruction-scheduler-arm64.cc",
"src/compiler/backend/arm64/instruction-selector-arm64.cc",
"src/compiler/backend/arm64/unwinding-info-writer-arm64.cc",
"src/compiler/backend/arm64/unwinding-info-writer-arm64.h",
"src/deoptimizer/arm64/deoptimizer-arm64.cc",
"src/diagnostics/arm64/disasm-arm64.cc",
"src/diagnostics/arm64/disasm-arm64.h",
@ -2378,12 +2366,6 @@ filegroup(
"src/codegen/s390/macro-assembler-s390.h",
"src/codegen/s390/register-s390.h",
"src/codegen/s390/reglist-s390.h",
"src/compiler/backend/s390/code-generator-s390.cc",
"src/compiler/backend/s390/instruction-codes-s390.h",
"src/compiler/backend/s390/instruction-scheduler-s390.cc",
"src/compiler/backend/s390/instruction-selector-s390.cc",
"src/compiler/backend/s390/unwinding-info-writer-s390.cc",
"src/compiler/backend/s390/unwinding-info-writer-s390.h",
"src/deoptimizer/s390/deoptimizer-s390.cc",
"src/diagnostics/s390/disasm-s390.cc",
"src/diagnostics/s390/eh-frame-s390.cc",
@ -2410,10 +2392,6 @@ filegroup(
"src/codegen/riscv64/macro-assembler-riscv64.h",
"src/codegen/riscv64/register-riscv64.h",
"src/codegen/riscv64/reglist-riscv64.h",
"src/compiler/backend/riscv64/code-generator-riscv64.cc",
"src/compiler/backend/riscv64/instruction-codes-riscv64.h",
"src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc",
"src/compiler/backend/riscv64/instruction-selector-riscv64.cc",
"src/deoptimizer/riscv64/deoptimizer-riscv64.cc",
"src/diagnostics/riscv64/disasm-riscv64.cc",
"src/diagnostics/riscv64/unwinder-riscv64.cc",
@ -2439,12 +2417,6 @@ filegroup(
"src/codegen/ppc/macro-assembler-ppc.h",
"src/codegen/ppc/register-ppc.h",
"src/codegen/ppc/reglist-ppc.h",
"src/compiler/backend/ppc/code-generator-ppc.cc",
"src/compiler/backend/ppc/instruction-codes-ppc.h",
"src/compiler/backend/ppc/instruction-scheduler-ppc.cc",
"src/compiler/backend/ppc/instruction-selector-ppc.cc",
"src/compiler/backend/ppc/unwinding-info-writer-ppc.cc",
"src/compiler/backend/ppc/unwinding-info-writer-ppc.h",
"src/deoptimizer/ppc/deoptimizer-ppc.cc",
"src/diagnostics/ppc/disasm-ppc.cc",
"src/diagnostics/ppc/eh-frame-ppc.cc",
@ -2616,7 +2588,6 @@ filegroup(
name = "icu/v8_base_without_compiler_files",
srcs = [
"src/builtins/builtins-intl.cc",
"src/builtins/builtins-intl-gen.cc",
"src/objects/intl-objects.cc",
"src/objects/intl-objects.h",
"src/objects/js-break-iterator.cc",
@ -2891,10 +2862,13 @@ filegroup(
"src/compiler/state-values-utils.h",
"src/compiler/store-store-elimination.cc",
"src/compiler/store-store-elimination.h",
"src/compiler/turbofan-enabled.cc",
"src/compiler/turbofan.h",
"src/compiler/turboshaft/assembler.cc",
"src/compiler/turboshaft/assembler.h",
"src/compiler/turboshaft/assert-types-reducer.h",
"src/compiler/turboshaft/branch-elimination-reducer.h",
"src/compiler/turboshaft/dead-code-elimination-reducer.h",
"src/compiler/turboshaft/decompression-optimization.cc",
"src/compiler/turboshaft/decompression-optimization.h",
"src/compiler/turboshaft/deopt-data.h",
@ -2909,6 +2883,7 @@ filegroup(
"src/compiler/turboshaft/late-escape-analysis-reducer.h",
"src/compiler/turboshaft/late-escape-analysis-reducer.cc",
"src/compiler/turboshaft/layered-hash-map.h",
"src/compiler/turboshaft/machine-lowering-reducer.h",
"src/compiler/turboshaft/machine-optimization-reducer.h",
"src/compiler/turboshaft/memory-optimization.cc",
"src/compiler/turboshaft/memory-optimization.h",
@ -2919,6 +2894,7 @@ filegroup(
"src/compiler/turboshaft/optimization-phase.h",
"src/compiler/turboshaft/recreate-schedule.cc",
"src/compiler/turboshaft/recreate-schedule.h",
"src/compiler/turboshaft/reducer-traits.h",
"src/compiler/turboshaft/representations.cc",
"src/compiler/turboshaft/representations.h",
"src/compiler/turboshaft/select-lowering-reducer.h",
@ -2929,8 +2905,10 @@ filegroup(
"src/compiler/turboshaft/type-inference-reducer.h",
"src/compiler/turboshaft/type-parser.cc",
"src/compiler/turboshaft/type-parser.h",
"src/compiler/turboshaft/typed-optimizations-reducer.h",
"src/compiler/turboshaft/types.cc",
"src/compiler/turboshaft/types.h",
"src/compiler/turboshaft/uniform-reducer-adapter.h",
"src/compiler/turboshaft/utils.cc",
"src/compiler/turboshaft/utils.h",
"src/compiler/turboshaft/value-numbering-reducer.h",
@ -2954,6 +2932,59 @@ filegroup(
"src/compiler/zone-stats.cc",
"src/compiler/zone-stats.h",
] + select({
"@v8//bazel/config:v8_target_ia32": [
"src/compiler/backend/ia32/code-generator-ia32.cc",
"src/compiler/backend/ia32/instruction-codes-ia32.h",
"src/compiler/backend/ia32/instruction-scheduler-ia32.cc",
"src/compiler/backend/ia32/instruction-selector-ia32.cc",
],
"@v8//bazel/config:v8_target_x64": [
"src/compiler/backend/x64/code-generator-x64.cc",
"src/compiler/backend/x64/instruction-codes-x64.h",
"src/compiler/backend/x64/instruction-scheduler-x64.cc",
"src/compiler/backend/x64/instruction-selector-x64.cc",
"src/compiler/backend/x64/unwinding-info-writer-x64.cc",
"src/compiler/backend/x64/unwinding-info-writer-x64.h",
],
"@v8//bazel/config:v8_target_arm": [
"src/compiler/backend/arm/code-generator-arm.cc",
"src/compiler/backend/arm/instruction-codes-arm.h",
"src/compiler/backend/arm/instruction-scheduler-arm.cc",
"src/compiler/backend/arm/instruction-selector-arm.cc",
"src/compiler/backend/arm/unwinding-info-writer-arm.cc",
"src/compiler/backend/arm/unwinding-info-writer-arm.h",
],
"@v8//bazel/config:v8_target_arm64": [
"src/compiler/backend/arm64/code-generator-arm64.cc",
"src/compiler/backend/arm64/instruction-codes-arm64.h",
"src/compiler/backend/arm64/instruction-scheduler-arm64.cc",
"src/compiler/backend/arm64/instruction-selector-arm64.cc",
"src/compiler/backend/arm64/unwinding-info-writer-arm64.cc",
"src/compiler/backend/arm64/unwinding-info-writer-arm64.h",
],
"@v8//bazel/config:v8_target_s390x": [
"src/compiler/backend/s390/code-generator-s390.cc",
"src/compiler/backend/s390/instruction-codes-s390.h",
"src/compiler/backend/s390/instruction-scheduler-s390.cc",
"src/compiler/backend/s390/instruction-selector-s390.cc",
"src/compiler/backend/s390/unwinding-info-writer-s390.cc",
"src/compiler/backend/s390/unwinding-info-writer-s390.h",
],
"@v8//bazel/config:v8_target_riscv64": [
"src/compiler/backend/riscv64/code-generator-riscv64.cc",
"src/compiler/backend/riscv64/instruction-codes-riscv64.h",
"src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc",
"src/compiler/backend/riscv64/instruction-selector-riscv64.cc",
],
"@v8//bazel/config:v8_target_ppc64le": [
"src/compiler/backend/ppc/code-generator-ppc.cc",
"src/compiler/backend/ppc/instruction-codes-ppc.h",
"src/compiler/backend/ppc/instruction-scheduler-ppc.cc",
"src/compiler/backend/ppc/instruction-selector-ppc.cc",
"src/compiler/backend/ppc/unwinding-info-writer-ppc.cc",
"src/compiler/backend/ppc/unwinding-info-writer-ppc.h",
],
}) + select({
":is_v8_enable_webassembly": [
"src/compiler/int64-lowering.cc",
"src/compiler/int64-lowering.h",
@ -2964,6 +2995,8 @@ filegroup(
"src/compiler/wasm-compiler.h",
"src/compiler/wasm-escape-analysis.cc",
"src/compiler/wasm-escape-analysis.h",
"src/compiler/wasm-load-elimination.cc",
"src/compiler/wasm-load-elimination.h",
"src/compiler/wasm-loop-peeling.cc",
"src/compiler/wasm-loop-peeling.h",
"src/compiler/wasm-gc-lowering.cc",
@ -2982,7 +3015,7 @@ filegroup(
)
filegroup(
name = "v8_initializers_files",
name = "noicu/v8_initializers_files",
srcs = [
"src/builtins/builtins-array-gen.cc",
"src/builtins/builtins-array-gen.h",
@ -3002,6 +3035,7 @@ filegroup(
"src/builtins/builtins-constructor.h",
"src/builtins/builtins-conversion-gen.cc",
"src/builtins/builtins-data-view-gen.h",
"src/builtins/builtins-data-view-gen.cc",
"src/builtins/builtins-date-gen.cc",
"src/builtins/builtins-generator-gen.cc",
"src/builtins/builtins-global-gen.cc",
@ -3073,6 +3107,14 @@ filegroup(
}),
)
filegroup(
name = "icu/v8_initializers_files",
srcs = [
"src/builtins/builtins-intl-gen.cc",
":noicu/v8_initializers_files",
],
)
filegroup(
name = "cppgc_base_files",
srcs = [
@ -3187,16 +3229,16 @@ filegroup(
# Note these cannot be v8_target_is_* selects because these contain
# inline assembly that runs inside the executable. Since these are
# linked directly into mksnapshot, they must use the actual target cpu.
"@v8//bazel/config:is_inline_asm_ia32": ["src/heap/base/asm/ia32/save_registers_asm.cc"],
"@v8//bazel/config:is_inline_asm_x64": ["src/heap/base/asm/x64/save_registers_asm.cc"],
"@v8//bazel/config:is_inline_asm_arm": ["src/heap/base/asm/arm/save_registers_asm.cc"],
"@v8//bazel/config:is_inline_asm_arm64": ["src/heap/base/asm/arm64/save_registers_asm.cc"],
"@v8//bazel/config:is_inline_asm_s390x": ["src/heap/base/asm/s390/save_registers_asm.cc"],
"@v8//bazel/config:is_inline_asm_riscv64": ["src/heap/base/asm/riscv64/save_registers_asm.cc"],
"@v8//bazel/config:is_inline_asm_ppc64le": ["src/heap/base/asm/ppc/save_registers_asm.cc"],
"@v8//bazel/config:is_msvc_asm_ia32": ["src/heap/base/asm/ia32/save_registers_masm.asm"],
"@v8//bazel/config:is_msvc_asm_x64": ["src/heap/base/asm/x64/save_registers_masm.asm"],
"@v8//bazel/config:is_msvc_asm_arm64": ["src/heap/base/asm/arm64/save_registers_masm.S"],
"@v8//bazel/config:is_inline_asm_ia32": ["src/heap/base/asm/ia32/push_registers_asm.cc"],
"@v8//bazel/config:is_inline_asm_x64": ["src/heap/base/asm/x64/push_registers_asm.cc"],
"@v8//bazel/config:is_inline_asm_arm": ["src/heap/base/asm/arm/push_registers_asm.cc"],
"@v8//bazel/config:is_inline_asm_arm64": ["src/heap/base/asm/arm64/push_registers_asm.cc"],
"@v8//bazel/config:is_inline_asm_s390x": ["src/heap/base/asm/s390/push_registers_asm.cc"],
"@v8//bazel/config:is_inline_asm_riscv64": ["src/heap/base/asm/riscv64/push_registers_asm.cc"],
"@v8//bazel/config:is_inline_asm_ppc64le": ["src/heap/base/asm/ppc/push_registers_asm.cc"],
"@v8//bazel/config:is_msvc_asm_ia32": ["src/heap/base/asm/ia32/push_registers_masm.asm"],
"@v8//bazel/config:is_msvc_asm_x64": ["src/heap/base/asm/x64/push_registers_masm.asm"],
"@v8//bazel/config:is_msvc_asm_arm64": ["src/heap/base/asm/arm64/push_registers_masm.S"],
}),
)
@ -3252,6 +3294,8 @@ filegroup(
filegroup(
name = "v8_inspector_files",
srcs = [
"src/inspector/crc32.cc",
"src/inspector/crc32.h",
"src/inspector/custom-preview.cc",
"src/inspector/custom-preview.h",
"src/inspector/injected-script.cc",
@ -3405,8 +3449,8 @@ filegroup(
# TODO(victorgomes): Add support to tools/debug_helper,
# which needs class-debug-readers and debug-macros.
v8_torque(
name = "generated_torque_files",
v8_torque_definitions(
name = "generated_torque_definitions",
args = select({
":is_v8_annotate_torque_ir": ["-annotate-ir"],
"//conditions:default": [],
@ -3422,12 +3466,8 @@ v8_torque(
"class-forward-declarations.h",
"class-verifiers.cc",
"class-verifiers.h",
"csa-types.h",
# "debug-macros.cc",
# "debug-macros.h",
"enum-verifiers.cc",
"exported-macros-assembler.cc",
"exported-macros-assembler.h",
"factory.cc",
"factory.inc",
"instance-types.h",
@ -3440,8 +3480,28 @@ v8_torque(
noicu_srcs = [":noicu/torque_files"],
)
v8_torque_initializers(
name = "generated_torque_initializers",
args = select({
":is_v8_annotate_torque_ir": ["-annotate-ir"],
"//conditions:default": [],
}) + select({
"@v8//bazel/config:v8_target_is_32_bits": ["-m32"],
"//conditions:default": [],
}),
extras = [
"csa-types.h",
"enum-verifiers.cc",
"exported-macros-assembler.cc",
"exported-macros-assembler.h",
],
icu_srcs = [":icu/torque_files"],
noicu_srcs = [":noicu/torque_files"],
)
py_binary(
name = "code_generator",
python_version = "PY3",
srcs = [
"third_party/inspector_protocol/code_generator.py",
"third_party/inspector_protocol/pdl.py",
@ -3507,8 +3567,6 @@ filegroup(
name = "v8_common_libshared_files",
srcs = [
":torque_runtime_support_files",
":v8_compiler_files",
":v8_initializers_files",
":v8_libplatform_files",
":v8_libsampler_files",
":v8_shared_internal_headers",
@ -3578,15 +3636,15 @@ v8_mksnapshot(
# NOTE: This allow headers to be accessed without the icu/noicu prefixes.
cc_library(
name = "icu/generated_torque_headers",
hdrs = [":icu/generated_torque_files"],
name = "icu/generated_torque_definitions_headers",
hdrs = [":icu/generated_torque_definitions"],
copts = ["-Wno-implicit-fallthrough"],
strip_include_prefix = "icu",
)
cc_library(
name = "noicu/generated_torque_headers",
hdrs = [":noicu/generated_torque_files"],
name = "noicu/generated_torque_definitions_headers",
hdrs = [":noicu/generated_torque_definitions"],
copts = ["-Wno-implicit-fallthrough"],
strip_include_prefix = "noicu",
)
@ -3632,22 +3690,27 @@ v8_library(
srcs = [
":v8_base_without_compiler_files",
":v8_common_libshared_files",
],
] + select({
":is_v8_enable_turbofan": [
":v8_compiler_files",
],
"//conditions:default": [],
}),
copts = ["-Wno-implicit-fallthrough"],
icu_deps = [
":icu/generated_torque_headers",
":icu/generated_torque_definitions_headers",
"//external:icu",
],
icu_srcs = [
":generated_regexp_special_case",
":icu/generated_torque_files",
":icu/generated_torque_definitions",
":icu/v8_base_without_compiler_files",
],
noicu_deps = [
":noicu/generated_torque_headers",
":noicu/generated_torque_definitions_headers",
],
noicu_srcs = [
":noicu/generated_torque_files",
":noicu/generated_torque_definitions",
],
deps = [
":v8_libbase",
@ -3657,7 +3720,15 @@ v8_library(
v8_library(
name = "v8",
srcs = [":v8_inspector_files"],
srcs = [
":v8_inspector_files",
] + select({
":is_not_v8_enable_turbofan": [
# With Turbofan disabled, we only include the stubbed-out API.
"src/compiler/turbofan-disabled.cc",
],
"//conditions:default": [],
}),
hdrs = [":public_header_files"],
copts = ["-Wno-implicit-fallthrough"],
icu_deps = [":icu/v8_libshared"],
@ -3747,14 +3818,30 @@ v8_binary(
v8_binary(
name = "mksnapshot",
srcs = [":mksnapshot_files"],
srcs = [
":mksnapshot_files",
] + select({
":is_not_v8_enable_turbofan": [
# Turbofan is needed to generate builtins.
":v8_compiler_files",
],
"//conditions:default": [],
}),
copts = ["-Wno-implicit-fallthrough"],
icu_deps = [":icu/v8_libshared"],
linkopts = select({
"@v8//bazel/config:is_android": ["-llog"],
"//conditions:default": [],
}),
icu_srcs = [
":icu/generated_torque_initializers",
":icu/v8_initializers_files",
],
noicu_deps = [":v8_libshared_noicu"],
noicu_srcs = [
":noicu/generated_torque_initializers",
":noicu/v8_initializers_files",
],
)
v8_binary(

BUILD.gn (325 changes)

@ -10,10 +10,6 @@ import("//build/config/mips.gni")
import("//build/config/sanitizers/sanitizers.gni")
import("//build_overrides/build.gni")
if (is_android) {
import("//build/config/android/rules.gni")
}
import("gni/snapshot_toolchain.gni")
import("gni/v8.gni")
@ -121,7 +117,7 @@ declare_args() {
v8_enable_snapshot_native_code_counters = ""
# Use pre-generated static root pointer values from static-roots.h.
v8_enable_static_roots = false
v8_enable_static_roots = ""
# Enable code-generation-time checking of types in the CodeStubAssembler.
v8_enable_verify_csa = false
@ -207,10 +203,6 @@ declare_args() {
# Sets -dV8_EXTERNAL_CODE_SPACE
v8_enable_external_code_space = ""
# Enable the Maglev compiler.
# Sets -dV8_ENABLE_MAGLEV
v8_enable_maglev = ""
# With post mortem support enabled, metadata is embedded into libv8 that
# describes various parameters of the VM for use by debuggers. See
# tools/gen-postmortem-metadata.py for details.
@ -462,8 +454,13 @@ if (v8_enable_external_code_space == "") {
(target_os != "fuchsia" && v8_current_cpu == "arm64"))
}
if (v8_enable_maglev == "") {
v8_enable_maglev = v8_current_cpu == "x64" && v8_enable_pointer_compression
v8_enable_maglev = v8_enable_turbofan &&
(v8_current_cpu == "x64" || v8_current_cpu == "arm64") &&
v8_enable_pointer_compression
}
assert(v8_enable_turbofan || !v8_enable_maglev,
"Maglev is not available when Turbofan is disabled.")
if (v8_builtins_profiling_log_file == "default") {
v8_builtins_profiling_log_file = ""
@ -516,6 +513,10 @@ assert(!v8_enable_trace_ignition || v8_enable_trace_unoptimized,
"Ignition tracing requires unoptimized tracing to be enabled.")
assert(!v8_enable_trace_baseline_exec || v8_enable_trace_unoptimized,
"Baseline tracing requires unoptimized tracing to be enabled.")
assert(
v8_enable_debugging_features == true || dcheck_always_on ||
!v8_enable_slow_dchecks,
"v8_enable_slow_dchecks requires v8_enable_debugging_features or dcheck_always_on.")
if (v8_enable_short_builtin_calls &&
(!v8_enable_pointer_compression && v8_current_cpu != "x64")) {
@ -540,11 +541,16 @@ if (v8_enable_sandbox == "") {
if (v8_enable_static_roots == "") {
# Static roots are only valid for builds with pointer compression and a
# shared ro heap. Also, non-wasm and non-i18n builds have fewer read-only
# roots.
# shared read-only heap.
# TODO(olivf, v8:13466) Some configurations could be supported if we
# introduce different static root files for different build configurations:
# Non-wasm and non-i18n builds have fewer read only roots. Configurations
# without external code space allocate read only roots at a further
# location relative to the cage base.
v8_enable_static_roots =
v8_enable_pointer_compression && v8_enable_shared_ro_heap &&
v8_enable_pointer_compression_shared_cage && v8_enable_webassembly &&
v8_enable_pointer_compression_shared_cage &&
v8_enable_external_code_space && v8_enable_webassembly &&
v8_enable_i18n_support
}
@ -598,8 +604,9 @@ assert(
assert(
!v8_enable_pointer_compression_shared_cage || v8_current_cpu == "x64" ||
v8_current_cpu == "arm64" || v8_current_cpu == "riscv64",
"Sharing a pointer compression cage is only supported on x64,arm64 and riscv64")
v8_current_cpu == "arm64" || v8_current_cpu == "riscv64" ||
v8_current_cpu == "ppc64",
"Sharing a pointer compression cage is only supported on x64,arm64, ppc64 and riscv64")
assert(!v8_enable_unconditional_write_barriers || !v8_disable_write_barriers,
"Write barriers can't be both enabled and disabled")
@ -831,6 +838,7 @@ config("v8_header_features") {
external_cppgc_defines = [
"CPPGC_SUPPORTS_OBJECT_NAMES",
"CPPGC_CAGED_HEAP",
"CPPGC_SLIM_WRITE_BARRIER",
"CPPGC_YOUNG_GENERATION",
"CPPGC_POINTER_COMPRESSION",
]
@ -860,6 +868,9 @@ if (cppgc_enable_pointer_compression) {
if (cppgc_enable_2gb_cage) {
enabled_external_cppgc_defines += [ "CPPGC_2GB_CAGE" ]
}
if (cppgc_enable_slim_write_barrier) {
enabled_external_cppgc_defines += [ "CPPGC_SLIM_WRITE_BARRIER" ]
}
disabled_external_cppgc_defines =
external_cppgc_defines - enabled_external_cppgc_defines
@ -1041,6 +1052,9 @@ config("features") {
if (v8_enable_maglev) {
defines += [ "V8_ENABLE_MAGLEV" ]
}
if (v8_enable_turbofan) {
defines += [ "V8_ENABLE_TURBOFAN" ]
}
if (v8_enable_swiss_name_dictionary) {
defines += [ "V8_ENABLE_SWISS_NAME_DICTIONARY" ]
}
@ -1294,13 +1308,12 @@ config("toolchain") {
if ((is_linux || is_chromeos) && v8_enable_backtrace) {
ldflags += [ "-rdynamic" ]
}
}
if (v8_enable_debugging_features || dcheck_always_on) {
defines += [ "DEBUG" ]
if (v8_enable_slow_dchecks) {
defines += [ "ENABLE_SLOW_DCHECKS" ]
}
} else if (dcheck_always_on) {
defines += [ "DEBUG" ]
}
if (v8_enable_verify_csa) {
@ -1599,21 +1612,6 @@ template("asm_to_inline_asm") {
}
}
if (is_android && enable_java_templates) {
android_assets("v8_external_startup_data_assets") {
if (v8_use_external_startup_data) {
deps = [ "//v8" ]
renaming_sources = [ "$root_out_dir/snapshot_blob.bin" ]
if (current_cpu == "arm" || current_cpu == "x86") {
renaming_destinations = [ "snapshot_blob_32.bin" ]
} else {
renaming_destinations = [ "snapshot_blob_64.bin" ]
}
disable_compression = true
}
}
}
if (v8_postmortem_support) {
action("postmortem-metadata") {
# Only targets in this file and the top-level visibility target can
@ -1801,6 +1799,7 @@ torque_files = [
"src/builtins/string-html.tq",
"src/builtins/string-includes.tq",
"src/builtins/string-indexof.tq",
"src/builtins/string-iswellformed.tq",
"src/builtins/string-iterator.tq",
"src/builtins/string-match-search.tq",
"src/builtins/string-pad.tq",
@ -1810,6 +1809,7 @@ torque_files = [
"src/builtins/string-startswith.tq",
"src/builtins/string-substr.tq",
"src/builtins/string-substring.tq",
"src/builtins/string-towellformed.tq",
"src/builtins/string-trim.tq",
"src/builtins/symbol.tq",
"src/builtins/torque-internal.tq",
@ -2160,12 +2160,6 @@ template("run_mksnapshot") {
suffix = "_$name"
}
action("run_mksnapshot_" + name) {
# Only targets in this file and running mkgrokdump can depend on this.
visibility = [
":*",
"tools/debug_helper:run_mkgrokdump",
]
deps = [ ":mksnapshot($v8_snapshot_toolchain)" ]
script = "tools/run.py"
@ -2307,6 +2301,7 @@ action("v8_dump_build_config") {
script = "tools/testrunner/utils/dump_build_config.py"
outputs = [ "$root_out_dir/v8_build_config.json" ]
is_gcov_coverage = v8_code_coverage && !is_clang
is_DEBUG_defined = v8_enable_debugging_features || dcheck_always_on
is_full_debug = v8_enable_debugging_features && !v8_optimized_debug
args = [
rebase_path("$root_out_dir/v8_build_config.json", root_build_dir),
@ -2318,36 +2313,45 @@ action("v8_dump_build_config") {
"is_clang=$is_clang",
"is_component_build=$is_component_build",
"is_debug=$v8_enable_debugging_features",
"is_DEBUG_defined=$is_DEBUG_defined",
"is_full_debug=$is_full_debug",
"is_gcov_coverage=$is_gcov_coverage",
"is_msan=$is_msan",
"is_tsan=$is_tsan",
"is_ubsan_vptr=$is_ubsan_vptr",
"target_cpu=\"$target_cpu\"",
"v8_code_comments=$v8_code_comments",
"v8_control_flow_integrity=$v8_control_flow_integrity",
"v8_current_cpu=\"$v8_current_cpu\"",
"v8_dict_property_const_tracking=$v8_dict_property_const_tracking",
"v8_disable_write_barriers=$v8_disable_write_barriers",
"v8_enable_atomic_object_field_writes=" +
"$v8_enable_atomic_object_field_writes",
"v8_enable_cet_shadow_stack=$v8_enable_cet_shadow_stack",
"v8_enable_concurrent_marking=$v8_enable_concurrent_marking",
"v8_enable_conservative_stack_scanning=" +
"$v8_enable_conservative_stack_scanning",
"v8_enable_concurrent_marking=$v8_enable_concurrent_marking",
"v8_enable_single_generation=$v8_enable_single_generation",
"v8_enable_debug_code=$v8_enable_debug_code",
"v8_enable_disassembler=$v8_enable_disassembler",
"v8_enable_gdbjit=$v8_enable_gdbjit",
"v8_enable_i18n_support=$v8_enable_i18n_support",
"v8_enable_verify_predictable=$v8_enable_verify_predictable",
"v8_enable_verify_csa=$v8_enable_verify_csa",
"v8_enable_lite_mode=$v8_enable_lite_mode",
"v8_enable_runtime_call_stats=$v8_enable_runtime_call_stats",
"v8_enable_maglev=$v8_enable_maglev",
"v8_enable_pointer_compression=$v8_enable_pointer_compression",
"v8_enable_pointer_compression_shared_cage=" +
"$v8_enable_pointer_compression_shared_cage",
"v8_enable_runtime_call_stats=$v8_enable_runtime_call_stats",
"v8_enable_sandbox=$v8_enable_sandbox",
"v8_enable_shared_ro_heap=$v8_enable_shared_ro_heap",
"v8_disable_write_barriers=$v8_disable_write_barriers",
"v8_enable_single_generation=$v8_enable_single_generation",
"v8_enable_slow_dchecks=$v8_enable_slow_dchecks",
"v8_enable_third_party_heap=$v8_enable_third_party_heap",
"v8_enable_turbofan=$v8_enable_turbofan",
"v8_enable_verify_csa=$v8_enable_verify_csa",
"v8_enable_verify_heap=$v8_enable_verify_heap",
"v8_enable_verify_predictable=$v8_enable_verify_predictable",
"v8_enable_webassembly=$v8_enable_webassembly",
"v8_dict_property_const_tracking=$v8_dict_property_const_tracking",
"v8_control_flow_integrity=$v8_control_flow_integrity",
"v8_target_cpu=\"$v8_target_cpu\"",
"v8_enable_cet_shadow_stack=$v8_enable_cet_shadow_stack",
]
if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
@ -2368,6 +2372,7 @@ v8_source_set("v8_snapshot") {
deps = [
":v8_internal_headers",
":v8_libbase",
":v8_tracing",
]
public_deps = [
# This should be public so downstream targets can declare the snapshot
@ -2439,6 +2444,7 @@ v8_source_set("v8_initializers") {
"src/builtins/builtins-constructor-gen.h",
"src/builtins/builtins-constructor.h",
"src/builtins/builtins-conversion-gen.cc",
"src/builtins/builtins-data-view-gen.cc",
"src/builtins/builtins-data-view-gen.h",
"src/builtins/builtins-date-gen.cc",
"src/builtins/builtins-generator-gen.cc",
@ -2980,9 +2986,11 @@ v8_header_set("v8_internal_headers") {
"src/compiler/simplified-operator.h",
"src/compiler/state-values-utils.h",
"src/compiler/store-store-elimination.h",
"src/compiler/turbofan.h",
"src/compiler/turboshaft/assembler.h",
"src/compiler/turboshaft/assert-types-reducer.h",
"src/compiler/turboshaft/branch-elimination-reducer.h",
"src/compiler/turboshaft/dead-code-elimination-reducer.h",
"src/compiler/turboshaft/decompression-optimization.h",
"src/compiler/turboshaft/deopt-data.h",
"src/compiler/turboshaft/fast-hash.h",
@ -2992,12 +3000,14 @@ v8_header_set("v8_internal_headers") {
"src/compiler/turboshaft/index.h",
"src/compiler/turboshaft/late-escape-analysis-reducer.h",
"src/compiler/turboshaft/layered-hash-map.h",
"src/compiler/turboshaft/machine-lowering-reducer.h",
"src/compiler/turboshaft/machine-optimization-reducer.h",
"src/compiler/turboshaft/memory-optimization.h",
"src/compiler/turboshaft/operation-matching.h",
"src/compiler/turboshaft/operations.h",
"src/compiler/turboshaft/optimization-phase.h",
"src/compiler/turboshaft/recreate-schedule.h",
"src/compiler/turboshaft/reducer-traits.h",
"src/compiler/turboshaft/representations.h",
"src/compiler/turboshaft/select-lowering-reducer.h",
"src/compiler/turboshaft/sidetable.h",
@ -3005,7 +3015,9 @@ v8_header_set("v8_internal_headers") {
"src/compiler/turboshaft/snapshot-table.h",
"src/compiler/turboshaft/type-inference-reducer.h",
"src/compiler/turboshaft/type-parser.h",
"src/compiler/turboshaft/typed-optimizations-reducer.h",
"src/compiler/turboshaft/types.h",
"src/compiler/turboshaft/uniform-reducer-adapater.h",
"src/compiler/turboshaft/utils.h",
"src/compiler/turboshaft/value-numbering-reducer.h",
"src/compiler/turboshaft/variable-reducer.h",
@ -3124,8 +3136,8 @@ v8_header_set("v8_internal_headers") {
"src/heap/cppgc-js/unified-heap-marking-state.h",
"src/heap/cppgc-js/unified-heap-marking-verifier.h",
"src/heap/cppgc-js/unified-heap-marking-visitor.h",
"src/heap/embedder-tracing-inl.h",
"src/heap/embedder-tracing.h",
"src/heap/cppgc-js/wrappable-info-inl.h",
"src/heap/cppgc-js/wrappable-info.h",
"src/heap/evacuation-allocator-inl.h",
"src/heap/evacuation-allocator.h",
"src/heap/evacuation-verifier-inl.h",
@ -3640,6 +3652,7 @@ v8_header_set("v8_internal_headers") {
if (v8_enable_maglev) {
sources += [
"src/maglev/maglev-assembler-inl.h",
"src/maglev/maglev-assembler.h",
"src/maglev/maglev-basic-block.h",
"src/maglev/maglev-code-gen-state.h",
@ -3685,6 +3698,7 @@ v8_header_set("v8_internal_headers") {
"src/compiler/wasm-gc-operator-reducer.h",
"src/compiler/wasm-graph-assembler.h",
"src/compiler/wasm-inlining.h",
"src/compiler/wasm-load-elimination.h",
"src/compiler/wasm-loop-peeling.h",
"src/compiler/wasm-typer.h",
"src/debug/debug-wasm-objects-inl.h",
@ -3936,7 +3950,7 @@ v8_header_set("v8_internal_headers") {
if (v8_enable_webassembly) {
# Trap handling is enabled on arm64 Mac and in simulators on x64 on Linux
# and Mac.
if ((current_cpu == "arm64" && is_mac) ||
if ((current_cpu == "arm64" && (is_mac || is_ios)) ||
(current_cpu == "x64" && (is_linux || is_chromeos || is_mac))) {
sources += [ "src/trap-handler/handler-inside-posix.h" ]
}
@ -4241,6 +4255,7 @@ v8_compiler_sources = [
"src/compiler/simplified-operator.cc",
"src/compiler/state-values-utils.cc",
"src/compiler/store-store-elimination.cc",
"src/compiler/turbofan-enabled.cc",
"src/compiler/type-cache.cc",
"src/compiler/type-narrowing-reducer.cc",
"src/compiler/typed-optimization.cc",
@ -4249,10 +4264,93 @@ v8_compiler_sources = [
"src/compiler/value-numbering-reducer.cc",
"src/compiler/verifier.cc",
"src/compiler/zone-stats.cc",
"src/utils/hex-format.cc",
"src/utils/sha-256.cc",
]
if (v8_current_cpu == "x86") {
v8_compiler_sources += [
### gcmole(ia32) ###
"src/compiler/backend/ia32/code-generator-ia32.cc",
"src/compiler/backend/ia32/instruction-scheduler-ia32.cc",
"src/compiler/backend/ia32/instruction-selector-ia32.cc",
]
} else if (v8_current_cpu == "x64") {
v8_compiler_sources += [
### gcmole(x64) ###
"src/compiler/backend/x64/code-generator-x64.cc",
"src/compiler/backend/x64/instruction-scheduler-x64.cc",
"src/compiler/backend/x64/instruction-selector-x64.cc",
"src/compiler/backend/x64/unwinding-info-writer-x64.cc",
]
} else if (v8_current_cpu == "arm") {
v8_compiler_sources += [
### gcmole(arm) ###
"src/compiler/backend/arm/code-generator-arm.cc",
"src/compiler/backend/arm/instruction-scheduler-arm.cc",
"src/compiler/backend/arm/instruction-selector-arm.cc",
"src/compiler/backend/arm/unwinding-info-writer-arm.cc",
]
} else if (v8_current_cpu == "arm64") {
v8_compiler_sources += [
### gcmole(arm64) ###
"src/compiler/backend/arm64/code-generator-arm64.cc",
"src/compiler/backend/arm64/instruction-scheduler-arm64.cc",
"src/compiler/backend/arm64/instruction-selector-arm64.cc",
"src/compiler/backend/arm64/unwinding-info-writer-arm64.cc",
]
} else if (v8_current_cpu == "mips64" || v8_current_cpu == "mips64el") {
v8_compiler_sources += [
### gcmole(mips64el) ###
"src/compiler/backend/mips64/code-generator-mips64.cc",
"src/compiler/backend/mips64/instruction-scheduler-mips64.cc",
"src/compiler/backend/mips64/instruction-selector-mips64.cc",
]
} else if (v8_current_cpu == "loong64") {
v8_compiler_sources += [
### gcmole(loong64) ###
"src/compiler/backend/loong64/code-generator-loong64.cc",
"src/compiler/backend/loong64/instruction-scheduler-loong64.cc",
"src/compiler/backend/loong64/instruction-selector-loong64.cc",
]
} else if (v8_current_cpu == "ppc") {
v8_compiler_sources += [
### gcmole(ppc) ###
"src/compiler/backend/ppc/code-generator-ppc.cc",
"src/compiler/backend/ppc/instruction-scheduler-ppc.cc",
"src/compiler/backend/ppc/instruction-selector-ppc.cc",
"src/compiler/backend/ppc/unwinding-info-writer-ppc.cc",
]
} else if (v8_current_cpu == "ppc64") {
v8_compiler_sources += [
### gcmole(ppc64) ###
"src/compiler/backend/ppc/code-generator-ppc.cc",
"src/compiler/backend/ppc/instruction-scheduler-ppc.cc",
"src/compiler/backend/ppc/instruction-selector-ppc.cc",
"src/compiler/backend/ppc/unwinding-info-writer-ppc.cc",
]
} else if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
v8_compiler_sources += [
### gcmole(s390) ###
"src/compiler/backend/s390/code-generator-s390.cc",
"src/compiler/backend/s390/instruction-scheduler-s390.cc",
"src/compiler/backend/s390/instruction-selector-s390.cc",
"src/compiler/backend/s390/unwinding-info-writer-s390.cc",
]
} else if (v8_current_cpu == "riscv64") {
v8_compiler_sources += [
### gcmole(riscv64) ###
"src/compiler/backend/riscv/code-generator-riscv.cc",
"src/compiler/backend/riscv/instruction-scheduler-riscv.cc",
"src/compiler/backend/riscv/instruction-selector-riscv64.cc",
]
} else if (v8_current_cpu == "riscv32") {
v8_compiler_sources += [
### gcmole(riscv32) ###
"src/compiler/backend/riscv/code-generator-riscv.cc",
"src/compiler/backend/riscv/instruction-scheduler-riscv.cc",
"src/compiler/backend/riscv/instruction-selector-riscv32.cc",
]
}
if (v8_enable_webassembly) {
v8_compiler_sources += [
"src/compiler/int64-lowering.cc",
@ -4263,6 +4361,7 @@ if (v8_enable_webassembly) {
"src/compiler/wasm-gc-operator-reducer.cc",
"src/compiler/wasm-graph-assembler.cc",
"src/compiler/wasm-inlining.cc",
"src/compiler/wasm-load-elimination.cc",
"src/compiler/wasm-loop-peeling.cc",
"src/compiler/wasm-typer.cc",
]
@ -4275,8 +4374,12 @@ if (v8_enable_wasm_simd256_revec) {
]
}
# The src/compiler files with optimizations.
v8_source_set("v8_compiler_opt") {
# The src/compiler files for use in mksnapshot.
# - These might be built with additional optimizations if
# v8_enable_fast_mksnapshot is set.
# - We always include Turbofan even if v8_enable_turbofan is unset s.t.
# builtins can be generated by mksnapshot.
v8_source_set("v8_compiler_for_mksnapshot_source_set") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
sources = v8_compiler_sources
@ -4307,11 +4410,16 @@ v8_source_set("v8_compiler_opt") {
}
}
# The src/compiler files with default optimization behavior.
# The src/compiler files with default behavior.
v8_source_set("v8_compiler") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
sources = v8_compiler_sources
if (v8_enable_turbofan) {
sources = v8_compiler_sources
} else {
# With Turbofan disabled, we only include the stubbed-out API.
sources = [ "src/compiler/turbofan-disabled.cc" ]
}
public_deps = [
":generate_bytecode_builtins_list",
@ -4370,8 +4478,14 @@ v8_source_set("v8_turboshaft") {
}
group("v8_compiler_for_mksnapshot") {
if (is_debug && !v8_optimized_debug && v8_enable_fast_mksnapshot) {
deps = [ ":v8_compiler_opt" ]
if ((is_debug && !v8_optimized_debug && v8_enable_fast_mksnapshot) ||
!v8_enable_turbofan) {
# mksnapshot needs its own version of the compiler, either because
# a) we're optimizing for mksnapshot execution speed and the compiler
# should be optimized even if the rest of V8 is not; or
# b) Turbofan is disabled and thus not compiled into the rest of V8, yet
# mksnapshot still needs TF to generate builtins.
deps = [ ":v8_compiler_for_mksnapshot_source_set" ]
} else {
deps = [ ":v8_compiler" ]
}
@ -4390,7 +4504,11 @@ group("v8_tracing") {
}
v8_source_set("v8_base_without_compiler") {
visibility = [ ":*" ] # Only targets in this file can depend on this.
# Only targets in this file and gcmole can depend on this.
visibility = [
":*",
"tools/gcmole/:*",
]
# Split static libraries on windows into two.
split_count = 2
@ -4444,7 +4562,6 @@ v8_source_set("v8_base_without_compiler") {
"src/builtins/builtins-trace.cc",
"src/builtins/builtins-typed-array.cc",
"src/builtins/builtins-weak-refs.cc",
"src/builtins/builtins-web-snapshots.cc",
"src/builtins/builtins.cc",
"src/builtins/constants-table-builder.cc",
"src/codegen/aligned-slot-allocator.cc",
@ -4558,7 +4675,6 @@ v8_source_set("v8_base_without_compiler") {
"src/heap/cppgc-js/unified-heap-marking-state.cc",
"src/heap/cppgc-js/unified-heap-marking-verifier.cc",
"src/heap/cppgc-js/unified-heap-marking-visitor.cc",
"src/heap/embedder-tracing.cc",
"src/heap/evacuation-verifier.cc",
"src/heap/factory-base.cc",
"src/heap/factory.cc",
@ -4679,6 +4795,7 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/js-segment-iterator.cc",
"src/objects/js-segmenter.cc",
"src/objects/js-segments.cc",
"src/objects/js-struct.cc",
"src/objects/js-temporal-objects.cc",
"src/objects/keys.cc",
"src/objects/literal-objects.cc",
@ -4831,13 +4948,13 @@ v8_source_set("v8_base_without_compiler") {
"src/utils/allocation.cc",
"src/utils/bit-vector.cc",
"src/utils/detachable-vector.cc",
"src/utils/hex-format.cc",
"src/utils/identity-map.cc",
"src/utils/memcopy.cc",
"src/utils/ostreams.cc",
"src/utils/sha-256.cc",
"src/utils/utils.cc",
"src/utils/version.cc",
"src/web-snapshot/web-snapshot.cc",
"src/web-snapshot/web-snapshot.h",
"src/zone/accounting-allocator.cc",
"src/zone/type-stats.cc",
"src/zone/zone-segment.cc",
@ -4966,9 +5083,6 @@ v8_source_set("v8_base_without_compiler") {
"src/codegen/ia32/cpu-ia32.cc",
"src/codegen/ia32/macro-assembler-ia32.cc",
"src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc",
"src/compiler/backend/ia32/code-generator-ia32.cc",
"src/compiler/backend/ia32/instruction-scheduler-ia32.cc",
"src/compiler/backend/ia32/instruction-selector-ia32.cc",
"src/deoptimizer/ia32/deoptimizer-ia32.cc",
"src/diagnostics/ia32/disasm-ia32.cc",
"src/diagnostics/ia32/unwinder-ia32.cc",
@ -4982,10 +5096,6 @@ v8_source_set("v8_base_without_compiler") {
"src/codegen/x64/assembler-x64.cc",
"src/codegen/x64/cpu-x64.cc",
"src/codegen/x64/macro-assembler-x64.cc",
"src/compiler/backend/x64/code-generator-x64.cc",
"src/compiler/backend/x64/instruction-scheduler-x64.cc",
"src/compiler/backend/x64/instruction-selector-x64.cc",
"src/compiler/backend/x64/unwinding-info-writer-x64.cc",
"src/deoptimizer/x64/deoptimizer-x64.cc",
"src/diagnostics/x64/disasm-x64.cc",
"src/diagnostics/x64/eh-frame-x64.cc",
@ -5021,10 +5131,6 @@ v8_source_set("v8_base_without_compiler") {
"src/codegen/arm/constants-arm.cc",
"src/codegen/arm/cpu-arm.cc",
"src/codegen/arm/macro-assembler-arm.cc",
"src/compiler/backend/arm/code-generator-arm.cc",
"src/compiler/backend/arm/instruction-scheduler-arm.cc",
"src/compiler/backend/arm/instruction-selector-arm.cc",
"src/compiler/backend/arm/unwinding-info-writer-arm.cc",
"src/deoptimizer/arm/deoptimizer-arm.cc",
"src/diagnostics/arm/disasm-arm.cc",
"src/diagnostics/arm/eh-frame-arm.cc",
@ -5044,10 +5150,6 @@ v8_source_set("v8_base_without_compiler") {
"src/codegen/arm64/macro-assembler-arm64.cc",
"src/codegen/arm64/register-arm64.cc",
"src/codegen/arm64/utils-arm64.cc",
"src/compiler/backend/arm64/code-generator-arm64.cc",
"src/compiler/backend/arm64/instruction-scheduler-arm64.cc",
"src/compiler/backend/arm64/instruction-selector-arm64.cc",
"src/compiler/backend/arm64/unwinding-info-writer-arm64.cc",
"src/deoptimizer/arm64/deoptimizer-arm64.cc",
"src/diagnostics/arm64/disasm-arm64.cc",
"src/diagnostics/arm64/eh-frame-arm64.cc",
@ -5089,9 +5191,6 @@ v8_source_set("v8_base_without_compiler") {
"src/codegen/mips64/cpu-mips64.cc",
"src/codegen/mips64/interface-descriptors-mips64-inl.h",
"src/codegen/mips64/macro-assembler-mips64.cc",
"src/compiler/backend/mips64/code-generator-mips64.cc",
"src/compiler/backend/mips64/instruction-scheduler-mips64.cc",
"src/compiler/backend/mips64/instruction-selector-mips64.cc",
"src/deoptimizer/mips64/deoptimizer-mips64.cc",
"src/diagnostics/mips64/disasm-mips64.cc",
"src/diagnostics/mips64/unwinder-mips64.cc",
@ -5107,9 +5206,6 @@ v8_source_set("v8_base_without_compiler") {
"src/codegen/loong64/cpu-loong64.cc",
"src/codegen/loong64/interface-descriptors-loong64-inl.h",
"src/codegen/loong64/macro-assembler-loong64.cc",
"src/compiler/backend/loong64/code-generator-loong64.cc",
"src/compiler/backend/loong64/instruction-scheduler-loong64.cc",
"src/compiler/backend/loong64/instruction-selector-loong64.cc",
"src/deoptimizer/loong64/deoptimizer-loong64.cc",
"src/diagnostics/loong64/disasm-loong64.cc",
"src/diagnostics/loong64/unwinder-loong64.cc",
@ -5124,10 +5220,6 @@ v8_source_set("v8_base_without_compiler") {
"src/codegen/ppc/constants-ppc.cc",
"src/codegen/ppc/cpu-ppc.cc",
"src/codegen/ppc/macro-assembler-ppc.cc",
"src/compiler/backend/ppc/code-generator-ppc.cc",
"src/compiler/backend/ppc/instruction-scheduler-ppc.cc",
"src/compiler/backend/ppc/instruction-selector-ppc.cc",
"src/compiler/backend/ppc/unwinding-info-writer-ppc.cc",
"src/deoptimizer/ppc/deoptimizer-ppc.cc",
"src/diagnostics/ppc/disasm-ppc.cc",
"src/diagnostics/ppc/eh-frame-ppc.cc",
@ -5143,10 +5235,6 @@ v8_source_set("v8_base_without_compiler") {
"src/codegen/ppc/constants-ppc.cc",
"src/codegen/ppc/cpu-ppc.cc",
"src/codegen/ppc/macro-assembler-ppc.cc",
"src/compiler/backend/ppc/code-generator-ppc.cc",
"src/compiler/backend/ppc/instruction-scheduler-ppc.cc",
"src/compiler/backend/ppc/instruction-selector-ppc.cc",
"src/compiler/backend/ppc/unwinding-info-writer-ppc.cc",
"src/deoptimizer/ppc/deoptimizer-ppc.cc",
"src/diagnostics/ppc/disasm-ppc.cc",
"src/diagnostics/ppc/eh-frame-ppc.cc",
@ -5162,10 +5250,6 @@ v8_source_set("v8_base_without_compiler") {
"src/codegen/s390/constants-s390.cc",
"src/codegen/s390/cpu-s390.cc",
"src/codegen/s390/macro-assembler-s390.cc",
"src/compiler/backend/s390/code-generator-s390.cc",
"src/compiler/backend/s390/instruction-scheduler-s390.cc",
"src/compiler/backend/s390/instruction-selector-s390.cc",
"src/compiler/backend/s390/unwinding-info-writer-s390.cc",
"src/deoptimizer/s390/deoptimizer-s390.cc",
"src/diagnostics/s390/disasm-s390.cc",
"src/diagnostics/s390/eh-frame-s390.cc",
@ -5191,9 +5275,6 @@ v8_source_set("v8_base_without_compiler") {
"src/codegen/riscv/extension-riscv-zicsr.cc",
"src/codegen/riscv/extension-riscv-zifencei.cc",
"src/codegen/riscv/macro-assembler-riscv.cc",
"src/compiler/backend/riscv/code-generator-riscv.cc",
"src/compiler/backend/riscv/instruction-scheduler-riscv.cc",
"src/compiler/backend/riscv/instruction-selector-riscv64.cc",
"src/deoptimizer/riscv/deoptimizer-riscv.cc",
"src/diagnostics/riscv/disasm-riscv.cc",
"src/diagnostics/riscv/unwinder-riscv.cc",
@ -5218,9 +5299,6 @@ v8_source_set("v8_base_without_compiler") {
"src/codegen/riscv/extension-riscv-zicsr.cc",
"src/codegen/riscv/extension-riscv-zifencei.cc",
"src/codegen/riscv/macro-assembler-riscv.cc",
"src/compiler/backend/riscv/code-generator-riscv.cc",
"src/compiler/backend/riscv/instruction-scheduler-riscv.cc",
"src/compiler/backend/riscv/instruction-selector-riscv32.cc",
"src/deoptimizer/riscv/deoptimizer-riscv.cc",
"src/diagnostics/riscv/disasm-riscv.cc",
"src/diagnostics/riscv/unwinder-riscv.cc",
@ -5341,8 +5419,11 @@ group("v8_base") {
public_deps = [
":v8_base_without_compiler",
":v8_compiler",
":v8_turboshaft",
]
if (v8_enable_turbofan) {
public_deps += [ ":v8_turboshaft" ]
}
}
v8_source_set("torque_base") {
@ -5908,31 +5989,31 @@ v8_source_set("v8_heap_base") {
if (is_clang || !is_win) {
if (current_cpu == "x64") {
sources += [ "src/heap/base/asm/x64/save_registers_asm.cc" ]
sources += [ "src/heap/base/asm/x64/push_registers_asm.cc" ]
} else if (current_cpu == "x86") {
sources += [ "src/heap/base/asm/ia32/save_registers_asm.cc" ]
sources += [ "src/heap/base/asm/ia32/push_registers_asm.cc" ]
} else if (current_cpu == "arm") {
sources += [ "src/heap/base/asm/arm/save_registers_asm.cc" ]
sources += [ "src/heap/base/asm/arm/push_registers_asm.cc" ]
} else if (current_cpu == "arm64") {
sources += [ "src/heap/base/asm/arm64/save_registers_asm.cc" ]
sources += [ "src/heap/base/asm/arm64/push_registers_asm.cc" ]
} else if (current_cpu == "ppc64") {
sources += [ "src/heap/base/asm/ppc/save_registers_asm.cc" ]
sources += [ "src/heap/base/asm/ppc/push_registers_asm.cc" ]
} else if (current_cpu == "s390x") {
sources += [ "src/heap/base/asm/s390/save_registers_asm.cc" ]
sources += [ "src/heap/base/asm/s390/push_registers_asm.cc" ]
} else if (current_cpu == "mips64el") {
sources += [ "src/heap/base/asm/mips64/save_registers_asm.cc" ]
sources += [ "src/heap/base/asm/mips64/push_registers_asm.cc" ]
} else if (current_cpu == "loong64") {
sources += [ "src/heap/base/asm/loong64/save_registers_asm.cc" ]
sources += [ "src/heap/base/asm/loong64/push_registers_asm.cc" ]
} else if (current_cpu == "riscv64" || current_cpu == "riscv32") {
sources += [ "src/heap/base/asm/riscv/save_registers_asm.cc" ]
sources += [ "src/heap/base/asm/riscv/push_registers_asm.cc" ]
}
} else if (is_win) {
if (current_cpu == "x64") {
sources += [ "src/heap/base/asm/x64/save_registers_masm.asm" ]
sources += [ "src/heap/base/asm/x64/push_registers_masm.asm" ]
} else if (current_cpu == "x86") {
sources += [ "src/heap/base/asm/ia32/save_registers_masm.asm" ]
sources += [ "src/heap/base/asm/ia32/push_registers_masm.asm" ]
} else if (current_cpu == "arm64") {
sources += [ "src/heap/base/asm/arm64/save_registers_masm.S" ]
sources += [ "src/heap/base/asm/arm64/push_registers_masm.S" ]
}
}
@ -6548,10 +6629,14 @@ if (is_component_build) {
":torque_ls_base",
":v8_base",
":v8_headers",
":v8_initializers",
":v8_snapshot",
]
if (v8_enable_turbofan) {
# For cctest/test-serialize.
public_deps += [ ":v8_initializers" ]
}
configs = [ ":internal_config" ]
public_configs = [ ":external_config" ]
@ -6604,10 +6689,14 @@ if (is_component_build) {
":torque_base",
":torque_ls_base",
":v8_base",
":v8_initializers",
":v8_snapshot",
]
if (v8_enable_turbofan) {
# For cctest/test-serialize.
public_deps += [ ":v8_initializers" ]
}
public_configs = [ ":external_config" ]
}

DEPS (55 changes)

@ -39,6 +39,9 @@ vars = {
# Fetch clang-tidy into the same bin/ directory as our clang binary.
'checkout_clang_tidy': False,
# Fetch and build V8 builtins with PGO profiles
'checkout_v8_builtins_pgo_profiles': False,
'chromium_url': 'https://chromium.googlesource.com',
'android_url': 'https://android.googlesource.com',
'download_gcmole': False,
@ -50,22 +53,22 @@ vars = {
'fuchsia_sdk_cipd_prefix': 'fuchsia/sdk/gn/',
# reclient CIPD package version
'reclient_version': 're_client_version:0.87.0.b6908b3-gomaip',
'reclient_version': 're_client_version:0.93.0.9ed3cef-gomaip',
# GN CIPD package version.
'gn_version': 'git_revision:5e19d2fb166fbd4f6f32147fbb2f497091a54ad8',
'gn_version': 'git_revision:84c8431f3e03cc6226c59dd49637c15ea31169a1',
# ninja CIPD package version
# https://chrome-infra-packages.appspot.com/p/infra/3pp/tools/ninja
'ninja_version': 'version:2@1.8.2.chromium.3',
'ninja_version': 'version:2@1.11.1.chromium.6',
# luci-go CIPD package version.
'luci_go': 'git_revision:bac571b5399502fa16ac48a1d3820e1117505085',
'luci_go': 'git_revision:c41d94e382727fc5276cd2771741990543fce337',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling Fuchsia sdk
# and whatever else without interference from each other.
'fuchsia_version': 'version:11.20221219.3.1',
'fuchsia_version': 'version:11.20230131.1.1',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_build-tools_version
@ -98,18 +101,18 @@ vars = {
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_tools-lint_version
# and whatever else without interference from each other.
'android_sdk_cmdline-tools_version': 'oWlET2yQhaPKQ66tYNuSPaueU78Z9VlxpyxOoUjwRuIC',
'android_sdk_cmdline-tools_version': '3Yn5Sn7BMObm8gsoZCF0loJMKg9_PpgU07G9DObCLdQC',
}
deps = {
'base/trace_event/common':
Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '521ac34ebd795939c7e16b37d9d3ddb40e8ed556',
Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '68e6038b5350cba18c341cc7c572170af5c5b20c',
'build':
Var('chromium_url') + '/chromium/src/build.git' + '@' + '8b3f1346a4f7f3b89c938e537a9be0e2120a9535',
Var('chromium_url') + '/chromium/src/build.git' + '@' + 'e0df145ecb560e48381b6dccf3b9c8b31aa95bcd',
'buildtools':
Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '600a61514a682cdda7952a3ef8c75acd9487fa6b',
Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '295c6e5037e358904aef73a21409896d58547ba6',
'buildtools/clang_format/script':
Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + '8b525d2747f2584fc35d8c7e612e66f377858df7',
Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + 'f97059df7f8b205064625cdb5f97b56668a125ef',
'buildtools/linux64': {
'packages': [
{
@ -131,11 +134,11 @@ deps = {
'condition': 'host_os == "mac"',
},
'buildtools/third_party/libc++/trunk':
Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '7b20455cbdf0891a6e5e2b66609b08c4f407ae5f',
Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '59bae40d835ae4eabaddbef781f5e3b778dd7907',
'buildtools/third_party/libc++abi/trunk':
Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '123239cdb67b3d69c5af933e364a84019a33575c',
Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + 'b74d7716111d7eda5c03cb8f5dfc940e1c2c0030',
'buildtools/third_party/libunwind/trunk':
Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + '5e22a7fe2335161ab267867c8e1be481bf6c8300',
Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'e95b94b74d26f8387d4fb03a687a2fab0ed8e91c',
'buildtools/win': {
'packages': [
{
@ -154,14 +157,14 @@ deps = {
}
],
'dep_type': 'cipd',
'condition': '(host_os == "linux" or host_os == "mac" or host_os == "win") and host_cpu != "s390" and host_cpu != "ppc"',
'condition': '(host_os == "linux" or host_os == "mac" or host_os == "win") and host_cpu != "s390" and host_cpu != "ppc" and host_cpu != "arm64"',
},
'test/benchmarks/data':
Var('chromium_url') + '/v8/deps/third_party/benchmarks.git' + '@' + '05d7188267b4560491ff9155c5ee13e207ecd65f',
'test/mozilla/data':
Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
'test/test262/data':
Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'e6c6460a5b94e32e01ce9a9d236f3148d4648ce5',
Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'f00d4118dba5d266d1611ba2cd4e995d3e4b523a',
'third_party/android_ndk': {
'url': Var('chromium_url') + '/android_ndk.git' + '@' + '8388a2be5421311dc75c5f937aae13d821a27f3d',
'condition': 'checkout_android',
@ -209,7 +212,7 @@ deps = {
'dep_type': 'cipd',
},
'third_party/catapult': {
'url': Var('chromium_url') + '/catapult.git' + '@' + '1e4341629217ba4a71a976d9c173d13f7c4e63a4',
'url': Var('chromium_url') + '/catapult.git' + '@' + '5a468ccd919e16a29bb3121e3c90f27bf8745942',
'condition': 'checkout_android',
},
'third_party/colorama/src': {
@ -217,7 +220,7 @@ deps = {
'condition': 'checkout_android',
},
'third_party/depot_tools':
Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '5decb175432cb284b6f8ee102dc1b908b58d8e41',
Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '3d072ab6fb49fd3d2116a41cee66d47c3d409299',
'third_party/fuchsia-sdk/sdk': {
'packages': [
{
@ -234,7 +237,7 @@ deps = {
'third_party/googletest/src':
Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'af29db7ec28d6df1c7f0f745186884091e602e07',
'third_party/icu':
Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '1b7d391f0528fb3a4976b7541b387ee04f915f83',
Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '2c51e5cc7e0a06cd4cd7cb2ddbac445af9b475ba',
'third_party/instrumented_libraries':
Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '09ba70cfb2c0d01c60684660e357ae200caf2968',
'third_party/ittapi': {
@ -244,7 +247,7 @@ deps = {
'condition': "checkout_ittapi or check_v8_header_includes",
},
'third_party/jinja2':
Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + '4633bf431193690c3491244f5a0acbe9ac776233',
Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + '264c07d7e64f2874434a3b8039e101ddf1b01e7e',
'third_party/jsoncpp/source':
Var('chromium_url') + '/external/github.com/open-source-parsers/jsoncpp.git'+ '@' + '42e892d96e47b1f6e29844cc705e148ec4856448',
'third_party/logdog/logdog':
@ -270,9 +273,9 @@ deps = {
'condition': 'checkout_android',
},
'third_party/zlib':
Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '18d27fa10b237fdfcbd8f0c65c19fe009981a3bc',
Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '2d44c51ada6d325b85b53427b02dabf44648bca4',
'tools/clang':
Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'cab032b11ddc12804bf4ae8d71a6e0f88bc51ddb',
Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '44e5e39a90511e079e4b9bc2f753059f2fe6ac4b',
'tools/luci-go': {
'packages': [
{
@ -621,6 +624,16 @@ hooks = [
'tools/generate-header-include-checks.py',
],
},
{
'name': 'checkout_v8_builtins_pgo_profiles',
'pattern': '.',
'condition': 'checkout_v8_builtins_pgo_profiles',
'action': [
'python3',
'tools/builtins-pgo/download_profiles.py',
'download',
],
},
{
# Clean up build dirs for crbug.com/1337238.
# After a libc++ roll and revert, .ninja_deps would get into a state


@ -2,4 +2,3 @@ junyan@redhat.com
joransiu@ca.ibm.com
midawson@redhat.com
mfarazma@redhat.com
vasili.skurydzin@ibm.com


@ -2,4 +2,3 @@ junyan@redhat.com
joransiu@ca.ibm.com
midawson@redhat.com
mfarazma@redhat.com
vasili.skurydzin@ibm.com


@ -101,11 +101,13 @@
'|test/unittests/heap/cppgc/' \
'|test/unittests/heap/cppgc-js/',
},
'trap-handler': {
'filepath': 'src/trap-handler/',
},
},
'WATCHLISTS': {
'maglev': [
'jgruber+watch@chromium.org',
'leszeks+watch@chromium.org',
'verwaest+watch@chromium.org',
'victorgomes+watch@chromium.org',
@ -168,5 +170,11 @@
'cppgc': [
'oilpan-reviews+v8@chromium.org',
],
'trap-handler': [
'ahaas@chromium.org',
'clemensb@chromium.org',
'mark@chromium.org',
'mseaborn@chromium.org',
],
},
}


@ -205,7 +205,7 @@ def v8_binary(
name = "noicu/" + name,
srcs = srcs + noicu_srcs,
deps = deps + noicu_deps + default.deps,
includes = includes + default.includes,
includes = includes + ["noicu/"] + default.includes,
copts = copts + default.copts,
linkopts = linkopts + default.linkopts,
**kwargs
@ -214,7 +214,7 @@ def v8_binary(
name = "icu/" + name,
srcs = srcs + icu_srcs,
deps = deps + icu_deps + default.deps,
includes = includes + default.includes,
includes = includes + ["icu/"] + default.includes,
copts = copts + default.copts + ENABLE_I18N_SUPPORT_DEFINES,
linkopts = linkopts + default.linkopts,
**kwargs
@ -249,7 +249,7 @@ def v8_library(
name = name + "_noicu",
srcs = srcs + noicu_srcs,
deps = deps + noicu_deps + default.deps,
includes = includes + default.includes,
includes = includes + ["noicu/"] + default.includes,
copts = copts + default.copts,
linkopts = linkopts + default.linkopts,
alwayslink = 1,
@ -268,7 +268,7 @@ def v8_library(
name = name + "_icu",
srcs = srcs + icu_srcs,
deps = deps + icu_deps + default.deps,
includes = includes + default.includes,
includes = includes + ["icu/"] + default.includes,
copts = copts + default.copts + ENABLE_I18N_SUPPORT_DEFINES,
linkopts = linkopts + default.linkopts,
alwayslink = 1,
@ -296,7 +296,7 @@ def v8_library(
**kwargs
)
def _torque_impl(ctx):
def _torque_initializers_impl(ctx):
if ctx.workspace_name == "v8":
v8root = "."
else:
@ -317,7 +317,7 @@ def _torque_impl(ctx):
# Generate/declare output files
outs = []
for src in ctx.files.srcs:
root, period, ext = src.path.rpartition(".")
root, _period, _ext = src.path.rpartition(".")
# Strip v8root
if root[:len(v8root)] == v8root:
@ -325,22 +325,19 @@ def _torque_impl(ctx):
file = ctx.attr.prefix + "/torque-generated/" + root
outs.append(ctx.actions.declare_file(file + "-tq-csa.cc"))
outs.append(ctx.actions.declare_file(file + "-tq-csa.h"))
outs.append(ctx.actions.declare_file(file + "-tq-inl.inc"))
outs.append(ctx.actions.declare_file(file + "-tq.inc"))
outs.append(ctx.actions.declare_file(file + "-tq.cc"))
outs += [ctx.actions.declare_file(ctx.attr.prefix + "/torque-generated/" + f) for f in ctx.attr.extras]
ctx.actions.run(
outputs = outs,
inputs = ctx.files.srcs,
arguments = args,
executable = ctx.executable.tool,
mnemonic = "GenTorque",
progress_message = "Generating Torque files",
mnemonic = "GenTorqueInitializers",
progress_message = "Generating Torque initializers",
)
return [DefaultInfo(files = depset(outs))]
_v8_torque = rule(
implementation = _torque_impl,
_v8_torque_initializers = rule(
implementation = _torque_initializers_impl,
# cfg = v8_target_cpu_transition,
attrs = {
"prefix": attr.string(mandatory = True),
@ -355,8 +352,8 @@ _v8_torque = rule(
},
)
def v8_torque(name, noicu_srcs, icu_srcs, args, extras):
_v8_torque(
def v8_torque_initializers(name, noicu_srcs, icu_srcs, args, extras):
_v8_torque_initializers(
name = "noicu/" + name,
prefix = "noicu",
srcs = noicu_srcs,
@ -367,7 +364,88 @@ def v8_torque(name, noicu_srcs, icu_srcs, args, extras):
"//conditions:default": ":torque",
}),
)
_v8_torque(
_v8_torque_initializers(
name = "icu/" + name,
prefix = "icu",
srcs = icu_srcs,
args = args,
extras = extras,
tool = select({
"@v8//bazel/config:v8_target_is_32_bits": ":torque_non_pointer_compression",
"//conditions:default": ":torque",
}),
)
def _torque_definitions_impl(ctx):
if ctx.workspace_name == "v8":
v8root = "."
else:
v8root = "external/v8"
# Arguments
args = []
args += ctx.attr.args
args.append("-o")
args.append(ctx.bin_dir.path + "/" + v8root + "/" + ctx.attr.prefix + "/torque-generated")
args.append("-strip-v8-root")
args.append("-v8-root")
args.append(v8root)
# Sources
args += [f.path for f in ctx.files.srcs]
# Generate/declare output files
outs = []
for src in ctx.files.srcs:
root, _period, _ext = src.path.rpartition(".")
# Strip v8root
if root[:len(v8root)] == v8root:
root = root[len(v8root):]
file = ctx.attr.prefix + "/torque-generated/" + root
outs.append(ctx.actions.declare_file(file + "-tq-inl.inc"))
outs.append(ctx.actions.declare_file(file + "-tq.inc"))
outs.append(ctx.actions.declare_file(file + "-tq.cc"))
outs += [ctx.actions.declare_file(ctx.attr.prefix + "/torque-generated/" + f) for f in ctx.attr.extras]
ctx.actions.run(
outputs = outs,
inputs = ctx.files.srcs,
arguments = args,
executable = ctx.executable.tool,
mnemonic = "GenTorqueDefinitions",
progress_message = "Generating Torque definitions",
)
return [DefaultInfo(files = depset(outs))]
_v8_torque_definitions = rule(
implementation = _torque_definitions_impl,
# cfg = v8_target_cpu_transition,
attrs = {
"prefix": attr.string(mandatory = True),
"srcs": attr.label_list(allow_files = True, mandatory = True),
"extras": attr.string_list(),
"tool": attr.label(
allow_files = True,
executable = True,
cfg = "exec",
),
"args": attr.string_list(),
},
)
def v8_torque_definitions(name, noicu_srcs, icu_srcs, args, extras):
_v8_torque_definitions(
name = "noicu/" + name,
prefix = "noicu",
srcs = noicu_srcs,
args = args,
extras = extras,
tool = select({
"@v8//bazel/config:v8_target_is_32_bits": ":torque_non_pointer_compression",
"//conditions:default": ":torque",
}),
)
_v8_torque_definitions(
name = "icu/" + name,
prefix = "icu",
srcs = icu_srcs,
@ -533,7 +611,17 @@ def build_config_content(cpu, icu):
("v8_enable_single_generation", "false"),
("v8_enable_sandbox", "false"),
("v8_enable_shared_ro_heap", "false"),
("v8_disable_write_barriers", "false"),
("v8_target_cpu", cpu),
("v8_code_comments", "false"),
("v8_enable_debug_code", "false"),
("v8_enable_verify_heap", "false"),
("v8_enable_slow_dchecks", "false"),
("v8_enable_maglev", "false"),
("v8_enable_turbofan", "true"),
("v8_enable_disassembler", "false"),
("is_DEBUG_defined", "false"),
("v8_enable_gdbjit", "false"),
])
# TODO(victorgomes): Create a rule (instead of a macro), that can


@ -104,6 +104,10 @@ if (v8_snapshot_toolchain == "") {
# cross compile Windows arm64 with host toolchain.
v8_snapshot_toolchain = host_toolchain
}
} else if (host_cpu == "arm64" && current_cpu == "arm64" &&
host_os == "mac") {
# cross compile iOS arm64 with host_toolchain
v8_snapshot_toolchain = host_toolchain
}
}


@ -63,10 +63,19 @@ declare_args() {
# Sets -DV8_LITE_MODE.
v8_enable_lite_mode = false
# Enable the Turbofan compiler.
# Sets -DV8_ENABLE_TURBOFAN.
v8_enable_turbofan = ""
# Enable the Maglev compiler.
# Sets -DV8_ENABLE_MAGLEV
v8_enable_maglev = ""
# Include support for WebAssembly. If disabled, the 'WebAssembly' global
# will not be available, and embedder APIs to generate WebAssembly modules
# will fail. Also, asm.js will not be translated to WebAssembly and will be
# executed as standard JavaScript instead.
# Sets -DV8_ENABLE_WEBASSEMBLY.
v8_enable_webassembly = ""
# Enable 256-bit long vector re-vectorization pass in WASM compilation pipeline.
@ -97,6 +106,12 @@ declare_args() {
# Enable young generation in cppgc.
cppgc_enable_young_generation = false
# Enables a slim write barrier that performs a single check in the fast
# path and delegates all further checks to a slow-path call. This is fast
# when slow-path checks are rare, i.e., when young generation GC is
# disabled.
cppgc_enable_slim_write_barrier = true
# Enable pointer compression in cppgc.
cppgc_enable_pointer_compression = false
@ -135,6 +150,13 @@ if (v8_enable_webassembly == "") {
assert(!(v8_enable_webassembly && v8_enable_lite_mode),
"Webassembly is not available in lite mode.")
# Turbofan is enabled by default, except in lite mode.
if (v8_enable_turbofan == "") {
v8_enable_turbofan = !v8_enable_lite_mode
}
assert(v8_enable_turbofan || !v8_enable_webassembly,
"Webassembly is not available when Turbofan is disabled.")
# Points to // in v8 stand-alone or to //v8/ in chromium. We need absolute
# paths for all configs in templates as they are shared in different
# subdirectories.
@ -280,6 +302,7 @@ template("v8_executable") {
template("v8_component") {
component(target_name) {
output_name = target_name
forward_variables_from(invoker,
"*",
[


@ -148,10 +148,11 @@ class BasicCrossThreadPersistent final : public CrossThreadPersistentBase,
template <typename U, typename MemberBarrierPolicy,
typename MemberWeaknessTag, typename MemberCheckingPolicy,
typename MemberStorageType,
typename = std::enable_if_t<std::is_base_of<T, U>::value>>
BasicCrossThreadPersistent(
internal::BasicMember<U, MemberBarrierPolicy, MemberWeaknessTag,
MemberCheckingPolicy>
MemberCheckingPolicy, MemberStorageType>
member,
const SourceLocation& loc = SourceLocation::Current())
: BasicCrossThreadPersistent(member.Get(), loc) {}
@ -230,10 +231,11 @@ class BasicCrossThreadPersistent final : public CrossThreadPersistentBase,
// Assignment from member.
template <typename U, typename MemberBarrierPolicy,
typename MemberWeaknessTag, typename MemberCheckingPolicy,
typename MemberStorageType,
typename = std::enable_if_t<std::is_base_of<T, U>::value>>
BasicCrossThreadPersistent& operator=(
internal::BasicMember<U, MemberBarrierPolicy, MemberWeaknessTag,
MemberCheckingPolicy>
MemberCheckingPolicy, MemberStorageType>
member) {
return operator=(member.Get());
}
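A hedged sketch of the member-to-persistent path above (the Job type and Publish helper are hypothetical, not part of this change): handing a Member-held object to another thread via cppgc::subtle::CrossThreadPersistent, through the converting constructor shown above, now for any storage type.

#include "cppgc/cross-thread-persistent.h"
#include "cppgc/garbage-collected.h"
#include "cppgc/member.h"

class Job final : public cppgc::GarbageCollected<Job> {
 public:
  void Trace(cppgc::Visitor*) const {}
};

// Copies the member into a handle that may be held off-thread.
cppgc::subtle::CrossThreadPersistent<Job> Publish(
    const cppgc::Member<Job>& member) {
  return cppgc::subtle::CrossThreadPersistent<Job>(member);
}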


@ -62,10 +62,10 @@ class HeapConsistency final {
* \returns whether a write barrier is needed and which barrier to invoke.
*/
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
typename CheckingPolicy>
typename CheckingPolicy, typename StorageType>
static V8_INLINE WriteBarrierType GetWriteBarrierType(
const internal::BasicMember<T, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy>& value,
CheckingPolicy, StorageType>& value,
WriteBarrierParams& params) {
return internal::WriteBarrier::GetWriteBarrierType(
value.GetRawSlot(), value.GetRawStorage(), params);
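The new StorageType parameter lets this overload read the member's raw storage directly. A minimal sketch of explicit barrier usage, assuming a hypothetical Node type; note that Member assignment already emits this barrier internally, so the sketch only illustrates the call sequence of the subtle API.

#include "cppgc/garbage-collected.h"
#include "cppgc/heap-consistency.h"
#include "cppgc/member.h"
#include "cppgc/visitor.h"

class Node final : public cppgc::GarbageCollected<Node> {
 public:
  void Trace(cppgc::Visitor* visitor) const { visitor->Trace(next_); }

  void SetNextWithExplicitBarrier(Node* next) {
    next_ = next;
    using HC = cppgc::subtle::HeapConsistency;
    HC::WriteBarrierParams params;
    // BasicMember overload: slot and raw storage are taken from next_.
    const HC::WriteBarrierType type = HC::GetWriteBarrierType(next_, params);
    if (type == HC::WriteBarrierType::kMarking) {
      HC::DijkstraWriteBarrier(params, next_.Get());
    }
  }

 private:
  cppgc::Member<Node> next_;
};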


@ -10,6 +10,7 @@
#include <type_traits>
#include "cppgc/internal/finalizer-trait.h"
#include "cppgc/internal/logging.h"
#include "cppgc/internal/name-trait.h"
#include "cppgc/trace-trait.h"
#include "v8config.h" // NOLINT(build/include_directory)
@ -20,12 +21,12 @@ namespace internal {
using GCInfoIndex = uint16_t;
struct V8_EXPORT EnsureGCInfoIndexTrait final {
// Acquires a new GC info object and returns the index. In addition, also
// updates `registered_index` atomically.
// Acquires a new GC info object and updates `registered_index` with the
// index that identifies that new info.
template <typename T>
V8_INLINE static GCInfoIndex EnsureIndex(
V8_INLINE static void EnsureIndex(
std::atomic<GCInfoIndex>& registered_index) {
return EnsureGCInfoIndexTraitDispatch<T>{}(registered_index);
EnsureGCInfoIndexTraitDispatch<T>{}(registered_index);
}
private:
@ -34,38 +35,32 @@ struct V8_EXPORT EnsureGCInfoIndexTrait final {
bool = NameTrait<T>::HasNonHiddenName()>
struct EnsureGCInfoIndexTraitDispatch;
static GCInfoIndex EnsureGCInfoIndexPolymorphic(std::atomic<GCInfoIndex>&,
TraceCallback,
FinalizationCallback,
NameCallback);
static GCInfoIndex EnsureGCInfoIndexPolymorphic(std::atomic<GCInfoIndex>&,
TraceCallback,
FinalizationCallback);
static GCInfoIndex EnsureGCInfoIndexPolymorphic(std::atomic<GCInfoIndex>&,
TraceCallback, NameCallback);
static GCInfoIndex EnsureGCInfoIndexPolymorphic(std::atomic<GCInfoIndex>&,
TraceCallback);
static GCInfoIndex EnsureGCInfoIndexNonPolymorphic(std::atomic<GCInfoIndex>&,
TraceCallback,
FinalizationCallback,
NameCallback);
static GCInfoIndex EnsureGCInfoIndexNonPolymorphic(std::atomic<GCInfoIndex>&,
TraceCallback,
FinalizationCallback);
static GCInfoIndex EnsureGCInfoIndexNonPolymorphic(std::atomic<GCInfoIndex>&,
TraceCallback,
NameCallback);
static GCInfoIndex EnsureGCInfoIndexNonPolymorphic(std::atomic<GCInfoIndex>&,
TraceCallback);
static void V8_PRESERVE_MOST
EnsureGCInfoIndexPolymorphic(std::atomic<GCInfoIndex>&, TraceCallback,
FinalizationCallback, NameCallback);
static void V8_PRESERVE_MOST EnsureGCInfoIndexPolymorphic(
std::atomic<GCInfoIndex>&, TraceCallback, FinalizationCallback);
static void V8_PRESERVE_MOST EnsureGCInfoIndexPolymorphic(
std::atomic<GCInfoIndex>&, TraceCallback, NameCallback);
static void V8_PRESERVE_MOST
EnsureGCInfoIndexPolymorphic(std::atomic<GCInfoIndex>&, TraceCallback);
static void V8_PRESERVE_MOST
EnsureGCInfoIndexNonPolymorphic(std::atomic<GCInfoIndex>&, TraceCallback,
FinalizationCallback, NameCallback);
static void V8_PRESERVE_MOST EnsureGCInfoIndexNonPolymorphic(
std::atomic<GCInfoIndex>&, TraceCallback, FinalizationCallback);
static void V8_PRESERVE_MOST EnsureGCInfoIndexNonPolymorphic(
std::atomic<GCInfoIndex>&, TraceCallback, NameCallback);
static void V8_PRESERVE_MOST
EnsureGCInfoIndexNonPolymorphic(std::atomic<GCInfoIndex>&, TraceCallback);
};
#define DISPATCH(is_polymorphic, has_finalizer, has_non_hidden_name, function) \
template <typename T> \
struct EnsureGCInfoIndexTrait::EnsureGCInfoIndexTraitDispatch< \
T, is_polymorphic, has_finalizer, has_non_hidden_name> { \
V8_INLINE GCInfoIndex \
operator()(std::atomic<GCInfoIndex>& registered_index) { \
return function; \
V8_INLINE void operator()(std::atomic<GCInfoIndex>& registered_index) { \
function; \
} \
};
@ -143,9 +138,16 @@ struct GCInfoTrait final {
static_assert(sizeof(T), "T must be fully defined");
static std::atomic<GCInfoIndex>
registered_index; // Uses zero initialization.
const GCInfoIndex index = registered_index.load(std::memory_order_acquire);
return index ? index
: EnsureGCInfoIndexTrait::EnsureIndex<T>(registered_index);
GCInfoIndex index = registered_index.load(std::memory_order_acquire);
if (V8_UNLIKELY(!index)) {
EnsureGCInfoIndexTrait::EnsureIndex<T>(registered_index);
// The slow-path call uses V8_PRESERVE_MOST, which does not support return
// values (it also preserves RAX). Instead of an out parameter, reload the
// value here; at this point it is guaranteed to be set.
index = registered_index.load(std::memory_order_acquire);
CPPGC_DCHECK(index != 0);
}
return index;
}
};
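The acquire-load / void slow path / reload pattern above generalizes beyond GC info registration. A self-contained sketch with hypothetical names (plain C++, no cppgc dependencies):

#include <atomic>
#include <cassert>
#include <cstdint>

using Index = uint16_t;

// Stand-in for the V8_PRESERVE_MOST slow path: it cannot return a value,
// so it publishes the index through the atomic slot instead.
void EnsureIndexSlow(std::atomic<Index>& slot) {
  static std::atomic<Index> next_index{1};
  Index expected = 0;
  const Index fresh = next_index.fetch_add(1, std::memory_order_relaxed);
  // Only the first caller stores; later callers keep the published value.
  slot.compare_exchange_strong(expected, fresh, std::memory_order_release,
                               std::memory_order_relaxed);
}

Index GetIndex(std::atomic<Index>& slot) {
  Index index = slot.load(std::memory_order_acquire);
  if (!index) {
    EnsureIndexSlow(slot);
    // Reload instead of taking a return value; guaranteed non-zero here.
    index = slot.load(std::memory_order_acquire);
    assert(index != 0);
  }
  return index;
}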


@ -225,9 +225,9 @@ class V8_TRIVIAL_ABI RawPointer final {
};
#if defined(CPPGC_POINTER_COMPRESSION)
using MemberStorage = CompressedPointer;
using DefaultMemberStorage = CompressedPointer;
#else // !defined(CPPGC_POINTER_COMPRESSION)
using MemberStorage = RawPointer;
using DefaultMemberStorage = RawPointer;
#endif // !defined(CPPGC_POINTER_COMPRESSION)
} // namespace internal


@ -34,18 +34,29 @@ struct DijkstraWriteBarrierPolicy {
}
V8_INLINE static void AssigningBarrier(const void* slot, const void* value) {
#ifdef CPPGC_SLIM_WRITE_BARRIER
if (V8_UNLIKELY(WriteBarrier::IsEnabled()))
WriteBarrier::CombinedWriteBarrierSlow(slot);
#else // !CPPGC_SLIM_WRITE_BARRIER
WriteBarrier::Params params;
const WriteBarrier::Type type =
WriteBarrier::GetWriteBarrierType(slot, value, params);
WriteBarrier(type, params, slot, value);
#endif // !CPPGC_SLIM_WRITE_BARRIER
}
template <typename MemberStorage>
V8_INLINE static void AssigningBarrier(const void* slot,
MemberStorage storage) {
#ifdef CPPGC_SLIM_WRITE_BARRIER
if (V8_UNLIKELY(WriteBarrier::IsEnabled()))
WriteBarrier::CombinedWriteBarrierSlow(slot);
#else // !CPPGC_SLIM_WRITE_BARRIER
WriteBarrier::Params params;
const WriteBarrier::Type type =
WriteBarrier::GetWriteBarrierType(slot, storage, params);
WriteBarrier(type, params, slot, storage.Load());
#endif // !CPPGC_SLIM_WRITE_BARRIER
}
private:
@ -69,6 +80,7 @@ struct DijkstraWriteBarrierPolicy {
struct NoWriteBarrierPolicy {
V8_INLINE static void InitializingBarrier(const void*, const void*) {}
V8_INLINE static void AssigningBarrier(const void*, const void*) {}
template <typename MemberStorage>
V8_INLINE static void AssigningBarrier(const void*, MemberStorage) {}
};
@ -197,7 +209,8 @@ template <typename T, typename WeaknessPolicy,
typename CheckingPolicy = DefaultPersistentCheckingPolicy>
class BasicPersistent;
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
typename CheckingPolicy = DefaultMemberCheckingPolicy>
typename CheckingPolicy = DefaultMemberCheckingPolicy,
typename StorageType = DefaultMemberStorage>
class BasicMember;
} // namespace internal


@ -70,6 +70,7 @@ class V8_EXPORT WriteBarrier final {
static V8_INLINE Type GetWriteBarrierType(const void* slot, const void* value,
Params& params);
// Returns the required write barrier for a given `slot` and `value`.
template <typename MemberStorage>
static V8_INLINE Type GetWriteBarrierType(const void* slot, MemberStorage,
Params& params);
// Returns the required write barrier for a given `slot`.
@ -79,6 +80,14 @@ class V8_EXPORT WriteBarrier final {
// Returns the required write barrier for a given `value`.
static V8_INLINE Type GetWriteBarrierType(const void* value, Params& params);
#ifdef CPPGC_SLIM_WRITE_BARRIER
// A write barrier that combines `GenerationalBarrier()` and
// `DijkstraMarkingBarrier()`. We only pass a single parameter here to clobber
// as few registers as possible.
static V8_NOINLINE void V8_PRESERVE_MOST
CombinedWriteBarrierSlow(const void* slot);
#endif // CPPGC_SLIM_WRITE_BARRIER
static V8_INLINE void DijkstraMarkingBarrier(const Params& params,
const void* object);
static V8_INLINE void DijkstraMarkingBarrierRange(
@ -163,7 +172,8 @@ class V8_EXPORT WriteBarrierTypeForCagedHeapPolicy final {
return ValueModeDispatch<value_mode>::Get(slot, value, params, callback);
}
template <WriteBarrier::ValueMode value_mode, typename HeapHandleCallback>
template <WriteBarrier::ValueMode value_mode, typename HeapHandleCallback,
typename MemberStorage>
static V8_INLINE WriteBarrier::Type Get(const void* slot, MemberStorage value,
WriteBarrier::Params& params,
HeapHandleCallback callback) {
@ -207,7 +217,7 @@ class V8_EXPORT WriteBarrierTypeForCagedHeapPolicy final {
template <>
struct WriteBarrierTypeForCagedHeapPolicy::ValueModeDispatch<
WriteBarrier::ValueMode::kValuePresent> {
template <typename HeapHandleCallback>
template <typename HeapHandleCallback, typename MemberStorage>
static V8_INLINE WriteBarrier::Type Get(const void* slot,
MemberStorage storage,
WriteBarrier::Params& params,
@ -305,11 +315,9 @@ class V8_EXPORT WriteBarrierTypeForNonCagedHeapPolicy final {
}
template <WriteBarrier::ValueMode value_mode, typename HeapHandleCallback>
static V8_INLINE WriteBarrier::Type Get(const void* slot, MemberStorage value,
static V8_INLINE WriteBarrier::Type Get(const void* slot, RawPointer value,
WriteBarrier::Params& params,
HeapHandleCallback callback) {
// `MemberStorage` will always be `RawPointer` for non-caged heap builds.
// Just convert to `void*` in this case.
return ValueModeDispatch<value_mode>::Get(slot, value.Load(), params,
callback);
}
@ -383,6 +391,7 @@ WriteBarrier::Type WriteBarrier::GetWriteBarrierType(
}
// static
template <typename MemberStorage>
WriteBarrier::Type WriteBarrier::GetWriteBarrierType(
const void* slot, MemberStorage value, WriteBarrier::Params& params) {
return WriteBarrierTypePolicy::Get<ValueMode::kValuePresent>(slot, value,

View File

@ -28,13 +28,11 @@ namespace internal {
// MemberBase always refers to the object as a const object and defers to
// BasicMember on casting to the right type as needed.
template <typename StorageType>
class V8_TRIVIAL_ABI MemberBase {
public:
#if defined(CPPGC_POINTER_COMPRESSION)
using RawStorage = CompressedPointer;
#else // !defined(CPPGC_POINTER_COMPRESSION)
using RawStorage = RawPointer;
#endif // !defined(CPPGC_POINTER_COMPRESSION)
using RawStorage = StorageType;
protected:
struct AtomicInitializerTag {};
@ -75,16 +73,19 @@ class V8_TRIVIAL_ABI MemberBase {
// The basic class from which all Member classes are 'generated'.
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
typename CheckingPolicy>
class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
typename CheckingPolicy, typename StorageType>
class V8_TRIVIAL_ABI BasicMember final : private MemberBase<StorageType>,
private CheckingPolicy {
using Base = MemberBase<StorageType>;
public:
using PointeeType = T;
using RawStorage = typename Base::RawStorage;
V8_INLINE constexpr BasicMember() = default;
V8_INLINE constexpr BasicMember(std::nullptr_t) {} // NOLINT
V8_INLINE BasicMember(SentinelPointer s) : MemberBase(s) {} // NOLINT
V8_INLINE BasicMember(T* raw) : MemberBase(raw) { // NOLINT
V8_INLINE constexpr BasicMember(std::nullptr_t) {} // NOLINT
V8_INLINE BasicMember(SentinelPointer s) : Base(s) {} // NOLINT
V8_INLINE BasicMember(T* raw) : Base(raw) { // NOLINT
InitializingWriteBarrier(raw);
this->CheckPointer(Get());
}
@ -94,13 +95,13 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
// Atomic ctor. Using the AtomicInitializerTag forces BasicMember to
// initialize using atomic assignments. This is required for preventing
// data races with concurrent marking.
using AtomicInitializerTag = MemberBase::AtomicInitializerTag;
using AtomicInitializerTag = typename Base::AtomicInitializerTag;
V8_INLINE BasicMember(std::nullptr_t, AtomicInitializerTag atomic)
: MemberBase(nullptr, atomic) {}
: Base(nullptr, atomic) {}
V8_INLINE BasicMember(SentinelPointer s, AtomicInitializerTag atomic)
: MemberBase(s, atomic) {}
: Base(s, atomic) {}
V8_INLINE BasicMember(T* raw, AtomicInitializerTag atomic)
: MemberBase(raw, atomic) {
: Base(raw, atomic) {
InitializingWriteBarrier(raw);
this->CheckPointer(Get());
}
@ -119,7 +120,7 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
std::enable_if_t<internal::IsDecayedSameV<T, U>>* = nullptr>
V8_INLINE BasicMember( // NOLINT
const BasicMember<U, OtherWeaknessTag, OtherBarrierPolicy,
OtherCheckingPolicy>& other)
OtherCheckingPolicy, StorageType>& other)
: BasicMember(other.GetRawStorage()) {}
template <typename U, typename OtherBarrierPolicy, typename OtherWeaknessTag,
@ -127,7 +128,7 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
std::enable_if_t<internal::IsStrictlyBaseOfV<T, U>>* = nullptr>
V8_INLINE BasicMember( // NOLINT
const BasicMember<U, OtherWeaknessTag, OtherBarrierPolicy,
OtherCheckingPolicy>& other)
OtherCheckingPolicy, StorageType>& other)
: BasicMember(other.Get()) {}
// Move ctor.
@ -142,8 +143,9 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
template <typename U, typename OtherBarrierPolicy, typename OtherWeaknessTag,
typename OtherCheckingPolicy,
std::enable_if_t<internal::IsDecayedSameV<T, U>>* = nullptr>
V8_INLINE BasicMember(BasicMember<U, OtherWeaknessTag, OtherBarrierPolicy,
OtherCheckingPolicy>&& other) noexcept
V8_INLINE BasicMember(
BasicMember<U, OtherWeaknessTag, OtherBarrierPolicy, OtherCheckingPolicy,
StorageType>&& other) noexcept
: BasicMember(other.GetRawStorage()) {
other.Clear();
}
@ -151,8 +153,9 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
template <typename U, typename OtherBarrierPolicy, typename OtherWeaknessTag,
typename OtherCheckingPolicy,
std::enable_if_t<internal::IsStrictlyBaseOfV<T, U>>* = nullptr>
V8_INLINE BasicMember(BasicMember<U, OtherWeaknessTag, OtherBarrierPolicy,
OtherCheckingPolicy>&& other) noexcept
V8_INLINE BasicMember(
BasicMember<U, OtherWeaknessTag, OtherBarrierPolicy, OtherCheckingPolicy,
StorageType>&& other) noexcept
: BasicMember(other.Get()) {
other.Clear();
}
@ -179,7 +182,7 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
typename OtherCheckingPolicy>
V8_INLINE BasicMember& operator=(
const BasicMember<U, OtherWeaknessTag, OtherBarrierPolicy,
OtherCheckingPolicy>& other) {
OtherCheckingPolicy, StorageType>& other) {
if constexpr (internal::IsDecayedSameV<T, U>) {
return operator=(other.GetRawStorage());
} else {
@ -201,8 +204,8 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
template <typename U, typename OtherWeaknessTag, typename OtherBarrierPolicy,
typename OtherCheckingPolicy>
V8_INLINE BasicMember& operator=(
BasicMember<U, OtherWeaknessTag, OtherBarrierPolicy,
OtherCheckingPolicy>&& other) noexcept {
BasicMember<U, OtherWeaknessTag, OtherBarrierPolicy, OtherCheckingPolicy,
StorageType>&& other) noexcept {
if constexpr (internal::IsDecayedSameV<T, U>) {
operator=(other.GetRawStorage());
} else {
@ -226,7 +229,7 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
}
V8_INLINE BasicMember& operator=(T* other) {
SetRawAtomic(other);
Base::SetRawAtomic(other);
AssigningWriteBarrier(other);
this->CheckPointer(Get());
return *this;
@ -237,20 +240,20 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
return *this;
}
V8_INLINE BasicMember& operator=(SentinelPointer s) {
SetRawAtomic(s);
Base::SetRawAtomic(s);
return *this;
}
template <typename OtherWeaknessTag, typename OtherBarrierPolicy,
typename OtherCheckingPolicy>
V8_INLINE void Swap(BasicMember<T, OtherWeaknessTag, OtherBarrierPolicy,
OtherCheckingPolicy>& other) {
OtherCheckingPolicy, StorageType>& other) {
auto tmp = GetRawStorage();
*this = other;
other = tmp;
}
V8_INLINE explicit operator bool() const { return !IsCleared(); }
V8_INLINE explicit operator bool() const { return !Base::IsCleared(); }
V8_INLINE operator T*() const { return Get(); }
V8_INLINE T* operator->() const { return Get(); }
V8_INLINE T& operator*() const { return *Get(); }
@ -264,10 +267,12 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
// The const_cast below removes the constness from MemberBase storage. The
// following static_cast re-adds any constness if specified through the
// user-visible template parameter T.
return static_cast<T*>(const_cast<void*>(MemberBase::GetRaw()));
return static_cast<T*>(const_cast<void*>(Base::GetRaw()));
}
V8_INLINE void Clear() { SetRawStorageAtomic(RawStorage{}); }
V8_INLINE void Clear() {
Base::SetRawStorageAtomic(RawStorage{});
}
V8_INLINE T* Release() {
T* result = Get();
@ -276,41 +281,42 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
}
V8_INLINE const T** GetSlotForTesting() const {
return reinterpret_cast<const T**>(GetRawSlot());
return reinterpret_cast<const T**>(Base::GetRawSlot());
}
V8_INLINE RawStorage GetRawStorage() const {
return MemberBase::GetRawStorage();
return Base::GetRawStorage();
}
private:
V8_INLINE explicit BasicMember(RawStorage raw) : MemberBase(raw) {
V8_INLINE explicit BasicMember(RawStorage raw) : Base(raw) {
InitializingWriteBarrier(Get());
this->CheckPointer(Get());
}
V8_INLINE BasicMember& operator=(RawStorage other) {
SetRawStorageAtomic(other);
Base::SetRawStorageAtomic(other);
AssigningWriteBarrier();
this->CheckPointer(Get());
return *this;
}
V8_INLINE const T* GetRawAtomic() const {
return static_cast<const T*>(MemberBase::GetRawAtomic());
return static_cast<const T*>(Base::GetRawAtomic());
}
V8_INLINE void InitializingWriteBarrier(T* value) const {
WriteBarrierPolicy::InitializingBarrier(GetRawSlot(), value);
WriteBarrierPolicy::InitializingBarrier(Base::GetRawSlot(), value);
}
V8_INLINE void AssigningWriteBarrier(T* value) const {
WriteBarrierPolicy::AssigningBarrier(GetRawSlot(), value);
WriteBarrierPolicy::AssigningBarrier(Base::GetRawSlot(), value);
}
V8_INLINE void AssigningWriteBarrier() const {
WriteBarrierPolicy::AssigningBarrier(GetRawSlot(), GetRawStorage());
WriteBarrierPolicy::AssigningBarrier(Base::GetRawSlot(),
Base::GetRawStorage());
}
V8_INLINE void ClearFromGC() const { MemberBase::ClearFromGC(); }
V8_INLINE void ClearFromGC() const { Base::ClearFromGC(); }
V8_INLINE T* GetFromGC() const { return Get(); }
@ -319,19 +325,20 @@ class V8_TRIVIAL_ABI BasicMember final : private MemberBase,
template <typename U>
friend struct cppgc::TraceTrait;
template <typename T1, typename WeaknessTag1, typename WriteBarrierPolicy1,
typename CheckingPolicy1>
typename CheckingPolicy1, typename StorageType1>
friend class BasicMember;
};
// Member equality operators.
template <typename T1, typename WeaknessTag1, typename WriteBarrierPolicy1,
typename CheckingPolicy1, typename T2, typename WeaknessTag2,
typename WriteBarrierPolicy2, typename CheckingPolicy2>
typename WriteBarrierPolicy2, typename CheckingPolicy2,
typename StorageType>
V8_INLINE bool operator==(
const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1>&
member1,
const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2>&
member2) {
const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1,
StorageType>& member1,
const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2,
StorageType>& member2) {
if constexpr (internal::IsDecayedSameV<T1, T2>) {
// Check compressed pointers if types are the same.
return member1.GetRawStorage() == member2.GetRawStorage();
@ -345,31 +352,32 @@ V8_INLINE bool operator==(
template <typename T1, typename WeaknessTag1, typename WriteBarrierPolicy1,
typename CheckingPolicy1, typename T2, typename WeaknessTag2,
typename WriteBarrierPolicy2, typename CheckingPolicy2>
typename WriteBarrierPolicy2, typename CheckingPolicy2,
typename StorageType>
V8_INLINE bool operator!=(
const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1>&
member1,
const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2>&
member2) {
const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1,
StorageType>& member1,
const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2,
StorageType>& member2) {
return !(member1 == member2);
}
// Equality with raw pointers.
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
typename CheckingPolicy, typename U>
V8_INLINE bool operator==(const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy>& member,
U* raw) {
typename CheckingPolicy, typename StorageType, typename U>
V8_INLINE bool operator==(
const BasicMember<T, WeaknessTag, WriteBarrierPolicy, CheckingPolicy,
StorageType>& member,
U* raw) {
// Never allow comparison with erased pointers.
static_assert(!internal::IsDecayedSameV<void, U>);
if constexpr (internal::IsDecayedSameV<T, U>) {
// Check compressed pointers if types are the same.
return member.GetRawStorage() == MemberBase::RawStorage(raw);
return member.GetRawStorage() == StorageType(raw);
} else if constexpr (internal::IsStrictlyBaseOfV<T, U>) {
// Cast the raw pointer to T, which may adjust the pointer.
return member.GetRawStorage() ==
MemberBase::RawStorage(static_cast<T*>(raw));
return member.GetRawStorage() == StorageType(static_cast<T*>(raw));
} else {
// Otherwise, decompress the member.
return member.Get() == raw;
@ -377,104 +385,112 @@ V8_INLINE bool operator==(const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
}
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
typename CheckingPolicy, typename U>
V8_INLINE bool operator!=(const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy>& member,
U* raw) {
typename CheckingPolicy, typename StorageType, typename U>
V8_INLINE bool operator!=(
const BasicMember<T, WeaknessTag, WriteBarrierPolicy, CheckingPolicy,
StorageType>& member,
U* raw) {
return !(member == raw);
}
template <typename T, typename U, typename WeaknessTag,
typename WriteBarrierPolicy, typename CheckingPolicy>
V8_INLINE bool operator==(T* raw,
const BasicMember<U, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy>& member) {
typename WriteBarrierPolicy, typename CheckingPolicy,
typename StorageType>
V8_INLINE bool operator==(
T* raw, const BasicMember<U, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy, StorageType>& member) {
return member == raw;
}
template <typename T, typename U, typename WeaknessTag,
typename WriteBarrierPolicy, typename CheckingPolicy>
V8_INLINE bool operator!=(T* raw,
const BasicMember<U, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy>& member) {
typename WriteBarrierPolicy, typename CheckingPolicy,
typename StorageType>
V8_INLINE bool operator!=(
T* raw, const BasicMember<U, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy, StorageType>& member) {
return !(raw == member);
}
// Equality with sentinel.
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
typename CheckingPolicy>
V8_INLINE bool operator==(const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy>& member,
SentinelPointer) {
typename CheckingPolicy, typename StorageType>
V8_INLINE bool operator==(
const BasicMember<T, WeaknessTag, WriteBarrierPolicy, CheckingPolicy,
StorageType>& member,
SentinelPointer) {
return member.GetRawStorage().IsSentinel();
}
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
typename CheckingPolicy>
V8_INLINE bool operator!=(const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy>& member,
SentinelPointer s) {
typename CheckingPolicy, typename StorageType>
V8_INLINE bool operator!=(
const BasicMember<T, WeaknessTag, WriteBarrierPolicy, CheckingPolicy,
StorageType>& member,
SentinelPointer s) {
return !(member == s);
}
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
typename CheckingPolicy>
V8_INLINE bool operator==(SentinelPointer s,
const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy>& member) {
typename CheckingPolicy, typename StorageType>
V8_INLINE bool operator==(
SentinelPointer s, const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy, StorageType>& member) {
return member == s;
}
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
typename CheckingPolicy>
V8_INLINE bool operator!=(SentinelPointer s,
const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy>& member) {
typename CheckingPolicy, typename StorageType>
V8_INLINE bool operator!=(
SentinelPointer s, const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy, StorageType>& member) {
return !(s == member);
}
// Equality with nullptr.
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
typename CheckingPolicy>
V8_INLINE bool operator==(const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy>& member,
std::nullptr_t) {
typename CheckingPolicy, typename StorageType>
V8_INLINE bool operator==(
const BasicMember<T, WeaknessTag, WriteBarrierPolicy, CheckingPolicy,
StorageType>& member,
std::nullptr_t) {
return !static_cast<bool>(member);
}
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
typename CheckingPolicy>
V8_INLINE bool operator!=(const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy>& member,
std::nullptr_t n) {
typename CheckingPolicy, typename StorageType>
V8_INLINE bool operator!=(
const BasicMember<T, WeaknessTag, WriteBarrierPolicy, CheckingPolicy,
StorageType>& member,
std::nullptr_t n) {
return !(member == n);
}
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
typename CheckingPolicy>
V8_INLINE bool operator==(std::nullptr_t n,
const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy>& member) {
typename CheckingPolicy, typename StorageType>
V8_INLINE bool operator==(
std::nullptr_t n, const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy, StorageType>& member) {
return member == n;
}
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
typename CheckingPolicy>
V8_INLINE bool operator!=(std::nullptr_t n,
const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy>& member) {
typename CheckingPolicy, typename StorageType>
V8_INLINE bool operator!=(
std::nullptr_t n, const BasicMember<T, WeaknessTag, WriteBarrierPolicy,
CheckingPolicy, StorageType>& member) {
return !(n == member);
}
// Relational operators.
template <typename T1, typename WeaknessTag1, typename WriteBarrierPolicy1,
typename CheckingPolicy1, typename T2, typename WeaknessTag2,
typename WriteBarrierPolicy2, typename CheckingPolicy2>
typename WriteBarrierPolicy2, typename CheckingPolicy2,
typename StorageType>
V8_INLINE bool operator<(
const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1>&
member1,
const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2>&
member2) {
const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1,
StorageType>& member1,
const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2,
StorageType>& member2) {
static_assert(
internal::IsDecayedSameV<T1, T2>,
"Comparison works only for same pointer type modulo cv-qualifiers");
@ -483,12 +499,13 @@ V8_INLINE bool operator<(
template <typename T1, typename WeaknessTag1, typename WriteBarrierPolicy1,
typename CheckingPolicy1, typename T2, typename WeaknessTag2,
typename WriteBarrierPolicy2, typename CheckingPolicy2>
typename WriteBarrierPolicy2, typename CheckingPolicy2,
typename StorageType>
V8_INLINE bool operator<=(
const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1>&
member1,
const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2>&
member2) {
const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1,
StorageType>& member1,
const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2,
StorageType>& member2) {
static_assert(
internal::IsDecayedSameV<T1, T2>,
"Comparison works only for same pointer type modulo cv-qualifiers");
@ -497,12 +514,13 @@ V8_INLINE bool operator<=(
template <typename T1, typename WeaknessTag1, typename WriteBarrierPolicy1,
typename CheckingPolicy1, typename T2, typename WeaknessTag2,
typename WriteBarrierPolicy2, typename CheckingPolicy2>
typename WriteBarrierPolicy2, typename CheckingPolicy2,
typename StorageType>
V8_INLINE bool operator>(
const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1>&
member1,
const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2>&
member2) {
const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1,
StorageType>& member1,
const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2,
StorageType>& member2) {
static_assert(
internal::IsDecayedSameV<T1, T2>,
"Comparison works only for same pointer type modulo cv-qualifiers");
@ -511,21 +529,23 @@ V8_INLINE bool operator>(
template <typename T1, typename WeaknessTag1, typename WriteBarrierPolicy1,
typename CheckingPolicy1, typename T2, typename WeaknessTag2,
typename WriteBarrierPolicy2, typename CheckingPolicy2>
typename WriteBarrierPolicy2, typename CheckingPolicy2,
typename StorageType>
V8_INLINE bool operator>=(
const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1>&
member1,
const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2>&
member2) {
const BasicMember<T1, WeaknessTag1, WriteBarrierPolicy1, CheckingPolicy1,
StorageType>& member1,
const BasicMember<T2, WeaknessTag2, WriteBarrierPolicy2, CheckingPolicy2,
StorageType>& member2) {
static_assert(
internal::IsDecayedSameV<T1, T2>,
"Comparison works only for same pointer type modulo cv-qualifiers");
return member1.GetRawStorage() >= member2.GetRawStorage();
}
template <typename T, typename WriteBarrierPolicy, typename CheckingPolicy>
struct IsWeak<
internal::BasicMember<T, WeakMemberTag, WriteBarrierPolicy, CheckingPolicy>>
template <typename T, typename WriteBarrierPolicy, typename CheckingPolicy,
typename StorageType>
struct IsWeak<internal::BasicMember<T, WeakMemberTag, WriteBarrierPolicy,
CheckingPolicy, StorageType>>
: std::true_type {};
} // namespace internal
@ -536,8 +556,9 @@ struct IsWeak<
* trace method.
*/
template <typename T>
using Member = internal::BasicMember<T, internal::StrongMemberTag,
internal::DijkstraWriteBarrierPolicy>;
using Member = internal::BasicMember<
T, internal::StrongMemberTag, internal::DijkstraWriteBarrierPolicy,
internal::DefaultMemberCheckingPolicy, internal::DefaultMemberStorage>;
/**
* WeakMember is similar to Member in that it is used to point to other garbage
@ -548,8 +569,9 @@ using Member = internal::BasicMember<T, internal::StrongMemberTag,
* will automatically be set to null.
*/
template <typename T>
using WeakMember = internal::BasicMember<T, internal::WeakMemberTag,
internal::DijkstraWriteBarrierPolicy>;
using WeakMember = internal::BasicMember<
T, internal::WeakMemberTag, internal::DijkstraWriteBarrierPolicy,
internal::DefaultMemberCheckingPolicy, internal::DefaultMemberStorage>;
/**
* UntracedMember is a pointer to an on-heap object that is not traced for some
@ -558,8 +580,22 @@ using WeakMember = internal::BasicMember<T, internal::WeakMemberTag,
* must be kept alive through other means.
*/
template <typename T>
using UntracedMember = internal::BasicMember<T, internal::UntracedMemberTag,
internal::NoWriteBarrierPolicy>;
using UntracedMember = internal::BasicMember<
T, internal::UntracedMemberTag, internal::NoWriteBarrierPolicy,
internal::DefaultMemberCheckingPolicy, internal::DefaultMemberStorage>;
namespace subtle {
/**
* UncompressedMember. Use with care in hot paths that would otherwise cause
* many decompression cycles.
*/
template <typename T>
using UncompressedMember = internal::BasicMember<
T, internal::StrongMemberTag, internal::DijkstraWriteBarrierPolicy,
internal::DefaultMemberCheckingPolicy, internal::RawPointer>;
} // namespace subtle
} // namespace cppgc
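A hedged usage sketch of the new storage-type parameter (the HotList type is hypothetical): UncompressedMember stores a full-width pointer even when CPPGC_POINTER_COMPRESSION is on, trading memory for skipping decompression on reads.

#include "cppgc/garbage-collected.h"
#include "cppgc/member.h"
#include "cppgc/visitor.h"

class HotList final : public cppgc::GarbageCollected<HotList> {
 public:
  void Trace(cppgc::Visitor* visitor) const {
    visitor->Trace(compressed_);
    visitor->Trace(uncompressed_);  // Visitor overload added in this change.
  }

 private:
  // Default Member: compressed when pointer compression is enabled.
  cppgc::Member<HotList> compressed_;
  // Full-width pointer: avoids a decompress on every read in hot paths.
  cppgc::subtle::UncompressedMember<HotList> uncompressed_;
};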


@ -114,11 +114,12 @@ class BasicPersistent final : public PersistentBase,
// Constructor from member.
template <typename U, typename MemberBarrierPolicy,
typename MemberWeaknessTag, typename MemberCheckingPolicy,
typename MemberStorageType,
typename = std::enable_if_t<std::is_base_of<T, U>::value>>
BasicPersistent(
const internal::BasicMember<U, MemberBarrierPolicy, MemberWeaknessTag,
MemberCheckingPolicy>& member,
const SourceLocation& loc = SourceLocation::Current())
BasicPersistent(const internal::BasicMember<
U, MemberBarrierPolicy, MemberWeaknessTag,
MemberCheckingPolicy, MemberStorageType>& member,
const SourceLocation& loc = SourceLocation::Current())
: BasicPersistent(member.Get(), loc) {}
~BasicPersistent() { Clear(); }
@ -154,10 +155,12 @@ class BasicPersistent final : public PersistentBase,
// Assignment from member.
template <typename U, typename MemberBarrierPolicy,
typename MemberWeaknessTag, typename MemberCheckingPolicy,
typename MemberStorageType,
typename = std::enable_if_t<std::is_base_of<T, U>::value>>
BasicPersistent& operator=(
const internal::BasicMember<U, MemberBarrierPolicy, MemberWeaknessTag,
MemberCheckingPolicy>& member) {
MemberCheckingPolicy, MemberStorageType>&
member) {
return operator=(member.Get());
}
@ -286,36 +289,39 @@ bool operator!=(const BasicPersistent<T1, WeaknessPolicy1, LocationPolicy1,
template <typename T1, typename PersistentWeaknessPolicy,
typename PersistentLocationPolicy, typename PersistentCheckingPolicy,
typename T2, typename MemberWriteBarrierPolicy,
typename MemberWeaknessTag, typename MemberCheckingPolicy>
typename MemberWeaknessTag, typename MemberCheckingPolicy,
typename MemberStorageType>
bool operator==(
const BasicPersistent<T1, PersistentWeaknessPolicy,
PersistentLocationPolicy, PersistentCheckingPolicy>&
p,
const BasicMember<T2, MemberWeaknessTag, MemberWriteBarrierPolicy,
MemberCheckingPolicy>& m) {
MemberCheckingPolicy, MemberStorageType>& m) {
return p.Get() == m.Get();
}
template <typename T1, typename PersistentWeaknessPolicy,
typename PersistentLocationPolicy, typename PersistentCheckingPolicy,
typename T2, typename MemberWriteBarrierPolicy,
typename MemberWeaknessTag, typename MemberCheckingPolicy>
typename MemberWeaknessTag, typename MemberCheckingPolicy,
typename MemberStorageType>
bool operator!=(
const BasicPersistent<T1, PersistentWeaknessPolicy,
PersistentLocationPolicy, PersistentCheckingPolicy>&
p,
const BasicMember<T2, MemberWeaknessTag, MemberWriteBarrierPolicy,
MemberCheckingPolicy>& m) {
MemberCheckingPolicy, MemberStorageType>& m) {
return !(p == m);
}
template <typename T1, typename MemberWriteBarrierPolicy,
typename MemberWeaknessTag, typename MemberCheckingPolicy,
typename T2, typename PersistentWeaknessPolicy,
typename PersistentLocationPolicy, typename PersistentCheckingPolicy>
typename MemberStorageType, typename T2,
typename PersistentWeaknessPolicy, typename PersistentLocationPolicy,
typename PersistentCheckingPolicy>
bool operator==(
const BasicMember<T2, MemberWeaknessTag, MemberWriteBarrierPolicy,
MemberCheckingPolicy>& m,
MemberCheckingPolicy, MemberStorageType>& m,
const BasicPersistent<T1, PersistentWeaknessPolicy,
PersistentLocationPolicy, PersistentCheckingPolicy>&
p) {
@ -324,11 +330,12 @@ bool operator==(
template <typename T1, typename MemberWriteBarrierPolicy,
typename MemberWeaknessTag, typename MemberCheckingPolicy,
typename T2, typename PersistentWeaknessPolicy,
typename PersistentLocationPolicy, typename PersistentCheckingPolicy>
typename MemberStorageType, typename T2,
typename PersistentWeaknessPolicy, typename PersistentLocationPolicy,
typename PersistentCheckingPolicy>
bool operator!=(
const BasicMember<T2, MemberWeaknessTag, MemberWriteBarrierPolicy,
MemberCheckingPolicy>& m,
MemberCheckingPolicy, MemberStorageType>& m,
const BasicPersistent<T1, PersistentWeaknessPolicy,
PersistentLocationPolicy, PersistentCheckingPolicy>&
p) {


@ -16,7 +16,7 @@ class Visitor;
namespace internal {
template <typename T, typename WeaknessTag, typename WriteBarrierPolicy,
typename CheckingPolicy>
typename CheckingPolicy, typename StorageType>
class BasicMember;
struct DijkstraWriteBarrierPolicy;
struct NoWriteBarrierPolicy;
@ -126,9 +126,10 @@ template <typename BasicMemberCandidate, typename WeaknessTag,
typename WriteBarrierPolicy>
struct IsSubclassOfBasicMemberTemplate {
private:
template <typename T, typename CheckingPolicy>
template <typename T, typename CheckingPolicy, typename StorageType>
static std::true_type SubclassCheck(
BasicMember<T, WeaknessTag, WriteBarrierPolicy, CheckingPolicy>*);
BasicMember<T, WeaknessTag, WriteBarrierPolicy, CheckingPolicy,
StorageType>*);
static std::false_type SubclassCheck(...);
public:


@ -99,6 +99,20 @@ class V8_EXPORT Visitor {
&HandleWeak<WeakMember<T>>, &weak_member);
}
#if defined(CPPGC_POINTER_COMPRESSION)
/**
* Trace method for UncompressedMember.
*
* \param member UncompressedMember reference retaining an object.
*/
template <typename T>
void Trace(const subtle::UncompressedMember<T>& member) {
const T* value = member.GetRawAtomic();
CPPGC_DCHECK(value != kSentinelPointer);
TraceImpl(value);
}
#endif // defined(CPPGC_POINTER_COMPRESSION)
/**
* Trace method for inlined objects that are not allocated themselves but
* otherwise follow managed heap layout and have a Trace() method.


@ -1402,6 +1402,13 @@ domain Runtime
optional string objectGroup
# Whether to throw an exception if side effect cannot be ruled out during evaluation.
experimental optional boolean throwOnSideEffect
# An alternative way to specify the execution context to call the function on.
# Compared to contextId, which may be reused across processes, this is guaranteed to be
# system-unique, so it can be used to prevent accidental function calls
# in a context different from the intended one (e.g. as a result of navigation across process
# boundaries).
# This is mutually exclusive with `executionContextId`.
experimental optional string uniqueContextId
# Whether the result should contain `webDriverValue`, serialized according to
# https://w3c.github.io/webdriver-bidi. This is mutually exclusive with `returnByValue`, but
# resulting `objectId` is still provided.
@ -1734,7 +1741,9 @@ domain Runtime
event executionContextDestroyed
parameters
# Id of the destroyed context
ExecutionContextId executionContextId
deprecated ExecutionContextId executionContextId
# Unique Id of the destroyed context
experimental string executionContextUniqueId
# Issued when all executionContexts were cleared in browser
event executionContextsCleared


@ -328,6 +328,10 @@ using WasmSimdEnabledCallback = bool (*)(Local<Context> context);
// --- Callback for checking if WebAssembly exceptions are enabled ---
using WasmExceptionsEnabledCallback = bool (*)(Local<Context> context);
// --- Callback for checking if WebAssembly GC is enabled ---
// If the callback returns true, it will also enable Wasm stringrefs.
using WasmGCEnabledCallback = bool (*)(Local<Context> context);
// --- Callback for checking if the SharedArrayBuffer constructor is enabled ---
using SharedArrayBufferConstructorEnabledCallback =
bool (*)(Local<Context> context);
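For illustration, a callback of this shape could gate the feature per context. The Isolate setter below mirrors the existing SetWasm*EnabledCallback pattern and is an assumption; this diff only documents the callback type.

#include "v8-callbacks.h"
#include "v8-isolate.h"

// Hypothetical policy: allow Wasm GC (and, per the comment above,
// stringrefs) everywhere. Real embedders would consult per-context state.
bool WasmGCEnabled(v8::Local<v8::Context> /*context*/) { return true; }

void InstallCallbacks(v8::Isolate* isolate) {
  // Assumed setter name, mirroring SetWasmExceptionsEnabledCallback.
  isolate->SetWasmGCEnabledCallback(WasmGCEnabled);
}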


@ -85,6 +85,7 @@ struct CppHeapCreateParams {
inline CppHeapCreateParams(WrapperDescriptor wrapper_descriptor)
: wrapper_descriptor(wrapper_descriptor) {}
std::vector<std::unique_ptr<cppgc::CustomSpaceBase>> custom_spaces;
WrapperDescriptor wrapper_descriptor;
/**


@ -5,27 +5,14 @@
#ifndef INCLUDE_V8_EMBEDDER_HEAP_H_
#define INCLUDE_V8_EMBEDDER_HEAP_H_
#include <stddef.h>
#include <stdint.h>
#include <utility>
#include <vector>
#include "cppgc/common.h"
#include "v8-local-handle.h" // NOLINT(build/include_directory)
#include "v8-traced-handle.h" // NOLINT(build/include_directory)
#include "v8config.h" // NOLINT(build/include_directory)
namespace v8 {
class Data;
class Isolate;
class Value;
namespace internal {
class LocalEmbedderHeapTracer;
} // namespace internal
/**
* Handler for embedder roots on non-unified heap garbage collections.
*/
@ -62,162 +49,6 @@ class V8_EXPORT EmbedderRootsHandler {
virtual void ResetRoot(const v8::TracedReference<v8::Value>& handle) = 0;
};
/**
* Interface for tracing through the embedder heap. During a V8 garbage
* collection, V8 collects hidden fields of all potential wrappers, and at the
* end of its marking phase iterates the collection and asks the embedder to
* trace through its heap and use reporter to report each JavaScript object
* reachable from any of the given wrappers.
*/
class V8_EXPORT
// GCC doesn't like combining __attribute__(()) with [[deprecated]].
#ifdef __clang__
V8_DEPRECATED("Use CppHeap when working with v8::TracedReference.")
#endif // __clang__
EmbedderHeapTracer {
public:
using EmbedderStackState = cppgc::EmbedderStackState;
enum TraceFlags : uint64_t {
kNoFlags = 0,
kReduceMemory = 1 << 0,
kForced = 1 << 2,
};
/**
* Interface for iterating through |TracedReference| handles.
*/
class V8_EXPORT TracedGlobalHandleVisitor {
public:
virtual ~TracedGlobalHandleVisitor() = default;
virtual void VisitTracedReference(const TracedReference<Value>& handle) {}
};
/**
* Summary of a garbage collection cycle. See |TraceEpilogue| on how the
* summary is reported.
*/
struct TraceSummary {
/**
* Time spent managing the retained memory in milliseconds. This can e.g.
* include the time tracing through objects in the embedder.
*/
double time = 0.0;
/**
* Memory retained by the embedder through the |EmbedderHeapTracer|
* mechanism in bytes.
*/
size_t allocated_size = 0;
};
virtual ~EmbedderHeapTracer() = default;
/**
* Iterates all |TracedReference| handles created for the |v8::Isolate| the
* tracer is attached to.
*/
void IterateTracedGlobalHandles(TracedGlobalHandleVisitor* visitor);
/**
* Called by the embedder to set the start of the stack which is e.g. used by
* V8 to determine whether handles are used from stack or heap.
*/
void SetStackStart(void* stack_start);
/**
* Called by v8 to register internal fields of found wrappers.
*
* The embedder is expected to store them somewhere and trace reachable
* wrappers from them when called through |AdvanceTracing|.
*/
virtual void RegisterV8References(
const std::vector<std::pair<void*, void*>>& embedder_fields) = 0;
void RegisterEmbedderReference(const BasicTracedReference<v8::Data>& ref);
/**
* Called at the beginning of a GC cycle.
*/
virtual void TracePrologue(TraceFlags flags) {}
/**
* Called to advance tracing in the embedder.
*
* The embedder is expected to trace its heap starting from wrappers reported
* by the RegisterV8References method, and report back all reachable wrappers.
* Furthermore, the embedder is expected to stop tracing by the given
* deadline. A deadline of infinity means that tracing should be finished.
*
* Returns |true| if tracing is done, and false otherwise.
*/
virtual bool AdvanceTracing(double deadline_in_ms) = 0;
/*
* Returns true if there is no more tracing work to be done (see AdvanceTracing)
* and false otherwise.
*/
virtual bool IsTracingDone() = 0;
/**
* Called at the end of a GC cycle.
*
* Note that allocation is *not* allowed within |TraceEpilogue|. Can be
* overridden to fill a |TraceSummary| that is used by V8 to schedule future
* garbage collections.
*/
virtual void TraceEpilogue(TraceSummary* trace_summary) {}
/**
* Called upon entering the final marking pause. No more incremental marking
* steps will follow this call.
*/
virtual void EnterFinalPause(EmbedderStackState stack_state) = 0;
/*
* Called by the embedder to request immediate finalization of the currently
* running tracing phase that has been started with TracePrologue and not
* yet finished with TraceEpilogue.
*
* Will be a no-op when no tracing is currently in progress.
*
* This is an experimental feature.
*/
void FinalizeTracing();
/**
* See documentation on EmbedderRootsHandler.
*/
virtual bool IsRootForNonTracingGC(
const v8::TracedReference<v8::Value>& handle);
/**
* See documentation on EmbedderRootsHandler.
*/
virtual void ResetHandleInNonTracingGC(
const v8::TracedReference<v8::Value>& handle);
/*
* Called by the embedder to signal newly allocated or freed memory. Not bound
* to tracing phases. Embedders should trade off when increments are reported
* as V8 may consult global heuristics on whether to trigger garbage
* collection on this change.
*/
void IncreaseAllocatedSize(size_t bytes);
void DecreaseAllocatedSize(size_t bytes);
/*
* Returns the v8::Isolate this tracer is attached to, and |nullptr| if it
* is not attached to any v8::Isolate.
*/
v8::Isolate* isolate() const { return v8_isolate_; }
protected:
v8::Isolate* v8_isolate_ = nullptr;
friend class internal::LocalEmbedderHeapTracer;
};
} // namespace v8
#endif // INCLUDE_V8_EMBEDDER_HEAP_H_


@ -315,6 +315,7 @@ static_assert(1ULL << (64 - kBoundedSizeShift) ==
#ifdef V8_COMPRESS_POINTERS
#ifdef V8_TARGET_OS_ANDROID
// The size of the virtual memory reservation for an external pointer table.
// This determines the maximum number of entries in a table. Using a maximum
// size allows omitting bounds checks on table accesses if the indices are
@ -322,14 +323,18 @@ static_assert(1ULL << (64 - kBoundedSizeShift) ==
// value must be a power of two.
static const size_t kExternalPointerTableReservationSize = 512 * MB;
// The maximum number of entries in an external pointer table.
static const size_t kMaxExternalPointers =
kExternalPointerTableReservationSize / kApiSystemPointerSize;
// The external pointer table indices stored in HeapObjects as external
// pointers are shifted to the left by this amount to guarantee that they are
// smaller than the maximum table size.
static const uint32_t kExternalPointerIndexShift = 6;
#else
static const size_t kExternalPointerTableReservationSize = 1024 * MB;
static const uint32_t kExternalPointerIndexShift = 5;
#endif // V8_TARGET_OS_ANDROID
// The maximum number of entries in an external pointer table.
static const size_t kMaxExternalPointers =
kExternalPointerTableReservationSize / kApiSystemPointerSize;
static_assert((1 << (32 - kExternalPointerIndexShift)) == kMaxExternalPointers,
"kExternalPointerTableReservationSize and "
"kExternalPointerIndexShift don't match");


@ -541,6 +541,7 @@ class V8_EXPORT Isolate {
kTurboFanOsrCompileStarted = 115,
kAsyncStackTaggingCreateTaskCall = 116,
kDurationFormat = 117,
kInvalidatedNumberStringPrototypeNoReplaceProtector = 118,
// If you add new values here, you'll also need to update Chromium's:
// web_feature.mojom, use_counter_callback.cc, and enums.xml. V8 changes to
@ -928,27 +929,10 @@ class V8_EXPORT Isolate {
void RemoveGCPrologueCallback(GCCallbackWithData, void* data = nullptr);
void RemoveGCPrologueCallback(GCCallback callback);
START_ALLOW_USE_DEPRECATED()
/**
* Sets the embedder heap tracer for the isolate.
* SetEmbedderHeapTracer cannot be used simultaneously with AttachCppHeap.
*/
void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
/*
* Gets the currently active heap tracer for the isolate that was set with
* SetEmbedderHeapTracer.
*/
EmbedderHeapTracer* GetEmbedderHeapTracer();
END_ALLOW_USE_DEPRECATED()
/**
* Sets an embedder roots handler that V8 should consider when performing
* non-unified heap garbage collections.
*
* Using only EmbedderHeapTracer automatically sets up a default handler.
* The intended use case is for setting a custom handler after invoking
* `AttachCppHeap()`.
* non-unified heap garbage collections. The intended use case is for setting
* a custom handler after invoking `AttachCppHeap()`.
*
* V8 does not take ownership of the handler.
*/
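A minimal sketch of that intended use (the IsRoot signature mirrors the ResetRoot virtual shown earlier; the handler bodies are illustrative):

class MyRootsHandler final : public v8::EmbedderRootsHandler {
 public:
  bool IsRoot(const v8::TracedReference<v8::Value>& handle) override {
    return false;  // let V8 reclaim handles that are otherwise unreachable
  }
  void ResetRoot(const v8::TracedReference<v8::Value>& handle) override {
    // Drop the embedder-side reference backing |handle|.
  }
};

// After AttachCppHeap(); V8 does not take ownership of the handler.
isolate->SetEmbedderRootsHandler(&handler);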
@ -959,8 +943,6 @@ class V8_EXPORT Isolate {
* embedder maintains ownership of the CppHeap. At most one C++ heap can be
* attached to V8.
*
* AttachCppHeap cannot be used simultaneously with SetEmbedderHeapTracer.
*
* Multi-threaded use requires the use of v8::Locker/v8::Unlocker, see
* CppHeap.
*/
@ -1350,11 +1332,13 @@ class V8_EXPORT Isolate {
* V8 uses this notification to guide heuristics which may result in a
* smaller memory footprint at the cost of reduced runtime performance.
*/
V8_DEPRECATED("Use IsolateInBackgroundNotification() instead")
void EnableMemorySavingsMode();
/**
* Optional notification which will disable the memory savings mode.
*/
V8_DEPRECATED("Use IsolateInBackgroundNotification() instead")
void DisableMemorySavingsMode();
/**
@ -1534,6 +1518,13 @@ class V8_EXPORT Isolate {
V8_DEPRECATED("Wasm exceptions are always enabled")
void SetWasmExceptionsEnabledCallback(WasmExceptionsEnabledCallback callback);
/**
* Register a callback to control whether Wasm GC is enabled.
* The callback overwrites the value of the flag.
* If the callback returns true, it will also enable Wasm stringrefs.
*/
void SetWasmGCEnabledCallback(WasmGCEnabledCallback callback);
void SetSharedArrayBufferConstructorEnabledCallback(
SharedArrayBufferConstructorEnabledCallback callback);
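Wiring up the new Wasm GC callback is then a one-liner; a sketch (the predicate body is illustrative):

bool IsWasmGCEnabled(v8::Local<v8::Context> context) {
  // Consult per-context embedder settings here; returning true also
  // enables Wasm stringrefs.
  return true;
}

isolate->SetWasmGCEnabledCallback(IsWasmGCEnabled);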


@ -53,6 +53,7 @@ class Utils;
namespace internal {
template <typename T>
class CustomArguments;
class SamplingHeapProfiler;
} // namespace internal
namespace api_internal {
@ -313,6 +314,7 @@ class Local {
friend class BasicTracedReference;
template <class F>
friend class TracedReference;
friend class v8::internal::SamplingHeapProfiler;
explicit V8_INLINE Local(T* that) : val_(that) {}
V8_INLINE static Local<T> New(Isolate* isolate, T* that) {


@ -108,34 +108,12 @@ struct WasmModuleDecoded {
function_count(function_count),
wall_clock_duration_in_us(wall_clock_duration_in_us) {}
V8_DEPRECATED("Use the version without cpu_duration_in_us")
WasmModuleDecoded(bool async, bool streamed, bool success,
size_t module_size_in_bytes, size_t function_count,
int64_t wall_clock_duration_in_us,
int64_t cpu_duration_in_us)
: async(async),
streamed(streamed),
success(success),
module_size_in_bytes(module_size_in_bytes),
function_count(function_count),
wall_clock_duration_in_us(wall_clock_duration_in_us),
cpu_duration_in_us(cpu_duration_in_us) {}
START_ALLOW_USE_DEPRECATED()
// Copy constructor and copy assignment operator are allowed to copy the
// {cpu_duration_in_us} field.
WasmModuleDecoded(const WasmModuleDecoded&) = default;
WasmModuleDecoded& operator=(const WasmModuleDecoded&) = default;
END_ALLOW_USE_DEPRECATED()
bool async = false;
bool streamed = false;
bool success = false;
size_t module_size_in_bytes = 0;
size_t function_count = 0;
int64_t wall_clock_duration_in_us = -1;
V8_DEPRECATED("We do not collect cpu times any more")
int64_t cpu_duration_in_us = -1;
};
struct WasmModuleCompiled {
@ -155,30 +133,6 @@ struct WasmModuleCompiled {
liftoff_bailout_count(liftoff_bailout_count),
wall_clock_duration_in_us(wall_clock_duration_in_us) {}
V8_DEPRECATED("Use the version without cpu_duration_in_us")
WasmModuleCompiled(bool async, bool streamed, bool cached, bool deserialized,
bool lazy, bool success, size_t code_size_in_bytes,
size_t liftoff_bailout_count,
int64_t wall_clock_duration_in_us,
int64_t cpu_duration_in_us)
: async(async),
streamed(streamed),
cached(cached),
deserialized(deserialized),
lazy(lazy),
success(success),
code_size_in_bytes(code_size_in_bytes),
liftoff_bailout_count(liftoff_bailout_count),
wall_clock_duration_in_us(wall_clock_duration_in_us),
cpu_duration_in_us(cpu_duration_in_us) {}
START_ALLOW_USE_DEPRECATED()
// Copy constructor and copy assignment operator are allowed to copy the
// {cpu_duration_in_us} field.
WasmModuleCompiled(const WasmModuleCompiled&) = default;
WasmModuleCompiled& operator=(const WasmModuleCompiled&) = default;
END_ALLOW_USE_DEPRECATED()
bool async = false;
bool streamed = false;
bool cached = false;
@ -188,8 +142,6 @@ struct WasmModuleCompiled {
size_t code_size_in_bytes = 0;
size_t liftoff_bailout_count = 0;
int64_t wall_clock_duration_in_us = -1;
V8_DEPRECATED("We do not collect cpu times any more")
int64_t cpu_duration_in_us = -1;
};
struct WasmModuleInstantiated {


@ -596,6 +596,7 @@ class V8_EXPORT HeapGraphNode {
kBigInt = 13, // BigInt.
kObjectShape = 14, // Internal data used for tracking the shapes (or
// "hidden classes") of JS objects.
kWasmObject = 15, // A WasmGC struct or array.
};
/** Returns node type (see HeapGraphNode::Type). */


@ -347,6 +347,12 @@ class V8_EXPORT Script {
* ScriptOrigin. This can be either a v8::String or v8::Undefined.
*/
Local<Value> GetResourceName();
/**
* If the script was compiled, returns the positions of lazy functions which
* were eventually compiled and executed.
*/
std::vector<int> GetProducedCompileHints() const;
};
enum class ScriptType { kClassic, kModule };
@ -569,7 +575,8 @@ class V8_EXPORT ScriptCompiler {
enum CompileOptions {
kNoCompileOptions = 0,
kConsumeCodeCache,
kEagerCompile
kEagerCompile,
kProduceCompileHints
};
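Taken together with the new kProduceCompileHints option, the intended round trip looks roughly like this (|code|, |origin|, and an entered |context| are assumed to be set up by the embedder):

v8::ScriptCompiler::Source source(code, origin);
v8::Local<v8::Script> script =
    v8::ScriptCompiler::Compile(context, &source,
                                v8::ScriptCompiler::kProduceCompileHints)
        .ToLocalChecked();
script->Run(context).ToLocalChecked();
// Positions of lazy functions that were eventually compiled and executed;
// fetching them also clears the recorded data.
std::vector<int> hints = script->GetProducedCompileHints();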
/**


@ -117,11 +117,11 @@ class TracedReferenceBase {
/**
* A traced handle with copy and move semantics. The handle is to be used
* together with |v8::EmbedderHeapTracer| or as part of GarbageCollected objects
* (see v8-cppgc.h) and specifies edges from C++ objects to JavaScript.
* together as part of GarbageCollected objects (see v8-cppgc.h) or from the stack
* and specifies edges from C++ objects to JavaScript.
*
* The exact semantics are:
* - Tracing garbage collections use |v8::EmbedderHeapTracer| or cppgc.
* - Tracing garbage collections use CppHeap.
* - Non-tracing garbage collections refer to
* |v8::EmbedderRootsHandler::IsRoot()| whether the handle should
* be treated as root or not.
@ -166,7 +166,6 @@ class BasicTracedReference : public TracedReferenceBase {
Isolate* isolate, T* that, void* slot,
internal::GlobalHandleStoreMode store_mode);
friend class EmbedderHeapTracer;
template <typename F>
friend class Local;
friend class Object;
@ -181,13 +180,7 @@ class BasicTracedReference : public TracedReferenceBase {
/**
* A traced handle without destructor that clears the handle. The embedder needs
* to ensure that the handle is not accessed once the V8 object has been
* reclaimed. This can happen when the handle is not passed through the
* EmbedderHeapTracer. For more details see BasicTracedReference.
*
* The reference assumes the embedder has precise knowledge about references at
* all times. In case V8 needs to separately handle on-stack references, the
* embedder is required to set the stack start through
* |EmbedderHeapTracer::SetStackStart|.
* reclaimed. For more details see BasicTracedReference.
*/
template <typename T>
class TracedReference : public BasicTracedReference<T> {


@ -9,7 +9,7 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 11
#define V8_MINOR_VERSION 1
#define V8_MINOR_VERSION 2
#define V8_BUILD_NUMBER 0
#define V8_PATCH_LEVEL 0


@ -346,6 +346,14 @@ path. Add it with -I<path> to the command line
# define V8_HAS_ATTRIBUTE_NONNULL (__has_attribute(nonnull))
# define V8_HAS_ATTRIBUTE_NOINLINE (__has_attribute(noinline))
# define V8_HAS_ATTRIBUTE_UNUSED (__has_attribute(unused))
// Support for the "preserve_most" attribute is incomplete on 32-bit, and we see
// failures in component builds. Thus only use it in 64-bit non-component builds
// for now.
#if (defined(_M_X64) || defined(__x86_64__) || defined(__AARCH64EL__) || \
defined(_M_ARM64)) /* x64 or arm64 */ \
&& !defined(COMPONENT_BUILD)
# define V8_HAS_ATTRIBUTE_PRESERVE_MOST (__has_attribute(preserve_most))
#endif
# define V8_HAS_ATTRIBUTE_VISIBILITY (__has_attribute(visibility))
# define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \
(__has_attribute(warn_unused_result))
@ -504,6 +512,21 @@ path. Add it with -I<path> to the command line
#endif
// A macro used to change the calling conventions to preserve all registers (no
// caller-saved registers). Use this for cold functions called from hot
// functions.
// Note: The attribute is considered experimental, so apply with care. Also,
// "preserve_most" is currently not handling the return value correctly, so only
// use it for functions returning void (see https://reviews.llvm.org/D141020).
// Use like:
// V8_NOINLINE V8_PRESERVE_MOST void UnlikelyMethod();
#if V8_HAS_ATTRIBUTE_PRESERVE_MOST
# define V8_PRESERVE_MOST __attribute__((preserve_most))
#else
# define V8_PRESERVE_MOST /* NOT SUPPORTED */
#endif
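To make the register-pressure argument concrete, a sketch of the intended shape (illustrative names; note the void return, per the LLVM caveat above):

V8_NOINLINE V8_PRESERVE_MOST void ReportCounterOverflow();  // cold path

inline void BumpCounter(uint32_t* counter) {
  if (V8_UNLIKELY(++*counter == 0)) ReportCounterOverflow();
  // Hot path: the call above does not force this function to spill
  // caller-saved registers, since the callee preserves most of them.
}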
// A macro (V8_DEPRECATED) to mark classes or functions as deprecated.
#if defined(V8_DEPRECATION_WARNINGS)
# define V8_DEPRECATED(message) [[deprecated(message)]]


@ -31,6 +31,10 @@
"label": "//test:v8_d8_default",
"type": "script",
},
"d8_pgo": {
"label": "//test:d8_pgo",
"type": "script",
},
"generate-bytecode-expectations": {
"label": "//test/unittests:generate-bytecode-expectations",
"type": "script",


@ -52,6 +52,10 @@
'linux-v8-dr': 'release_x64',
},
'client.v8': {
# PGO
'V8 Linux PGO instrumentation - builder' : 'builtins_profiling_x86',
'V8 Linux64 PGO instrumentation - builder' : 'builtins_profiling_x64',
# Linux.
'V8 Linux - builder': 'release_x86_gcmole',
'V8 Linux - debug builder': 'debug_x86',
@ -59,7 +63,7 @@
'V8 Linux - noi18n - debug builder': 'debug_x86_no_i18n',
'V8 Linux - verify csa - builder': 'release_x86_verify_csa',
# Linux64.
'V8 Linux64 - builder': 'release_x64',
'V8 Linux64 - builder': 'release_x64_gcmole',
'V8 Linux64 - builder (goma cache silo)': 'release_x64',
'V8 Linux64 - builder (reclient)': 'release_x64_reclient',
'V8 Linux64 - builder (reclient compare)': 'release_x64_reclient',
@ -102,6 +106,7 @@
# Sanitizers.
'V8 Linux64 ASAN - builder': 'release_x64_asan',
'V8 Linux64 TSAN - builder': 'release_x64_tsan',
'V8 Linux64 TSAN - debug builder': 'debug_x64_tsan_minimal_symbols',
'V8 Linux64 TSAN - no-concurrent-marking - builder': 'release_x64_tsan_no_cm',
'V8 Linux - arm64 - sim - CFI - builder': 'release_simulate_arm64_cfi',
'V8 Linux - arm64 - sim - MSAN - builder': 'release_simulate_arm64_msan',
@ -182,7 +187,7 @@
'V8 Arm - builder': 'release_arm',
'V8 Arm - debug builder': 'debug_arm',
'V8 Android Arm - builder': 'release_android_arm',
'V8 Linux - arm - sim - builder': 'release_simulate_arm',
'V8 Linux - arm - sim - builder': 'release_simulate_arm_gcmole',
'V8 Linux - arm - sim - debug builder': 'debug_simulate_arm',
'V8 Linux - arm - sim - lite - builder': 'release_simulate_arm_lite',
'V8 Linux - arm - sim - lite - debug builder': 'debug_simulate_arm_lite',
@ -190,7 +195,7 @@
'V8 Android Arm64 - builder': 'release_android_arm64',
'V8 Android Arm64 - debug builder': 'debug_android_arm64',
'V8 Arm64 - builder': 'release_arm64_hard_float',
'V8 Linux - arm64 - sim - builder': 'release_simulate_arm64',
'V8 Linux - arm64 - sim - builder': 'release_simulate_arm64_gcmole',
'V8 Linux - arm64 - sim - debug builder': 'debug_simulate_arm64',
'V8 Linux - arm64 - sim - gc stress - builder': 'debug_simulate_arm64',
# Mips.
@ -242,7 +247,7 @@
'v8_linux64_nodcheck_compile_rel': 'release_x64',
'v8_linux64_perfetto_compile_dbg': 'debug_x64_perfetto',
'v8_linux64_no_pointer_compression_compile_rel': 'release_x64_no_pointer_compression',
'v8_linux64_compile_rel': 'release_x64_test_features_trybot',
'v8_linux64_compile_rel': 'release_x64_test_features_gcmole_trybot',
'v8_linux64_no_sandbox_compile_rel': 'release_x64_no_sandbox',
'v8_linux64_predictable_compile_rel': 'release_x64_predictable',
'v8_linux64_shared_compile_rel': 'release_x64_shared_verify_heap',
@ -257,6 +262,7 @@
'v8_linux_riscv32_compile_rel': 'release_simulate_riscv32',
'v8_linux64_riscv64_compile_rel': 'release_simulate_riscv64',
'v8_linux64_tsan_compile_rel': 'release_x64_tsan_minimal_symbols',
'v8_linux64_tsan_compile_dbg': 'debug_x64_tsan_minimal_symbols',
'v8_linux64_tsan_no_cm_compile_rel': 'release_x64_tsan_no_cm',
'v8_linux64_tsan_isolates_compile_rel':
'release_x64_tsan_minimal_symbols',
@ -284,13 +290,13 @@
'v8_mac64_compile_dbg': 'debug_x64',
'v8_mac64_noopt_compile_dbg': 'full_debug_x64',
'v8_mac64_asan_compile_rel': 'release_x64_asan_no_lsan',
'v8_linux_arm_compile_rel': 'release_simulate_arm_trybot',
'v8_linux_arm_compile_rel': 'release_simulate_arm_gcmole_trybot',
'v8_linux_arm_lite_compile_dbg': 'debug_simulate_arm_lite',
'v8_linux_arm_lite_compile_rel': 'release_simulate_arm_lite_trybot',
'v8_linux_arm_compile_dbg': 'debug_simulate_arm',
'v8_linux_arm_armv8a_rel': 'release_simulate_arm_trybot',
'v8_linux_arm_armv8a_dbg': 'debug_simulate_arm',
'v8_linux_arm64_compile_rel': 'release_simulate_arm64_trybot',
'v8_linux_arm64_compile_rel': 'release_simulate_arm64_gcmole_trybot',
'v8_linux_arm64_cfi_compile_rel' : 'release_simulate_arm64_cfi',
'v8_linux_arm64_compile_dbg': 'debug_simulate_arm64',
'v8_linux_arm64_gc_stress_compile_dbg': 'debug_simulate_arm64',
@ -397,18 +403,24 @@
'debug_bot', 'simulate_arm64', 'asan', 'lsan'],
# Release configs for simulators.
'release_simulate_arm': [
'release_bot', 'simulate_arm'],
'release_simulate_arm_gcmole': [
'release_bot', 'simulate_arm', 'gcmole'],
'release_simulate_arm_lite': [
'release_bot', 'simulate_arm', 'v8_enable_lite_mode'],
'release_simulate_arm_trybot': [
'release_trybot', 'simulate_arm'],
'release_simulate_arm_gcmole_trybot': [
'release_trybot', 'simulate_arm', 'gcmole'],
'release_simulate_arm_lite_trybot': [
'release_trybot', 'simulate_arm', 'v8_enable_lite_mode'],
'release_simulate_arm_trybot': [
'release_trybot', 'simulate_arm'],
'release_simulate_arm64': [
'release_bot', 'simulate_arm64'],
'release_simulate_arm64_cfi': [
'release_bot', 'simulate_arm64', 'v8_control_flow_integrity'],
'release_simulate_arm64_gcmole': [
'release_bot', 'simulate_arm64', 'gcmole'],
'release_simulate_arm64_gcmole_trybot': [
'release_trybot', 'simulate_arm64', 'gcmole'],
'release_simulate_arm64_no_pointer_compression': [
'release_bot', 'simulate_arm64', 'no_sandbox', 'dcheck_always_on',
'v8_enable_slow_dchecks', 'v8_disable_pointer_compression'],
@ -505,6 +517,8 @@
'release_x64_fuzzilli': [
'release_bot', 'x64', 'dcheck_always_on', 'v8_enable_slow_dchecks',
'v8_verify_heap', 'v8_verify_csa', 'fuzzilli'],
'release_x64_gcmole': [
'release_bot', 'x64', 'gcmole'],
'release_x64_msvc': [
'release_bot_no_goma', 'x64', 'minimal_symbols', 'msvc'],
'release_x64_correctness_fuzzer' : [
@ -537,6 +551,8 @@
'release_bot', 'x64', 'no_sandbox'],
'release_x64_trybot': [
'release_trybot', 'x64'],
'release_x64_test_features_gcmole_trybot': [
'release_trybot', 'x64', 'v8_enable_test_features', 'gcmole'],
'release_x64_test_features_trybot': [
'release_trybot', 'x64', 'v8_enable_test_features'],
'release_x64_tsan': [
@ -599,6 +615,9 @@
'debug_trybot', 'x64', 'v8_enable_dict_property_const_tracking'],
'debug_x64_trybot_custom': [
'debug_trybot', 'x64', 'v8_snapshot_custom'],
'debug_x64_tsan_minimal_symbols': [
'debug_bot_no_slow_dchecks', 'minimal_symbols', 'x64', 'dcheck_always_on',
'tsan', 'v8_disable_verify_heap', 'v8_fast_mksnapshot'],
'full_debug_x64': [
'debug_bot', 'x64', 'v8_full_debug'],
@ -650,7 +669,11 @@
# Torque compare test
'torque_compare': [
'release_bot', 'verify_torque']
'release_bot', 'verify_torque'],
# PGO
'builtins_profiling_x86': ['builtins_profiling', 'x86'],
'builtins_profiling_x64': ['builtins_profiling', 'x64'],
},
'mixins': {
@ -675,6 +698,11 @@
'gn_args': 'is_asan=true',
},
'builtins_profiling': {
'mixins' : ['release_bot_reclient'],
'gn_args': 'v8_enable_builtins_profiling=true',
},
'cfi': {
'mixins': ['v8_enable_test_features'],
'gn_args': ('is_cfi=true use_cfi_cast=true use_cfi_icall=true '
@ -712,6 +740,12 @@
'gn_args': 'is_debug=true v8_enable_backtrace=true',
},
'debug_bot_no_slow_dchecks': {
'mixins': [
'debug', 'shared', 'goma', 'v8_disable_slow_dchecks',
'v8_optimized_debug', 'v8_enable_google_benchmark'],
},
'debug_bot': {
'mixins': [
'debug', 'shared', 'goma', 'v8_enable_slow_dchecks',
@ -934,6 +968,10 @@
'gn_args': 'v8_enable_runtime_call_stats=false',
},
'v8_disable_verify_heap': {
'gn_args': 'v8_enable_verify_heap=false',
},
'v8_expose_memory_corruption_api': {
'gn_args': 'v8_expose_memory_corruption_api=true',
},
@ -946,6 +984,10 @@
'gn_args': 'v8_enable_slow_dchecks=true',
},
'v8_disable_slow_dchecks': {
'gn_args': 'v8_enable_slow_dchecks=false',
},
'v8_enable_javascript_promise_hooks': {
'gn_args': 'v8_enable_javascript_promise_hooks=true',
},
@ -981,6 +1023,10 @@
'gn_args': 'v8_enable_vtunejit=true v8_enable_vtunetracemark=true',
},
'v8_fast_mksnapshot': {
'gn_args': 'v8_enable_fast_mksnapshot=true',
},
'v8_full_debug': {
'gn_args': 'v8_optimized_debug=false',
},


@ -180,27 +180,7 @@
],
'shards': 4,
},
{'name': 'gcmole_v2', 'variant': 'ia32'},
{'name': 'gcmole_v2', 'variant': 'x64'},
{'name': 'gcmole_v2', 'variant': 'arm'},
{'name': 'gcmole_v2', 'variant': 'arm64'},
# TODO(https://crbug.com/v8/12660): Remove v2 above after testing.
{'name': 'gcmole_v3', 'variant': 'ia32', 'shards': 4},
{'name': 'gcmole_v3', 'variant': 'x64', 'shards': 4},
{'name': 'gcmole_v3', 'variant': 'arm', 'shards': 4},
{'name': 'gcmole_v3', 'variant': 'arm64', 'shards': 4},
{
'name': 'gcmole_v2',
'variant': 'x64',
'suffix': 'test single host',
'test_args': ['--test-run'],
},
{
'name': 'gcmole_v3',
'variant': 'x64',
'suffix': 'test multi host',
'test_args': ['--test-run'],
},
],
},
'v8_linux_optional_rel': {
@ -350,6 +330,7 @@
{'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing', 'shards': 10},
{'name': 'v8testing', 'variant': 'extra', 'shards': 10},
{'name': 'gcmole_v3', 'variant': 'arm', 'shards': 4},
],
},
##############################################################################
@ -475,14 +456,10 @@
{'name': 'webkit', 'variant': 'stress_sampling'},
# Stress snapshot.
{'name': 'mjsunit', 'variant': 'stress_snapshot'},
# Maglev.
{'name': 'mjsunit', 'variant': 'maglev'},
# Stress maglev.
{'name': 'mjsunit', 'variant': 'stress_maglev'},
# Experimental regexp engine.
{'name': 'mjsunit', 'variant': 'experimental_regexp'},
# Wasm write protect code space.
{'name': 'mjsunit', 'variant': 'wasm_write_protect_code'},
],
},
'v8_linux64_gc_stress_custom_snapshot_dbg': {
@ -636,6 +613,20 @@
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
# Maglev -- move to extra once more architectures are supported.
{'name': 'mjsunit', 'variant': 'maglev'},
# GCMole.
{'name': 'gcmole_v3', 'variant': 'x64', 'shards': 4},
{
'name': 'gcmole_v2',
'variant': 'x64',
'suffix': 'test single host',
'test_args': ['--test-run'],
},
{
'name': 'gcmole_v3',
'variant': 'x64',
'suffix': 'test multi host',
'test_args': ['--test-run'],
},
],
},
'v8_linux64_predictable_rel': {
@ -672,6 +663,18 @@
{'name': 'v8testing', 'variant': 'stress_concurrent_allocation', 'shards': 2},
],
},
'v8_linux64_tsan_dbg': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'benchmarks', 'shards': 2},
{'name': 'mozilla', 'shards': 4},
{'name': 'test262', 'variant': 'default', 'shards': 5},
{'name': 'v8testing', 'shards': 12},
{'name': 'v8testing', 'variant': 'extra', 'shards': 10},
],
},
'v8_linux64_tsan_no_cm_rel': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
@ -746,6 +749,7 @@
{'name': 'test262', 'variant': 'default', 'shards': 4},
{'name': 'v8testing', 'shards': 14},
{'name': 'v8testing', 'variant': 'extra', 'shards': 14},
{'name': 'gcmole_v3', 'variant': 'arm64', 'shards': 4},
],
},
'v8_linux_arm64_cfi_rel': {
@ -838,7 +842,7 @@
# Win64
'v8_win64_asan_rel': {
'swarming_dimensions' : {
'os': 'Windows-10-19042',
'os': 'Windows-10-19045',
},
'tests': [
{'name': 'v8testing', 'shards': 5},
@ -847,7 +851,7 @@
'v8_win64_dbg': {
'swarming_dimensions' : {
'cpu': 'x86-64',
'os': 'Windows-10-19042',
'os': 'Windows-10-19045',
},
'tests': [
{'name': 'mozilla'},
@ -859,7 +863,7 @@
'v8_win64_msvc_rel': {
'swarming_dimensions' : {
'cpu': 'x86-64',
'os': 'Windows-10-19042',
'os': 'Windows-10-19045',
},
'tests': [
{'name': 'mozilla'},
@ -870,7 +874,7 @@
'v8_win64_rel': {
'swarming_dimensions' : {
'cpu': 'x86-64',
'os': 'Windows-10-19042',
'os': 'Windows-10-19045',
},
'tests': [
{'name': 'mozilla'},
@ -940,6 +944,8 @@
},
'tests': [
{'name': 'v8testing'},
# Maglev -- move to extra once more architectures are supported.
{'name': 'mjsunit', 'variant': 'maglev'},
],
},
'v8_mac_arm64_dbg': {
@ -950,6 +956,8 @@
},
'tests': [
{'name': 'v8testing'},
# Maglev -- move to extra once more architectures are supported.
{'name': 'mjsunit', 'variant': 'maglev'},
],
},
'v8_mac_arm64_full_dbg': {
@ -960,6 +968,8 @@
},
'tests': [
{'name': 'v8testing'},
# Maglev -- move to extra once more architectures are supported.
{'name': 'mjsunit', 'variant': 'maglev'},
],
},
'v8_mac_arm64_no_pointer_compression_dbg': {
@ -1089,27 +1099,7 @@
'test_args': ['--extra-flags', '--noenable-avx'],
'shards': 2
},
{'name': 'gcmole_v2', 'variant': 'ia32'},
{'name': 'gcmole_v2', 'variant': 'x64'},
{'name': 'gcmole_v2', 'variant': 'arm'},
{'name': 'gcmole_v2', 'variant': 'arm64'},
# TODO(https://crbug.com/v8/12660): Remove v2 above after testing.
{'name': 'gcmole_v3', 'variant': 'ia32', 'shards': 4},
{'name': 'gcmole_v3', 'variant': 'x64', 'shards': 4},
{'name': 'gcmole_v3', 'variant': 'arm', 'shards': 4},
{'name': 'gcmole_v3', 'variant': 'arm64', 'shards': 4},
{
'name': 'gcmole_v2',
'variant': 'x64',
'suffix': 'test single host',
'test_args': ['--test-run'],
},
{
'name': 'gcmole_v3',
'variant': 'x64',
'suffix': 'test multi host',
'test_args': ['--test-run'],
},
],
},
'V8 Linux - arm64 - sim - CFI': {
@ -1299,6 +1289,14 @@
{'name': 'v8testing'},
],
},
'V8 Linux PGO instrumentation - builder' : {
'swarming_dimensions': {
'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'pgo_instrumentation'}
],
},
'V8 Linux64': {
'swarming_dimensions': {
'cpu': 'x86-64-avx2',
@ -1345,6 +1343,20 @@
'suffix': 'noavx',
'test_args': ['--extra-flags', '--noenable-avx']
},
# GCMole.
{'name': 'gcmole_v3', 'variant': 'x64', 'shards': 4},
{
'name': 'gcmole_v2',
'variant': 'x64',
'suffix': 'test single host',
'test_args': ['--test-run'],
},
{
'name': 'gcmole_v3',
'variant': 'x64',
'suffix': 'test multi host',
'test_args': ['--test-run'],
},
],
},
'V8 Linux64 - cfi': {
@ -1457,14 +1469,10 @@
{'name': 'webkit', 'variant': 'stress_sampling'},
# Stress snapshot.
{'name': 'mjsunit', 'variant': 'stress_snapshot'},
# Maglev.
{'name': 'mjsunit', 'variant': 'maglev'},
# Stress maglev.
{'name': 'mjsunit', 'variant': 'stress_maglev'},
# Experimental regexp engine.
{'name': 'mjsunit', 'variant': 'experimental_regexp'},
# Wasm write protect code space.
{'name': 'mjsunit', 'variant': 'wasm_write_protect_code'},
],
},
'V8 Linux64 - cppgc-non-default - debug': {
@ -1523,14 +1531,10 @@
{'name': 'webkit', 'variant': 'stress_sampling'},
# Stress snapshot.
{'name': 'mjsunit', 'variant': 'stress_snapshot'},
# Maglev.
{'name': 'mjsunit', 'variant': 'maglev'},
# Stress maglev.
{'name': 'mjsunit', 'variant': 'stress_maglev'},
# Experimental regexp engine.
{'name': 'mjsunit', 'variant': 'experimental_regexp'},
# Wasm write protect code space.
{'name': 'mjsunit', 'variant': 'wasm_write_protect_code'},
],
},
'V8 Linux64 gcc': {
@ -1657,6 +1661,14 @@
},
],
},
'V8 Linux64 PGO instrumentation - builder' : {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'pgo_instrumentation'}
],
},
'V8 Linux64 TSAN': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
@ -1671,6 +1683,18 @@
{'name': 'v8testing', 'variant': 'stress_concurrent_allocation', 'shards': 2},
],
},
'V8 Linux64 TSAN - debug': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
},
'tests': [
{'name': 'benchmarks', 'shards': 2},
{'name': 'mozilla', 'shards': 4},
{'name': 'test262', 'variant': 'default', 'shards': 5},
{'name': 'v8testing', 'shards': 12},
{'name': 'v8testing', 'variant': 'extra', 'shards': 10},
],
},
'V8 Linux64 TSAN - stress-incremental-marking': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
@ -1797,6 +1821,8 @@
'tests': [
{'name': 'v8testing'},
{'name': 'v8testing', 'variant': 'extra'},
# Maglev -- move to extra once more architectures are supported.
{'name': 'mjsunit', 'variant': 'maglev'},
],
},
'V8 Mac - arm64 - debug': {
@ -1813,6 +1839,8 @@
'tests': [
{'name': 'v8testing', 'shards': 2},
{'name': 'v8testing', 'variant': 'extra', 'shards': 2},
# Maglev -- move to extra once more architectures are supported.
{'name': 'mjsunit', 'variant': 'maglev'},
],
},
'V8 Mac - arm64 - no pointer compression debug': {
@ -1879,7 +1907,7 @@
},
'V8 Win64': {
'swarming_dimensions': {
'os': 'Windows-10-19042',
'os': 'Windows-10-19045',
},
'tests': [
{'name': 'mozilla'},
@ -1890,7 +1918,7 @@
},
'V8 Win64 - debug': {
'swarming_dimensions': {
'os': 'Windows-10-19042',
'os': 'Windows-10-19045',
},
'tests': [
{'name': 'mozilla'},
@ -1901,7 +1929,7 @@
},
'V8 Win64 - msvc': {
'swarming_dimensions': {
'os': 'Windows-10-19042',
'os': 'Windows-10-19045',
},
'tests': [
{'name': 'mozilla'},
@ -1911,7 +1939,7 @@
},
'V8 Win64 ASAN': {
'swarming_dimensions': {
'os': 'Windows-10-19042',
'os': 'Windows-10-19045',
},
'tests': [
{'name': 'v8testing', 'shards': 5},
@ -2056,6 +2084,8 @@
'test_args': ['--novfp3'],
'shards': 6
},
# GCMole.
{'name': 'gcmole_v3', 'variant': 'arm', 'shards': 4},
],
},
'V8 Linux - arm - sim - debug': {
@ -2137,6 +2167,7 @@
{'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing', 'shards': 3},
{'name': 'v8testing', 'variant': 'extra', 'shards': 2},
{'name': 'gcmole_v3', 'variant': 'arm64', 'shards': 4},
],
},
'V8 Linux - arm64 - sim - debug': {


@ -10,19 +10,20 @@ include_rules = [
"-src/bigint",
"+src/bigint/bigint.h",
"-src/compiler",
"+src/compiler/pipeline.h",
"+src/compiler/code-assembler.h",
"+src/compiler/turbofan.h",
"+src/compiler/wasm-compiler-definitions.h",
"+src/compiler/wasm-compiler.h",
"-src/heap",
"+src/heap/basic-memory-chunk.h",
"+src/heap/code-range.h",
"+src/heap/combined-heap.h",
"+src/heap/embedder-tracing.h",
"+src/heap/factory.h",
"+src/heap/factory-inl.h",
# TODO(v8:10496): Don't expose so much (through transitive includes) outside
# of heap/.
"+src/heap/gc-tracer.h",
"+src/heap/gc-tracer-inl.h",
"+src/heap/heap.h",
"+src/heap/heap-verifier.h",
"+src/heap/heap-inl.h",
@ -76,6 +77,7 @@ include_rules = [
"+starboard",
# Using cppgc inside v8 is not (yet) allowed.
"-include/cppgc",
"+include/cppgc/common.h",
"+include/cppgc/platform.h",
"+include/cppgc/source-location.h",
]
@ -84,7 +86,8 @@ specific_include_rules = {
"d8\.cc": [
"+include/libplatform/libplatform.h",
"+include/libplatform/v8-tracing.h",
"+perfetto/tracing.h"
"+perfetto/tracing/track_event.h",
"+perfetto/tracing/track_event_legacy.h"
],
"d8-platforms\.cc": [
"+include/libplatform/libplatform.h",


@ -83,7 +83,7 @@ MaybeHandle<Object> DefineAccessorProperty(Isolate* isolate,
InstantiateFunction(isolate,
Handle<FunctionTemplateInfo>::cast(getter)),
Object);
Handle<CodeT> trampoline = BUILTIN_CODE(isolate, DebugBreakTrampoline);
Handle<Code> trampoline = BUILTIN_CODE(isolate, DebugBreakTrampoline);
Handle<JSFunction>::cast(getter)->set_code(*trampoline);
}
if (setter->IsFunctionTemplateInfo() &&
@ -93,7 +93,7 @@ MaybeHandle<Object> DefineAccessorProperty(Isolate* isolate,
InstantiateFunction(isolate,
Handle<FunctionTemplateInfo>::cast(setter)),
Object);
Handle<CodeT> trampoline = BUILTIN_CODE(isolate, DebugBreakTrampoline);
Handle<Code> trampoline = BUILTIN_CODE(isolate, DebugBreakTrampoline);
Handle<JSFunction>::cast(setter)->set_code(*trampoline);
}
RETURN_ON_EXCEPTION(


@ -38,6 +38,7 @@
#include "src/baseline/baseline-batch-compiler.h"
#include "src/builtins/accessors.h"
#include "src/builtins/builtins-utils.h"
#include "src/codegen/compilation-cache.h"
#include "src/codegen/compiler.h"
#include "src/codegen/cpu-features.h"
#include "src/codegen/script-details.h"
@ -64,7 +65,6 @@
#include "src/handles/persistent-handles.h"
#include "src/handles/shared-object-conveyor-handles.h"
#include "src/handles/traced-handles.h"
#include "src/heap/embedder-tracing.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier.h"
#include "src/heap/safepoint.h"
@ -128,7 +128,6 @@
#include "src/tracing/trace-event.h"
#include "src/utils/detachable-vector.h"
#include "src/utils/version.h"
#include "src/web-snapshot/web-snapshot.h"
#if V8_ENABLE_WEBASSEMBLY
#include "src/trap-handler/trap-handler.h"
@ -2245,42 +2244,6 @@ MaybeLocal<Value> Script::Run(Local<Context> context,
}
#endif
auto fun = i::Handle<i::JSFunction>::cast(Utils::OpenHandle(this));
// TODO(crbug.com/1193459): remove once ablation study is completed
base::ElapsedTimer timer;
base::TimeDelta delta;
if (i::v8_flags.script_delay > 0) {
delta = v8::base::TimeDelta::FromMillisecondsD(i::v8_flags.script_delay);
}
if (i::v8_flags.script_delay_once > 0 && !i_isolate->did_run_script_delay()) {
delta =
v8::base::TimeDelta::FromMillisecondsD(i::v8_flags.script_delay_once);
i_isolate->set_did_run_script_delay(true);
}
if (i::v8_flags.script_delay_fraction > 0.0) {
timer.Start();
} else if (delta.InMicroseconds() > 0) {
timer.Start();
while (timer.Elapsed() < delta) {
// Busy wait.
}
}
if (V8_UNLIKELY(i::v8_flags.experimental_web_snapshots)) {
i::Handle<i::HeapObject> maybe_script =
handle(fun->shared().script(), i_isolate);
if (maybe_script->IsScript() &&
i::Script::cast(*maybe_script).type() == i::Script::TYPE_WEB_SNAPSHOT) {
i::WebSnapshotDeserializer deserializer(
reinterpret_cast<i::Isolate*>(v8_isolate),
i::Handle<i::Script>::cast(maybe_script));
deserializer.Deserialize();
RETURN_ON_FAILED_EXECUTION(Value);
Local<Value> result = v8::Undefined(v8_isolate);
RETURN_ESCAPED(result);
}
}
i::Handle<i::Object> receiver = i_isolate->global_proxy();
// TODO(cbruni, chromium:1244145): Remove once migrated to the context.
i::Handle<i::Object> options(
@ -2290,15 +2253,6 @@ MaybeLocal<Value> Script::Run(Local<Context> context,
has_pending_exception = !ToLocal<Value>(
i::Execution::CallScript(i_isolate, fun, receiver, options), &result);
if (i::v8_flags.script_delay_fraction > 0.0) {
delta = v8::base::TimeDelta::FromMillisecondsD(
timer.Elapsed().InMillisecondsF() * i::v8_flags.script_delay_fraction);
timer.Restart();
while (timer.Elapsed() < delta) {
// Busy wait.
}
}
RETURN_ON_FAILED_EXECUTION(Value);
RETURN_ESCAPED(result);
}
@ -2337,6 +2291,31 @@ Local<Value> Script::GetResourceName() {
i::handle(i::Script::cast(sfi.script()).name(), i_isolate));
}
std::vector<int> Script::GetProducedCompileHints() const {
i::DisallowGarbageCollection no_gc;
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
i::Isolate* i_isolate = func->GetIsolate();
i::SharedFunctionInfo sfi = (*func).shared();
CHECK(sfi.script().IsScript());
i::Script script = i::Script::cast(sfi.script());
i::Object maybe_array_list = script.compiled_lazy_function_positions();
std::vector<int> result;
if (!maybe_array_list.IsUndefined(i_isolate)) {
i::ArrayList array_list = i::ArrayList::cast(maybe_array_list);
result.reserve(array_list.Length());
for (int i = 0; i < array_list.Length(); ++i) {
i::Object item = array_list.Get(i);
CHECK(item.IsSmi());
result.push_back(i::Smi::ToInt(item));
}
// Clear the data; the embedder can still request more data later, but it'll
// have to keep track of the original data itself.
script.set_compiled_lazy_function_positions(
i::ReadOnlyRoots(i_isolate).undefined_value());
}
return result;
}
// static
Local<PrimitiveArray> PrimitiveArray::New(Isolate* v8_isolate, int length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
@ -2759,9 +2738,10 @@ MaybeLocal<Script> ScriptCompiler::Compile(Local<Context> context,
MaybeLocal<Module> ScriptCompiler::CompileModule(
Isolate* v8_isolate, Source* source, CompileOptions options,
NoCacheReason no_cache_reason) {
Utils::ApiCheck(options == kNoCompileOptions || options == kConsumeCodeCache,
"v8::ScriptCompiler::CompileModule",
"Invalid CompileOptions");
Utils::ApiCheck(
options == kNoCompileOptions || options == kConsumeCodeCache ||
options == kProduceCompileHints,
"v8::ScriptCompiler::CompileModule", "Invalid CompileOptions");
Utils::ApiCheck(source->GetResourceOptions().IsModule(),
"v8::ScriptCompiler::CompileModule",
"Invalid ScriptOrigin: is_module must be true");
@ -2896,7 +2876,8 @@ void ScriptCompiler::ScriptStreamingTask::Run() { data_->task->Run(); }
ScriptCompiler::ScriptStreamingTask* ScriptCompiler::StartStreaming(
Isolate* v8_isolate, StreamedSource* source, v8::ScriptType type,
CompileOptions options) {
Utils::ApiCheck(options == kNoCompileOptions || options == kEagerCompile,
Utils::ApiCheck(options == kNoCompileOptions || options == kEagerCompile ||
options == kProduceCompileHints,
"v8::ScriptCompiler::StartStreaming",
"Invalid CompileOptions");
if (!i::v8_flags.script_streaming) return nullptr;
@ -4953,8 +4934,8 @@ Maybe<bool> v8::Object::SetIntegrityLevel(Local<Context> context,
auto self = Utils::OpenHandle(this);
i::JSReceiver::IntegrityLevel i_level =
level == IntegrityLevel::kFrozen ? i::FROZEN : i::SEALED;
Maybe<bool> result =
i::JSReceiver::SetIntegrityLevel(self, i_level, i::kThrowOnError);
Maybe<bool> result = i::JSReceiver::SetIntegrityLevel(
i_isolate, self, i_level, i::kThrowOnError);
has_pending_exception = result.IsNothing();
RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool);
return result;
@ -6736,7 +6717,7 @@ Local<Context> NewContext(
// TODO(jkummerow): This is for crbug.com/713699. Remove it if it doesn't
// fail.
// Sanity-check that the isolate is initialized and usable.
CHECK(i_isolate->builtins()->code(i::Builtin::kIllegal).IsCodeT());
CHECK(i_isolate->builtins()->code(i::Builtin::kIllegal).IsCode());
TRACE_EVENT_CALL_STATS_SCOPED(i_isolate, "v8", "V8.NewContext");
API_RCS_SCOPE(i_isolate, Context, New);
@ -8967,21 +8948,6 @@ void Isolate::RemoveGCEpilogueCallback(GCCallback callback) {
RemoveGCEpilogueCallback(CallGCCallbackWithoutData, data);
}
START_ALLOW_USE_DEPRECATED()
void Isolate::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(this);
CHECK_NULL(i_isolate->heap()->cpp_heap());
i_isolate->heap()->SetEmbedderHeapTracer(tracer);
}
EmbedderHeapTracer* Isolate::GetEmbedderHeapTracer() {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(this);
return i_isolate->heap()->GetEmbedderHeapTracer();
}
END_ALLOW_USE_DEPRECATED()
void Isolate::SetEmbedderRootsHandler(EmbedderRootsHandler* handler) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(this);
i_isolate->heap()->SetEmbedderRootsHandler(handler);
@ -8989,7 +8955,6 @@ void Isolate::SetEmbedderRootsHandler(EmbedderRootsHandler* handler) {
void Isolate::AttachCppHeap(CppHeap* cpp_heap) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(this);
CHECK_NULL(GetEmbedderHeapTracer());
i_isolate->heap()->AttachCppHeap(cpp_heap);
}
@ -9747,6 +9712,7 @@ void Isolate::ClearCachesForTesting() {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(this);
i_isolate->AbortConcurrentOptimization(i::BlockingBehavior::kBlock);
i_isolate->ClearSerializerData();
i_isolate->compilation_cache()->Clear();
}
void Isolate::EnableMemorySavingsMode() {
@ -9818,7 +9784,7 @@ JSEntryStubs Isolate::GetJSEntryStubs() {
{i::Builtin::kJSRunMicrotasksEntry,
&entry_stubs.js_run_microtasks_entry_stub}}};
for (auto& pair : stubs) {
i::CodeT js_entry = i_isolate->builtins()->code(pair.first);
i::Code js_entry = i_isolate->builtins()->code(pair.first);
pair.second->code.start =
reinterpret_cast<const void*>(js_entry.InstructionStart());
pair.second->code.length_in_bytes = js_entry.InstructionSize();
@ -9877,6 +9843,9 @@ CALLBACK_SETTER(WasmAsyncResolvePromiseCallback,
CALLBACK_SETTER(WasmLoadSourceMapCallback, WasmLoadSourceMapCallback,
wasm_load_source_map_callback)
CALLBACK_SETTER(WasmGCEnabledCallback, WasmGCEnabledCallback,
wasm_gc_enabled_callback)
CALLBACK_SETTER(SharedArrayBufferConstructorEnabledCallback,
SharedArrayBufferConstructorEnabledCallback,
sharedarraybuffer_constructor_enabled_callback)
@ -10799,71 +10768,6 @@ void HeapProfiler::SetGetDetachednessCallback(GetDetachednessCallback callback,
data);
}
void EmbedderHeapTracer::SetStackStart(void* stack_start) {
CHECK(v8_isolate_);
reinterpret_cast<i::Isolate*>(v8_isolate_)
->heap()
->SetStackStart(stack_start);
}
void EmbedderHeapTracer::FinalizeTracing() {
if (v8_isolate_) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(v8_isolate_);
if (i_isolate->heap()->incremental_marking()->IsMarking()) {
i_isolate->heap()->FinalizeIncrementalMarkingAtomically(
i::GarbageCollectionReason::kExternalFinalize);
}
}
}
void EmbedderHeapTracer::IncreaseAllocatedSize(size_t bytes) {
if (v8_isolate_) {
i::LocalEmbedderHeapTracer* const tracer =
reinterpret_cast<i::Isolate*>(v8_isolate_)
->heap()
->local_embedder_heap_tracer();
DCHECK_NOT_NULL(tracer);
tracer->IncreaseAllocatedSize(bytes);
}
}
void EmbedderHeapTracer::DecreaseAllocatedSize(size_t bytes) {
if (v8_isolate_) {
i::LocalEmbedderHeapTracer* const tracer =
reinterpret_cast<i::Isolate*>(v8_isolate_)
->heap()
->local_embedder_heap_tracer();
DCHECK_NOT_NULL(tracer);
tracer->DecreaseAllocatedSize(bytes);
}
}
void EmbedderHeapTracer::RegisterEmbedderReference(
const BasicTracedReference<v8::Data>& ref) {
if (ref.IsEmpty()) return;
i::Heap* const heap = reinterpret_cast<i::Isolate*>(v8_isolate_)->heap();
heap->RegisterExternallyReferencedObject(
reinterpret_cast<i::Address*>(ref.val_));
}
void EmbedderHeapTracer::IterateTracedGlobalHandles(
TracedGlobalHandleVisitor* visitor) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(v8_isolate_);
i::DisallowGarbageCollection no_gc;
i_isolate->traced_handles()->Iterate(visitor);
}
bool EmbedderHeapTracer::IsRootForNonTracingGC(
const v8::TracedReference<v8::Value>& handle) {
return true;
}
void EmbedderHeapTracer::ResetHandleInNonTracingGC(
const v8::TracedReference<v8::Value>& handle) {
UNREACHABLE();
}
EmbedderStateScope::EmbedderStateScope(Isolate* v8_isolate,
Local<v8::Context> context,
EmbedderStateTag tag)


@ -2240,7 +2240,7 @@ AsmType* AsmJsParser::ValidateCall() {
function_type->AsFunctionType()->AddArgument(t);
}
FunctionSig* sig = ConvertSignature(return_type, param_types);
uint32_t signature_index = module_builder_->AddSignature(sig);
uint32_t signature_index = module_builder_->AddSignature(sig, true);
// Emit actual function invocation depending on the kind. At this point we
// also determined the complete function type and can perform checking against


@ -1613,7 +1613,9 @@ enum AssignType {
PRIVATE_METHOD, // obj.#key: #key is a private method
PRIVATE_GETTER_ONLY, // obj.#key: #key only has a getter defined
PRIVATE_SETTER_ONLY, // obj.#key: #key only has a setter defined
PRIVATE_GETTER_AND_SETTER // obj.#key: #key has both accessors defined
PRIVATE_GETTER_AND_SETTER, // obj.#key: #key has both accessors defined
PRIVATE_DEBUG_DYNAMIC,  // obj.#key: #key is a private name that requires
// dynamic lookup in debug-evaluate.
};
class Property final : public Expression {
@ -1650,6 +1652,9 @@ class Property final : public Expression {
return PRIVATE_SETTER_ONLY;
case VariableMode::kPrivateGetterAndSetter:
return PRIVATE_GETTER_AND_SETTER;
case VariableMode::kDynamic:
// From debug-evaluate.
return PRIVATE_DEBUG_DYNAMIC;
default:
UNREACHABLE();
}


@ -1367,6 +1367,10 @@ void AstPrinter::VisitProperty(Property* node) {
PrintIndentedVisit("KEY", node->key());
break;
}
case PRIVATE_DEBUG_DYNAMIC: {
PrintIndentedVisit("PRIVATE_DEBUG_DYNAMIC", node->key());
break;
}
case NON_PROPERTY:
UNREACHABLE();
}


@ -2061,6 +2061,15 @@ Variable* Scope::NonLocal(const AstRawString* name, VariableMode mode) {
return var;
}
void Scope::ForceDynamicLookup(VariableProxy* proxy) {
// At the moment this is only used for looking up private names dynamically
// in debug-evaluate from top-level scope.
DCHECK(proxy->IsPrivateName());
DCHECK(is_script_scope() || is_module_scope() || is_eval_scope());
Variable* dynamic = NonLocal(proxy->raw_name(), VariableMode::kDynamic);
proxy->BindTo(dynamic);
}
// static
template <Scope::ScopeLookupMode mode>
Variable* Scope::Lookup(VariableProxy* proxy, Scope* scope,
@ -3112,6 +3121,13 @@ void PrivateNameScopeIterator::AddUnresolvedPrivateName(VariableProxy* proxy) {
// be new.
DCHECK(!proxy->is_resolved());
DCHECK(proxy->IsPrivateName());
// Use dynamic lookup for top-level scopes in debug-evaluate.
if (Done()) {
start_scope_->ForceDynamicLookup(proxy);
return;
}
GetScope()->EnsureRareData()->unresolved_private_names.Add(proxy);
// Any closure scope that contains uses of private names that skip over a
// class scope due to heritage expressions needs private name context chain


@ -637,6 +637,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) {
return nullptr;
}
void ForceDynamicLookup(VariableProxy* proxy);
protected:
explicit Scope(Zone* zone);


@ -70,9 +70,10 @@ constexpr int kPageSizeBits = 18;
// The minimal page size supported by the operating system. Any region aligned
// to that size needs to be individually protectable via
// {base::OS::SetPermission} and friends.
#if (defined(V8_OS_MACOS) && defined(V8_HOST_ARCH_ARM64)) || \
defined(V8_HOST_ARCH_LOONG64) || defined(V8_HOST_ARCH_MIPS64)
// MacOS on arm64 uses 16kB pages.
#if (defined(V8_OS_MACOS) && defined(V8_HOST_ARCH_ARM64)) || \
defined(V8_HOST_ARCH_LOONG64) || defined(V8_HOST_ARCH_MIPS64) || \
defined(V8_OS_IOS)
// MacOS & iOS on arm64 use 16kB pages.
// LOONG64 and MIPS64 also use 16kB pages.
constexpr int kMinimumOSPageSize = 16 * 1024;
#elif defined(V8_OS_LINUX) && !defined(V8_OS_ANDROID) && \


@ -404,6 +404,7 @@ CPU::CPU()
has_vfp3_(false),
has_vfp3_d32_(false),
has_jscvt_(false),
has_dot_prod_(false),
is_fp64_mode_(false),
has_non_stop_time_stamp_counter_(false),
is_running_in_vm_(false),
@ -726,20 +727,27 @@ CPU::CPU()
#if !defined(PF_ARM_V83_JSCVT_INSTRUCTIONS_AVAILABLE)
constexpr int PF_ARM_V83_JSCVT_INSTRUCTIONS_AVAILABLE = 44;
#endif
#if !defined(PF_ARM_V82_DP_INSTRUCTIONS_AVAILABLE)
constexpr int PF_ARM_V82_DP_INSTRUCTIONS_AVAILABLE = 43;
#endif
has_jscvt_ =
IsProcessorFeaturePresent(PF_ARM_V83_JSCVT_INSTRUCTIONS_AVAILABLE);
has_dot_prod_ =
IsProcessorFeaturePresent(PF_ARM_V82_DP_INSTRUCTIONS_AVAILABLE);
#elif V8_OS_LINUX
// Try to extract the list of CPU features from ELF hwcaps.
uint32_t hwcaps = ReadELFHWCaps();
if (hwcaps != 0) {
has_jscvt_ = (hwcaps & HWCAP_JSCVT) != 0;
has_dot_prod_ = (hwcaps & HWCAP_ASIMDDP) != 0;
} else {
// Try to fallback to "Features" CPUInfo field
CPUInfo cpu_info;
char* features = cpu_info.ExtractField("Features");
has_jscvt_ = HasListItem(features, "jscvt");
has_dot_prod_ = HasListItem(features, "asimddp");
delete[] features;
}
#elif V8_OS_DARWIN
@ -752,9 +760,18 @@ CPU::CPU()
} else {
has_jscvt_ = feat_jscvt;
}
int64_t feat_dot_prod = 0;
size_t feat_dot_prod_size = sizeof(feat_dot_prod);
if (sysctlbyname("hw.optional.arm.FEAT_DotProd", &feat_dot_prod,
&feat_dot_prod_size, nullptr, 0) == -1) {
has_dot_prod_ = false;
} else {
has_dot_prod_ = feat_dot_prod;
}
#else
// ARM64 Macs always have JSCVT.
// ARM64 Macs always have JSCVT and ASIMDDP.
has_jscvt_ = true;
has_dot_prod_ = true;
#endif // V8_OS_IOS
#endif // V8_OS_WIN


@ -123,6 +123,7 @@ class V8_BASE_EXPORT CPU final {
bool has_vfp3() const { return has_vfp3_; }
bool has_vfp3_d32() const { return has_vfp3_d32_; }
bool has_jscvt() const { return has_jscvt_; }
bool has_dot_prod() const { return has_dot_prod_; }
// mips features
bool is_fp64_mode() const { return is_fp64_mode_; }
@ -176,6 +177,7 @@ class V8_BASE_EXPORT CPU final {
bool has_vfp3_;
bool has_vfp3_d32_;
bool has_jscvt_;
bool has_dot_prod_;
bool is_fp64_mode_;
bool has_non_stop_time_stamp_counter_;
bool is_running_in_vm_;

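Consumers can then gate codegen on the new has_dot_prod() bit; a sketch (the actual feature plumbing lives in src/codegen/cpu-features.h):

v8::base::CPU cpu;
if (cpu.has_dot_prod()) {
  // Safe to emit Advanced SIMD dot product (SDOT/UDOT) sequences.
}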

@ -126,8 +126,8 @@ inline size_t MallocUsableSize(void* ptr) {
// Mimics C++23 `allocation_result`.
template <class Pointer>
struct AllocationResult {
Pointer ptr;
size_t count;
Pointer ptr = nullptr;
size_t count = 0;
};
// Allocates at least `n * sizeof(T)` uninitialized storage but may allocate


@ -505,8 +505,9 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
// TODO(erikchen): Fix this to only call MADV_FREE_REUSE when necessary.
// https://crbug.com/823915
#if defined(V8_OS_DARWIN)
if (access != OS::MemoryPermission::kNoAccess)
if (access != OS::MemoryPermission::kNoAccess) {
madvise(address, size, MADV_FREE_REUSE);
}
#endif
return ret == 0;
@ -554,14 +555,19 @@ bool OS::DiscardSystemPages(void* address, size_t size) {
}
#elif defined(_AIX) || defined(V8_OS_SOLARIS)
int ret = madvise(reinterpret_cast<caddr_t>(address), size, MADV_FREE);
if (ret != 0 && errno == ENOSYS)
if (ret != 0 && errno == ENOSYS) {
return true; // madvise is not available on all systems.
if (ret != 0 && errno == EINVAL)
}
if (ret != 0 && errno == EINVAL) {
ret = madvise(reinterpret_cast<caddr_t>(address), size, MADV_DONTNEED);
}
#else
int ret = madvise(address, size, MADV_DONTNEED);
#endif
return ret == 0;
// madvise with MADV_DONTNEED only fails on illegal parameters. That's a bug
// in the caller.
CHECK_EQ(0, ret);
return true;
}
#if !defined(_AIX)
@ -576,9 +582,14 @@ bool OS::DecommitPages(void* address, size_t size) {
// shall be removed, as if by an appropriate call to munmap(), before the new
// mapping is established." As a consequence, the memory will be
// zero-initialized on next access.
void* ptr = mmap(address, size, PROT_NONE,
void* ret = mmap(address, size, PROT_NONE,
MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
return ptr == address;
if (V8_UNLIKELY(ret == MAP_FAILED)) {
CHECK_EQ(ENOMEM, errno);
return false;
}
CHECK_EQ(ret, address);
return true;
}
#endif // !defined(_AIX)
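Under the new contract, DiscardSystemPages CHECK-fails on caller bugs, while DecommitPages reports the one legitimate failure mode; a caller sketch:

if (!v8::base::OS::DecommitPages(address, size)) {
  // mmap(MAP_FIXED, PROT_NONE) failed with ENOMEM, e.g. from hitting a
  // VMA limit while splitting mappings; back off rather than touch pages.
}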


@ -91,10 +91,10 @@ class V8_BASE_EXPORT TimeDelta final {
return TimeDelta(nanoseconds / TimeConstants::kNanosecondsPerMicrosecond);
}
static TimeDelta FromSecondsD(double seconds) {
static constexpr TimeDelta FromSecondsD(double seconds) {
return FromDouble(seconds * TimeConstants::kMicrosecondsPerSecond);
}
static TimeDelta FromMillisecondsD(double milliseconds) {
static constexpr TimeDelta FromMillisecondsD(double milliseconds) {
return FromDouble(milliseconds *
TimeConstants::kMicrosecondsPerMillisecond);
}
@ -210,8 +210,7 @@ class V8_BASE_EXPORT TimeDelta final {
}
private:
// TODO(v8:10620): constexpr requires constexpr saturated_cast.
static inline TimeDelta FromDouble(double value);
static constexpr inline TimeDelta FromDouble(double value);
template<class TimeClass> friend class time_internal::TimeBase;
// Constructs a delta given the duration in microseconds. This is private
@ -224,7 +223,7 @@ class V8_BASE_EXPORT TimeDelta final {
};
// static
TimeDelta TimeDelta::FromDouble(double value) {
constexpr TimeDelta TimeDelta::FromDouble(double value) {
return TimeDelta(saturated_cast<int64_t>(value));
}
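With FromDouble now constexpr, fractional delays can be folded at compile time; a small sketch:

constexpr v8::base::TimeDelta kTick =
    v8::base::TimeDelta::FromMillisecondsD(2.5);
constexpr v8::base::TimeDelta kSlice =
    v8::base::TimeDelta::FromSecondsD(0.25);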


@ -142,10 +142,10 @@ class SmallVector {
template <typename... Args>
void emplace_back(Args&&... args) {
T* end = end_;
if (V8_UNLIKELY(end == end_of_storage_)) end = Grow();
new (end) T(std::forward<Args>(args)...);
end_ = end + 1;
if (V8_UNLIKELY(end_ == end_of_storage_)) Grow();
void* storage = end_;
end_ += 1;
new (storage) T(std::forward<Args>(args)...);
}
void push_back(T x) { emplace_back(std::move(x)); }
@ -200,10 +200,10 @@ class SmallVector {
// Grows the backing store by a factor of two (kept out of line to reduce
// binary size).
V8_NOINLINE T* Grow() { return Grow(0); }
V8_NOINLINE V8_PRESERVE_MOST void Grow() { Grow(0); }
// Grows the backing store by a factor of two, and at least to {min_capacity}.
V8_NOINLINE T* Grow(size_t min_capacity) {
V8_NOINLINE V8_PRESERVE_MOST void Grow(size_t min_capacity) {
size_t in_use = end_ - begin_;
size_t new_capacity =
base::bits::RoundUpToPowerOfTwo(std::max(min_capacity, 2 * capacity()));
@ -220,14 +220,13 @@ class SmallVector {
begin_ = new_storage;
end_ = new_storage + in_use;
end_of_storage_ = new_storage + new_capacity;
return end_;
}
T* AllocateDynamicStorage(size_t number_of_elements) {
return allocator_.allocate(number_of_elements);
}
void FreeDynamicStorage() {
V8_NOINLINE V8_PRESERVE_MOST void FreeDynamicStorage() {
DCHECK(is_big());
allocator_.deallocate(begin_, end_of_storage_ - begin_);
}
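The resulting hot/cold split, summarized as a sketch: since a preserve_most function should not return a value, Grow() no longer hands back the new end pointer, and emplace_back() re-reads end_ after the call.

void emplace_back_sketch(T value) {
  if (V8_UNLIKELY(end_ == end_of_storage_)) Grow();  // cold, out of line
  new (end_++) T(std::move(value));  // re-reads end_ after a possible Grow()
}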


@ -39,35 +39,6 @@ class BaselineAssembler::ScratchRegisterScope {
UseScratchRegisterScope wrapped_scope_;
};
// TODO(v8:11429,leszeks): Unify condition names in the MacroAssembler.
enum class Condition : uint32_t {
kEqual = static_cast<uint32_t>(eq),
kNotEqual = static_cast<uint32_t>(ne),
kLessThan = static_cast<uint32_t>(lt),
kGreaterThan = static_cast<uint32_t>(gt),
kLessThanEqual = static_cast<uint32_t>(le),
kGreaterThanEqual = static_cast<uint32_t>(ge),
kUnsignedLessThan = static_cast<uint32_t>(lo),
kUnsignedGreaterThan = static_cast<uint32_t>(hi),
kUnsignedLessThanEqual = static_cast<uint32_t>(ls),
kUnsignedGreaterThanEqual = static_cast<uint32_t>(hs),
kOverflow = static_cast<uint32_t>(vs),
kNoOverflow = static_cast<uint32_t>(vc),
kZero = static_cast<uint32_t>(eq),
kNotZero = static_cast<uint32_t>(ne),
};
inline internal::Condition AsMasmCondition(Condition cond) {
// This is important for arm, where each internal::Condition value
// represents an encoded bit field value.
static_assert(sizeof(internal::Condition) == sizeof(Condition));
return static_cast<internal::Condition>(cond);
}
namespace detail {
#ifdef DEBUG
@ -132,13 +103,13 @@ void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
Label* target, Label::Distance) {
__ tst(value, Operand(mask));
__ b(AsMasmCondition(cc), target);
__ b(cc, target);
}
void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
Label* target, Label::Distance) {
__ cmp(lhs, Operand(rhs));
__ b(AsMasmCondition(cc), target);
__ b(cc, target);
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
InstanceType instance_type,
@ -409,7 +380,7 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
{
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
__ TestCodeTIsMarkedForDeoptimization(scratch_and_result, scratch);
__ TestCodeIsMarkedForDeoptimization(scratch_and_result, scratch);
__ b(eq, on_result);
__ mov(scratch, __ ClearedValue());
StoreTaggedFieldNoWriteBarrier(
@ -536,8 +507,7 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
// Mostly copied from code-generator-arm.cc
ScratchRegisterScope scope(this);
JumpIf(Condition::kUnsignedGreaterThanEqual, reg, Operand(num_labels),
&fallthrough);
JumpIf(kUnsignedGreaterThanEqual, reg, Operand(num_labels), &fallthrough);
// Ensure to emit the constant pool first if necessary.
__ CheckConstPool(true, true);
__ BlockConstPoolFor(num_labels);
@ -591,8 +561,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
Label corrected_args_count;
__ JumpIf(Condition::kGreaterThanEqual, params_size,
Operand(actual_params_size), &corrected_args_count);
__ JumpIf(kGreaterThanEqual, params_size, Operand(actual_params_size),
&corrected_args_count);
__ masm()->mov(params_size, actual_params_size);
__ Bind(&corrected_args_count);


@ -38,32 +38,6 @@ class BaselineAssembler::ScratchRegisterScope {
UseScratchRegisterScope wrapped_scope_;
};
// TODO(v8:11461): Unify condition names in the MacroAssembler.
enum class Condition : uint32_t {
kEqual = eq,
kNotEqual = ne,
kLessThan = lt,
kGreaterThan = gt,
kLessThanEqual = le,
kGreaterThanEqual = ge,
kUnsignedLessThan = lo,
kUnsignedGreaterThan = hi,
kUnsignedLessThanEqual = ls,
kUnsignedGreaterThanEqual = hs,
kOverflow = vs,
kNoOverflow = vc,
kZero = eq,
kNotZero = ne,
};
inline internal::Condition AsMasmCondition(Condition cond) {
return static_cast<internal::Condition>(cond);
}
namespace detail {
#ifdef DEBUG
@ -125,19 +99,19 @@ void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
Label* target, Label::Distance) {
if (cc == Condition::kZero) {
if (cc == kZero) {
__ TestAndBranchIfAllClear(value, mask, target);
} else if (cc == Condition::kNotZero) {
} else if (cc == kNotZero) {
__ TestAndBranchIfAnySet(value, mask, target);
} else {
__ Tst(value, Immediate(mask));
__ B(AsMasmCondition(cc), target);
__ B(cc, target);
}
}
void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
Label* target, Label::Distance) {
__ CompareAndBranch(lhs, rhs, AsMasmCondition(cc), target);
__ CompareAndBranch(lhs, rhs, cc, target);
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
InstanceType instance_type,
@ -173,14 +147,14 @@ void BaselineAssembler::JumpIfPointer(Condition cc, Register value,
void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
Label* target, Label::Distance distance) {
__ AssertSmi(value);
__ CompareTaggedAndBranch(value, smi, AsMasmCondition(cc), target);
__ CompareTaggedAndBranch(value, smi, cc, target);
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
Label* target, Label::Distance) {
__ AssertSmi(lhs);
__ AssertSmi(rhs);
__ CompareTaggedAndBranch(lhs, rhs, AsMasmCondition(cc), target);
__ CompareTaggedAndBranch(lhs, rhs, cc, target);
}
void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
MemOperand operand, Label* target,
@ -188,7 +162,7 @@ void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
ScratchRegisterScope temps(this);
Register tmp = temps.AcquireScratch();
__ Ldr(tmp, operand);
__ CompareTaggedAndBranch(value, tmp, AsMasmCondition(cc), target);
__ CompareTaggedAndBranch(value, tmp, cc, target);
}
void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
Register value, Label* target,
@ -196,7 +170,7 @@ void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
ScratchRegisterScope temps(this);
Register tmp = temps.AcquireScratch();
__ Ldr(tmp, operand);
__ CompareTaggedAndBranch(tmp, value, AsMasmCondition(cc), target);
__ CompareTaggedAndBranch(tmp, value, cc, target);
}
void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
Label* target, Label::Distance) {
@ -456,27 +430,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
FeedbackSlot slot,
Label* on_result,
Label::Distance) {
Label fallthrough, clear_slot;
LoadTaggedPointerField(scratch_and_result, feedback_vector,
FeedbackVector::OffsetOfElementAt(slot.ToInt()));
__ LoadWeakValue(scratch_and_result, scratch_and_result, &fallthrough);
// Is it marked_for_deoptimization? If yes, clear the slot.
{
ScratchRegisterScope temps(this);
__ JumpIfCodeTIsMarkedForDeoptimization(
scratch_and_result, temps.AcquireScratch(), &clear_slot);
__ B(on_result);
}
__ bind(&clear_slot);
__ Mov(scratch_and_result, __ ClearedValue());
StoreTaggedFieldNoWriteBarrier(
feedback_vector, FeedbackVector::OffsetOfElementAt(slot.ToInt()),
scratch_and_result);
__ bind(&fallthrough);
Move(scratch_and_result, 0);
__ TryLoadOptimizedOsrCode(scratch_and_result, feedback_vector, slot,
on_result, Label::Distance::kFar);
}
void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
@ -601,16 +556,23 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
ScratchRegisterScope scope(this);
Register temp = scope.AcquireScratch();
Label table;
JumpIf(Condition::kUnsignedGreaterThanEqual, reg, num_labels, &fallthrough);
JumpIf(kUnsignedGreaterThanEqual, reg, num_labels, &fallthrough);
__ Adr(temp, &table);
int entry_size_log2 = 2;
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
++entry_size_log2; // Account for BTI.
constexpr int instructions_per_jump_target = 1;
#else
constexpr int instructions_per_jump_target = 0;
#endif
constexpr int instructions_per_label = 1 + instructions_per_jump_target;
__ Add(temp, temp, Operand(reg, UXTW, entry_size_log2));
__ Br(temp);
{
TurboAssembler::BlockPoolsScope block_pools(masm_, num_labels * kInstrSize);
const int instruction_count =
num_labels * instructions_per_label + instructions_per_jump_target;
TurboAssembler::BlockPoolsScope block_pools(masm_,
instruction_count * kInstrSize);
__ Bind(&table);
for (int i = 0; i < num_labels; ++i) {
__ JumpTarget();
@ -659,7 +621,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
Label corrected_args_count;
__ JumpIf(Condition::kGreaterThanEqual, params_size, actual_params_size,
__ JumpIf(kGreaterThanEqual, params_size, actual_params_size,
&corrected_args_count);
__ masm()->Mov(params_size, actual_params_size);
__ Bind(&corrected_args_count);
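Note: the reworked arm64 Switch above sizes its constant-pool blocking window by instruction count rather than assuming one instruction per case. With control-flow integrity enabled, each jump-table entry needs a BTI landing pad in addition to its branch (hence ++entry_size_log2), and the extra instructions_per_jump_target term presumably covers the landing pad at the table itself. Worked out with the hunk's own constants (num_labels is an example value):

    #include <initializer_list>
    #include <iostream>

    int main() {
      for (bool cfi : {false, true}) {
        const int instructions_per_jump_target = cfi ? 1 : 0;  // BTI pad
        const int instructions_per_label = 1 + instructions_per_jump_target;
        const int num_labels = 4;  // example value
        const int instruction_count =
            num_labels * instructions_per_label + instructions_per_jump_target;
        std::cout << "cfi=" << cfi << ": block pools for "
                  << instruction_count << " instructions\n";  // 4, then 9
      }
      return 0;
    }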


@ -17,8 +17,6 @@ namespace v8 {
namespace internal {
namespace baseline {
enum class Condition : uint32_t;
class BaselineAssembler {
public:
class ScratchRegisterScope;


@ -74,7 +74,7 @@ class BaselineCompilerTask {
return;
}
shared_function_info_->set_baseline_code(ToCodeT(*code), kReleaseStore);
shared_function_info_->set_baseline_code(*code, kReleaseStore);
if (v8_flags.trace_baseline_concurrent_compilation) {
CodeTracer::Scope scope(isolate->GetCodeTracer());
std::stringstream ss;


@ -561,8 +561,8 @@ void BaselineCompiler::VerifyFrame() {
__ Move(scratch, __ FeedbackVectorOperand());
Label is_smi, is_ok;
__ JumpIfSmi(scratch, &is_smi);
__ JumpIfObjectType(Condition::kEqual, scratch, FEEDBACK_VECTOR_TYPE,
scratch, &is_ok);
__ JumpIfObjectType(kEqual, scratch, FEEDBACK_VECTOR_TYPE, scratch,
&is_ok);
__ Bind(&is_smi);
__ masm()->Abort(AbortReason::kExpectedFeedbackVector);
__ Bind(&is_ok);
@ -669,8 +669,8 @@ void BaselineCompiler::JumpIfToBoolean(bool do_jump_if_true, Label* label,
// the original value into kInterpreterAccumulatorRegister, so we don't have
// to worry about it getting clobbered.
static_assert(kReturnRegister0 == kInterpreterAccumulatorRegister);
__ JumpIfSmi(do_jump_if_true ? Condition::kNotEqual : Condition::kEqual,
kReturnRegister1, Smi::FromInt(0), label, distance);
__ JumpIfSmi(do_jump_if_true ? kNotEqual : kEqual, kReturnRegister1,
Smi::FromInt(0), label, distance);
}
void BaselineCompiler::VisitLdaZero() {
@ -942,7 +942,8 @@ void BaselineCompiler::VisitDefineKeyedOwnProperty() {
RegisterOperand(0), // object
RegisterOperand(1), // key
kInterpreterAccumulatorRegister, // value
IndexAsTagged(2)); // slot
Flag8AsSmi(2), // flags
IndexAsTagged(3)); // slot
}
void BaselineCompiler::VisitStaInArrayLiteral() {
@ -1489,8 +1490,7 @@ void BaselineCompiler::VisitTestReferenceEqual() {
SelectBooleanConstant(
kInterpreterAccumulatorRegister,
[&](Label* is_true, Label::Distance distance) {
__ JumpIfTagged(Condition::kEqual,
__ RegisterFrameOperand(RegisterOperand(0)),
__ JumpIfTagged(kEqual, __ RegisterFrameOperand(RegisterOperand(0)),
kInterpreterAccumulatorRegister, is_true, distance);
});
}
@ -1520,8 +1520,8 @@ void BaselineCompiler::VisitTestUndetectable() {
Register map_bit_field = kInterpreterAccumulatorRegister;
__ LoadMap(map_bit_field, kInterpreterAccumulatorRegister);
__ LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
__ TestAndBranch(map_bit_field, Map::Bits1::IsUndetectableBit::kMask,
Condition::kZero, &not_undetectable, Label::kNear);
__ TestAndBranch(map_bit_field, Map::Bits1::IsUndetectableBit::kMask, kZero,
&not_undetectable, Label::kNear);
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
__ Jump(&done, Label::kNear);
@ -1561,7 +1561,7 @@ void BaselineCompiler::VisitTestTypeOf() {
case interpreter::TestTypeOfFlags::LiteralFlag::kNumber: {
Label is_smi, is_heap_number;
__ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
__ JumpIfObjectType(Condition::kEqual, kInterpreterAccumulatorRegister,
__ JumpIfObjectType(kEqual, kInterpreterAccumulatorRegister,
HEAP_NUMBER_TYPE, scratch_scope.AcquireScratch(),
&is_heap_number, Label::kNear);
@ -1577,10 +1577,9 @@ void BaselineCompiler::VisitTestTypeOf() {
Label is_smi, bad_instance_type;
__ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
static_assert(INTERNALIZED_STRING_TYPE == FIRST_TYPE);
__ JumpIfObjectType(Condition::kGreaterThanEqual,
kInterpreterAccumulatorRegister, FIRST_NONSTRING_TYPE,
scratch_scope.AcquireScratch(), &bad_instance_type,
Label::kNear);
__ JumpIfObjectType(kGreaterThanEqual, kInterpreterAccumulatorRegister,
FIRST_NONSTRING_TYPE, scratch_scope.AcquireScratch(),
&bad_instance_type, Label::kNear);
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
__ Jump(&done, Label::kNear);
@ -1593,7 +1592,7 @@ void BaselineCompiler::VisitTestTypeOf() {
case interpreter::TestTypeOfFlags::LiteralFlag::kSymbol: {
Label is_smi, bad_instance_type;
__ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
__ JumpIfObjectType(Condition::kNotEqual, kInterpreterAccumulatorRegister,
__ JumpIfObjectType(kNotEqual, kInterpreterAccumulatorRegister,
SYMBOL_TYPE, scratch_scope.AcquireScratch(),
&bad_instance_type, Label::kNear);
@ -1623,7 +1622,7 @@ void BaselineCompiler::VisitTestTypeOf() {
case interpreter::TestTypeOfFlags::LiteralFlag::kBigInt: {
Label is_smi, bad_instance_type;
__ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
__ JumpIfObjectType(Condition::kNotEqual, kInterpreterAccumulatorRegister,
__ JumpIfObjectType(kNotEqual, kInterpreterAccumulatorRegister,
BIGINT_TYPE, scratch_scope.AcquireScratch(),
&bad_instance_type, Label::kNear);
@ -1648,7 +1647,7 @@ void BaselineCompiler::VisitTestTypeOf() {
__ LoadMap(map_bit_field, kInterpreterAccumulatorRegister);
__ LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
__ TestAndBranch(map_bit_field, Map::Bits1::IsUndetectableBit::kMask,
Condition::kZero, &not_undetectable, Label::kNear);
kZero, &not_undetectable, Label::kNear);
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
__ Jump(&done, Label::kNear);
@ -1667,10 +1666,10 @@ void BaselineCompiler::VisitTestTypeOf() {
Register map_bit_field = kInterpreterAccumulatorRegister;
__ LoadMap(map_bit_field, kInterpreterAccumulatorRegister);
__ LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
__ TestAndBranch(map_bit_field, Map::Bits1::IsCallableBit::kMask,
Condition::kZero, &not_callable, Label::kNear);
__ TestAndBranch(map_bit_field, Map::Bits1::IsCallableBit::kMask, kZero,
&not_callable, Label::kNear);
__ TestAndBranch(map_bit_field, Map::Bits1::IsUndetectableBit::kMask,
Condition::kNotZero, &undetectable, Label::kNear);
kNotZero, &undetectable, Label::kNear);
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
__ Jump(&done, Label::kNear);
@ -1692,7 +1691,7 @@ void BaselineCompiler::VisitTestTypeOf() {
// If the object's instance type isn't within the range, return false.
static_assert(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
Register map = scratch_scope.AcquireScratch();
__ JumpIfObjectType(Condition::kLessThan, kInterpreterAccumulatorRegister,
__ JumpIfObjectType(kLessThan, kInterpreterAccumulatorRegister,
FIRST_JS_RECEIVER_TYPE, map, &bad_instance_type,
Label::kNear);
@ -1702,8 +1701,7 @@ void BaselineCompiler::VisitTestTypeOf() {
__ TestAndBranch(map_bit_field,
Map::Bits1::IsUndetectableBit::kMask |
Map::Bits1::IsCallableBit::kMask,
Condition::kNotZero, &undetectable_or_callable,
Label::kNear);
kNotZero, &undetectable_or_callable, Label::kNear);
__ Bind(&is_null);
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kTrueValue);
@ -1917,8 +1915,8 @@ void BaselineCompiler::VisitJumpLoop() {
FeedbackVector::kOsrStateOffset);
static_assert(FeedbackVector::MaybeHasOptimizedOsrCodeBit::encode(true) >
FeedbackVector::kMaxOsrUrgency);
__ JumpIfByte(Condition::kUnsignedGreaterThan, osr_state, loop_depth,
&osr_armed, Label::kNear);
__ JumpIfByte(kUnsignedGreaterThan, osr_state, loop_depth, &osr_armed,
Label::kNear);
}
__ Bind(&osr_not_armed);
@ -1945,8 +1943,8 @@ void BaselineCompiler::VisitJumpLoop() {
iterator().GetSlotOperand(2), &osr,
Label::kNear);
__ DecodeField<FeedbackVector::OsrUrgencyBits>(scratch1);
__ JumpIfByte(Condition::kUnsignedLessThanEqual, scratch1, loop_depth,
&osr_not_armed, Label::kNear);
__ JumpIfByte(kUnsignedLessThanEqual, scratch1, loop_depth, &osr_not_armed,
Label::kNear);
__ Bind(&osr);
CallBuiltin<Builtin::kBaselineOnStackReplacement>(maybe_target_code);
@ -2048,7 +2046,7 @@ void BaselineCompiler::VisitJumpIfJSReceiver() {
Label is_smi, dont_jump;
__ JumpIfSmi(kInterpreterAccumulatorRegister, &is_smi, Label::kNear);
__ JumpIfObjectType(Condition::kLessThan, kInterpreterAccumulatorRegister,
__ JumpIfObjectType(kLessThan, kInterpreterAccumulatorRegister,
FIRST_JS_RECEIVER_TYPE, scratch_scope.AcquireScratch(),
&dont_jump);
UpdateInterruptBudgetAndDoInterpreterJump();
@ -2096,8 +2094,7 @@ void BaselineCompiler::VisitForInContinue() {
[&](Label* is_true, Label::Distance distance) {
LoadRegister(kInterpreterAccumulatorRegister, 0);
__ JumpIfTagged(
Condition::kNotEqual,
kInterpreterAccumulatorRegister,
kNotEqual, kInterpreterAccumulatorRegister,
__ RegisterFrameOperand(RegisterOperand(1)),
is_true, distance);
});
@ -2189,8 +2186,8 @@ void BaselineCompiler::VisitThrowIfNotSuperConstructor() {
Register map_bit_field = scratch_scope.AcquireScratch();
__ LoadMap(map_bit_field, reg);
__ LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
__ TestAndBranch(map_bit_field, Map::Bits1::IsConstructorBit::kMask,
Condition::kNotZero, &done, Label::kNear);
__ TestAndBranch(map_bit_field, Map::Bits1::IsConstructorBit::kMask, kNotZero,
&done, Label::kNear);
CallRuntime(Runtime::kThrowNotSuperConstructor, reg, __ FunctionOperand());


@ -44,32 +44,6 @@ class BaselineAssembler::ScratchRegisterScope {
int registers_used_;
};
// TODO(v8:11461): Unify condition names in the MacroAssembler.
enum class Condition : uint32_t {
kEqual = equal,
kNotEqual = not_equal,
kLessThan = less,
kGreaterThan = greater,
kLessThanEqual = less_equal,
kGreaterThanEqual = greater_equal,
kUnsignedLessThan = below,
kUnsignedGreaterThan = above,
kUnsignedLessThanEqual = below_equal,
kUnsignedGreaterThanEqual = above_equal,
kOverflow = overflow,
kNoOverflow = no_overflow,
kZero = zero,
kNotZero = not_zero,
};
inline internal::Condition AsMasmCondition(Condition cond) {
return static_cast<internal::Condition>(cond);
}
namespace detail {
#define __ masm_->
@ -124,7 +98,7 @@ void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
Label* target,
Label::Distance distance) {
__ cmp(left, Immediate(right));
__ j(AsMasmCondition(cc), target, distance);
__ j(cc, target, distance);
}
void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
@ -139,13 +113,13 @@ void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
} else {
__ test(value, Immediate(mask));
}
__ j(AsMasmCondition(cc), target, distance);
__ j(cc, target, distance);
}
void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
Label* target, Label::Distance distance) {
__ cmp(lhs, rhs);
__ j(AsMasmCondition(cc), target, distance);
__ j(cc, target, distance);
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
InstanceType instance_type,
@ -153,7 +127,7 @@ void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
Label::Distance distance) {
__ AssertNotSmi(object);
__ CmpObjectType(object, instance_type, map);
__ j(AsMasmCondition(cc), target, distance);
__ j(cc, target, distance);
}
void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
InstanceType instance_type,
@ -167,7 +141,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
__ movd(eax, xmm0);
}
__ CmpInstanceType(map, instance_type);
__ j(AsMasmCondition(cc), target, distance);
__ j(cc, target, distance);
}
void BaselineAssembler::JumpIfPointer(Condition cc, Register value,
MemOperand operand, Label* target,
@ -181,31 +155,31 @@ void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
} else {
__ cmp(value, Immediate(smi));
}
__ j(AsMasmCondition(cc), target, distance);
__ j(cc, target, distance);
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
Label* target, Label::Distance distance) {
__ AssertSmi(lhs);
__ AssertSmi(rhs);
__ cmp(lhs, rhs);
__ j(AsMasmCondition(cc), target, distance);
__ j(cc, target, distance);
}
void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
MemOperand operand, Label* target,
Label::Distance distance) {
__ cmp(operand, value);
__ j(AsMasmCondition(cc), target, distance);
__ j(cc, target, distance);
}
void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
Register value, Label* target,
Label::Distance distance) {
__ cmp(operand, value);
__ j(AsMasmCondition(cc), target, distance);
__ j(cc, target, distance);
}
void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
Label* target, Label::Distance distance) {
__ cmpb(value, Immediate(byte));
__ j(AsMasmCondition(cc), target, distance);
__ j(cc, target, distance);
}
void BaselineAssembler::Move(interpreter::Register output, Register source) {
return __ mov(RegisterFrameOperand(output), source);
@ -387,9 +361,7 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
// Is it marked_for_deoptimization? If yes, clear the slot.
{
ScratchRegisterScope temps(this);
Register scratch2 = temps.AcquireScratch();
DCHECK(!AreAliased(scratch_and_result, scratch2));
__ TestCodeTIsMarkedForDeoptimization(scratch_and_result, scratch2);
__ TestCodeIsMarkedForDeoptimization(scratch_and_result);
__ j(equal, on_result, distance);
__ mov(FieldOperand(feedback_vector,
FeedbackVector::OffsetOfElementAt(slot.ToInt())),


@ -37,32 +37,6 @@ class BaselineAssembler::ScratchRegisterScope {
UseScratchRegisterScope wrapped_scope_;
};
enum class Condition : uint32_t {
kEqual = eq,
kNotEqual = ne,
kLessThan = lt,
kGreaterThan = gt,
kLessThanEqual = le,
kGreaterThanEqual = ge,
kUnsignedLessThan = Uless,
kUnsignedGreaterThan = Ugreater,
kUnsignedLessThanEqual = Uless_equal,
kUnsignedGreaterThanEqual = Ugreater_equal,
kOverflow = overflow,
kNoOverflow = no_overflow,
kZero = eq,
kNotZero = ne,
};
inline internal::Condition AsMasmCondition(Condition cond) {
static_assert(sizeof(internal::Condition) == sizeof(Condition));
return static_cast<internal::Condition>(cond);
}
namespace detail {
#ifdef DEBUG
@ -123,12 +97,12 @@ void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
__ And(scratch, value, Operand(mask));
__ Branch(target, AsMasmCondition(cc), scratch, Operand(zero_reg));
__ Branch(target, cc, scratch, Operand(zero_reg));
}
void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
Label* target, Label::Distance) {
__ Branch(target, AsMasmCondition(cc), lhs, Operand(rhs));
__ Branch(target, cc, lhs, Operand(rhs));
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
InstanceType instance_type,
@ -137,7 +111,7 @@ void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch();
__ GetObjectType(object, map, type);
__ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
__ Branch(target, cc, type, Operand(instance_type));
}
void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
InstanceType instance_type,
@ -150,7 +124,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
__ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE));
}
__ Ld_d(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
__ Branch(target, cc, type, Operand(instance_type));
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
Label* target, Label::Distance) {
@ -158,13 +132,13 @@ void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
Register scratch = temps.AcquireScratch();
__ li(scratch, Operand(smi));
__ SmiUntag(scratch);
__ Branch(target, AsMasmCondition(cc), value, Operand(scratch));
__ Branch(target, cc, value, Operand(scratch));
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
Label* target, Label::Distance) {
__ AssertSmi(lhs);
__ AssertSmi(rhs);
__ Branch(target, AsMasmCondition(cc), lhs, Operand(rhs));
__ Branch(target, cc, lhs, Operand(rhs));
}
void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
MemOperand operand, Label* target,
@ -172,7 +146,7 @@ void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
__ Ld_d(scratch, operand);
__ Branch(target, AsMasmCondition(cc), value, Operand(scratch));
__ Branch(target, cc, value, Operand(scratch));
}
void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
Register value, Label* target,
@ -180,11 +154,11 @@ void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
__ Ld_d(scratch, operand);
__ Branch(target, AsMasmCondition(cc), scratch, Operand(value));
__ Branch(target, cc, scratch, Operand(value));
}
void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
Label* target, Label::Distance) {
__ Branch(target, AsMasmCondition(cc), value, Operand(byte));
__ Branch(target, cc, value, Operand(byte));
}
void BaselineAssembler::Move(interpreter::Register output, Register source) {
Move(RegisterFrameOperand(output), source);
@ -383,8 +357,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
{
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
__ TestCodeTIsMarkedForDeoptimizationAndJump(scratch_and_result, scratch,
eq, on_result);
__ TestCodeIsMarkedForDeoptimizationAndJump(scratch_and_result, scratch, eq,
on_result);
__ li(scratch, __ ClearedValue());
StoreTaggedFieldNoWriteBarrier(
feedback_vector, FeedbackVector::OffsetOfElementAt(slot.ToInt()),
@ -504,8 +478,7 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
__ Sub_d(reg, reg, Operand(case_value_base));
}
__ Branch(&fallthrough, AsMasmCondition(Condition::kUnsignedGreaterThanEqual),
reg, Operand(num_labels));
__ Branch(&fallthrough, kUnsignedGreaterThanEqual, reg, Operand(num_labels));
__ GenerateSwitchTable(reg, num_labels,
[labels](size_t i) { return labels[i]; });
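Note: the bounds check ahead of each switch table uses kUnsignedGreaterThanEqual deliberately. Once case_value_base has been subtracted, a single unsigned compare against num_labels rejects both out-of-range and negative values, since negatives wrap to huge unsigned numbers. A sketch of the trick (InTable is a hypothetical helper, not a V8 function):

    #include <cassert>
    #include <cstdint>

    // One unsigned compare replaces (v < 0 || v >= n), the idiom behind
    // __ Branch(&fallthrough, kUnsignedGreaterThanEqual, reg, num_labels).
    bool InTable(int32_t value, int32_t case_value_base, int32_t num_labels) {
      int32_t v = value - case_value_base;  // __ Sub_d(reg, reg, base)
      return static_cast<uint32_t>(v) < static_cast<uint32_t>(num_labels);
    }

    int main() {
      assert(InTable(5, 3, 4));   // case index 2: in range
      assert(!InTable(2, 3, 4));  // below base: wraps, rejected
      assert(!InTable(7, 3, 4));  // index 4 == num_labels, rejected
      return 0;
    }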


@ -37,32 +37,6 @@ class BaselineAssembler::ScratchRegisterScope {
UseScratchRegisterScope wrapped_scope_;
};
enum class Condition : uint32_t {
kEqual = eq,
kNotEqual = ne,
kLessThan = lt,
kGreaterThan = gt,
kLessThanEqual = le,
kGreaterThanEqual = ge,
kUnsignedLessThan = Uless,
kUnsignedGreaterThan = Ugreater,
kUnsignedLessThanEqual = Uless_equal,
kUnsignedGreaterThanEqual = Ugreater_equal,
kOverflow = overflow,
kNoOverflow = no_overflow,
kZero = eq,
kNotZero = ne,
};
inline internal::Condition AsMasmCondition(Condition cond) {
static_assert(sizeof(internal::Condition) == sizeof(Condition));
return static_cast<internal::Condition>(cond);
}
namespace detail {
#ifdef DEBUG
@ -123,12 +97,12 @@ void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
__ And(scratch, value, Operand(mask));
__ Branch(target, AsMasmCondition(cc), scratch, Operand(zero_reg));
__ Branch(target, cc, scratch, Operand(zero_reg));
}
void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
Label* target, Label::Distance) {
__ Branch(target, AsMasmCondition(cc), lhs, Operand(rhs));
__ Branch(target, cc, lhs, Operand(rhs));
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
InstanceType instance_type,
@ -137,7 +111,7 @@ void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch();
__ GetObjectType(object, map, type);
__ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
__ Branch(target, cc, type, Operand(instance_type));
}
void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
InstanceType instance_type,
@ -150,7 +124,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
__ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE));
}
__ Ld(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
__ Branch(target, cc, type, Operand(instance_type));
}
void BaselineAssembler::JumpIfPointer(Condition cc, Register value,
MemOperand operand, Label* target,
@ -158,7 +132,7 @@ void BaselineAssembler::JumpIfPointer(Condition cc, Register value,
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
__ Ld(scratch, operand);
__ Branch(target, AsMasmCondition(cc), value, Operand(scratch));
__ Branch(target, cc, value, Operand(scratch));
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
Label* target, Label::Distance) {
@ -166,13 +140,13 @@ void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
Register scratch = temps.AcquireScratch();
__ li(scratch, Operand(smi));
__ SmiUntag(scratch);
__ Branch(target, AsMasmCondition(cc), value, Operand(scratch));
__ Branch(target, cc, value, Operand(scratch));
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
Label* target, Label::Distance) {
__ AssertSmi(lhs);
__ AssertSmi(rhs);
__ Branch(target, AsMasmCondition(cc), lhs, Operand(rhs));
__ Branch(target, cc, lhs, Operand(rhs));
}
void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
MemOperand operand, Label* target,
@ -180,7 +154,7 @@ void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
__ Ld(scratch, operand);
__ Branch(target, AsMasmCondition(cc), value, Operand(scratch));
__ Branch(target, cc, value, Operand(scratch));
}
void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
Register value, Label* target,
@ -188,11 +162,11 @@ void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
__ Ld(scratch, operand);
__ Branch(target, AsMasmCondition(cc), scratch, Operand(value));
__ Branch(target, cc, scratch, Operand(value));
}
void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
Label* target, Label::Distance) {
__ Branch(target, AsMasmCondition(cc), value, Operand(byte));
__ Branch(target, cc, value, Operand(byte));
}
void BaselineAssembler::Move(interpreter::Register output, Register source) {
@ -393,8 +367,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
{
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
__ TestCodeTIsMarkedForDeoptimizationAndJump(scratch_and_result, scratch,
eq, on_result);
__ TestCodeIsMarkedForDeoptimizationAndJump(scratch_and_result, scratch, eq,
on_result);
__ li(scratch, __ ClearedValue());
StoreTaggedFieldNoWriteBarrier(
feedback_vector, FeedbackVector::OffsetOfElementAt(slot.ToInt()),
@ -514,8 +488,7 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
__ Dsubu(reg, reg, Operand(case_value_base));
}
__ Branch(&fallthrough, AsMasmCondition(Condition::kUnsignedGreaterThanEqual),
reg, Operand(num_labels));
__ Branch(&fallthrough, kUnsignedGreaterThanEqual, reg, Operand(num_labels));
__ GenerateSwitchTable(reg, num_labels,
[labels](size_t i) { return labels[i]; });


@ -49,85 +49,24 @@ class BaselineAssembler::ScratchRegisterScope {
int registers_used_;
};
// TODO(v8:11429,leszeks): Unify condition names in the MacroAssembler.
enum class Condition : uint32_t {
kEqual,
kNotEqual,
kLessThan,
kGreaterThan,
kLessThanEqual,
kGreaterThanEqual,
kUnsignedLessThan,
kUnsignedGreaterThan,
kUnsignedLessThanEqual,
kUnsignedGreaterThanEqual,
kOverflow,
kNoOverflow,
kZero,
kNotZero
};
inline internal::Condition AsMasmCondition(Condition cond) {
static_assert(sizeof(internal::Condition) == sizeof(Condition));
switch (cond) {
case Condition::kEqual:
return eq;
case Condition::kNotEqual:
return ne;
case Condition::kLessThan:
return lt;
case Condition::kGreaterThan:
return gt;
case Condition::kLessThanEqual:
return le;
case Condition::kGreaterThanEqual:
return ge;
case Condition::kUnsignedLessThan:
return lt;
case Condition::kUnsignedGreaterThan:
return gt;
case Condition::kUnsignedLessThanEqual:
return le;
case Condition::kUnsignedGreaterThanEqual:
return ge;
case Condition::kOverflow:
return overflow;
case Condition::kNoOverflow:
return nooverflow;
case Condition::kZero:
return eq;
case Condition::kNotZero:
return ne;
default:
UNREACHABLE();
}
}
inline bool IsSignedCondition(Condition cond) {
switch (cond) {
case Condition::kEqual:
case Condition::kNotEqual:
case Condition::kLessThan:
case Condition::kGreaterThan:
case Condition::kLessThanEqual:
case Condition::kGreaterThanEqual:
case Condition::kOverflow:
case Condition::kNoOverflow:
case Condition::kZero:
case Condition::kNotZero:
case kEqual:
case kNotEqual:
case kLessThan:
case kGreaterThan:
case kLessThanEqual:
case kGreaterThanEqual:
case kOverflow:
case kNoOverflow:
case kZero:
case kNotZero:
return true;
case Condition::kUnsignedLessThan:
case Condition::kUnsignedGreaterThan:
case Condition::kUnsignedLessThanEqual:
case Condition::kUnsignedGreaterThanEqual:
case kUnsignedLessThan:
case kUnsignedGreaterThan:
case kUnsignedLessThanEqual:
case kUnsignedGreaterThanEqual:
return false;
default:
@ -155,7 +94,7 @@ static void JumpIfHelper(MacroAssembler* assm, Condition cc, Register lhs,
__ CmpU32(lhs, rhs);
}
}
__ b(AsMasmCondition(cc), target);
__ b(check_condition(cc), target);
}
#undef __
@ -221,7 +160,7 @@ void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
Label* target, Label::Distance) {
ASM_CODE_COMMENT(masm_);
__ AndU64(r0, value, Operand(mask), ip, SetRC);
__ b(AsMasmCondition(cc), target, cr0);
__ b(check_condition(cc), target, cr0);
}
void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
@ -232,7 +171,7 @@ void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
} else {
__ CmpU64(lhs, rhs, r0);
}
__ b(AsMasmCondition(cc), target);
__ b(check_condition(cc), target);
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
@ -537,7 +476,7 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
{
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
__ TestCodeTIsMarkedForDeoptimization(scratch_and_result, scratch, r0);
__ TestCodeIsMarkedForDeoptimization(scratch_and_result, scratch, r0);
__ beq(on_result, cr0);
__ mov(scratch, __ ClearedValue());
StoreTaggedFieldNoWriteBarrier(
@ -674,8 +613,7 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
}
// Mostly copied from code-generator-arm.cc
JumpIf(Condition::kUnsignedGreaterThanEqual, reg, Operand(num_labels),
&fallthrough);
JumpIf(kUnsignedGreaterThanEqual, reg, Operand(num_labels), &fallthrough);
// Ensure to emit the constant pool first if necessary.
int entry_size_log2 = 3;
__ ShiftLeftU32(reg, reg, Operand(entry_size_log2));
@ -737,8 +675,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
Label corrected_args_count;
JumpIfHelper(__ masm(), Condition::kGreaterThanEqual, params_size,
actual_params_size, &corrected_args_count);
JumpIfHelper(__ masm(), kGreaterThanEqual, params_size, actual_params_size,
&corrected_args_count);
__ masm()->mr(params_size, actual_params_size);
__ Bind(&corrected_args_count);
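Note: ppc (and s390 below) cannot reuse the cast trick, because there the signedness is not encoded in the condition bits; it lives in the choice of compare instruction. IsSignedCondition selects CmpS32/CmpS64 versus CmpU32/CmpU64, and the branch then tests the ordinary lt/gt bits via check_condition. A behavioural sketch of that split, with plain C++ standing in for the macro assembler (a reading aid, not V8's implementation):

    #include <cstdint>
    #include <cstdio>

    enum class Condition { kLessThan, kUnsignedLessThan };

    bool IsSignedCondition(Condition cc) {
      return cc == Condition::kLessThan;
    }

    // Mirrors JumpIfHelper above: the signedness lives in the compare,
    // not in the branch condition.
    void CompareAndBranch(int64_t lhs, int64_t rhs, Condition cc) {
      bool taken;
      if (IsSignedCondition(cc)) {
        taken = lhs < rhs;  // CmpS64, then branch on lt
      } else {
        taken = static_cast<uint64_t>(lhs) <
                static_cast<uint64_t>(rhs);  // CmpU64, then branch on lt
      }
      std::printf("branch %s\n", taken ? "taken" : "not taken");
    }

    int main() {
      CompareAndBranch(-1, 1, Condition::kLessThan);          // taken
      CompareAndBranch(-1, 1, Condition::kUnsignedLessThan);  // not taken
      return 0;
    }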


@ -36,31 +36,6 @@ class BaselineAssembler::ScratchRegisterScope {
UseScratchRegisterScope wrapped_scope_;
};
enum class Condition : uint32_t {
kEqual = eq,
kNotEqual = ne,
kLessThan = lt,
kGreaterThan = gt,
kLessThanEqual = le,
kGreaterThanEqual = ge,
kUnsignedLessThan = Uless,
kUnsignedGreaterThan = Ugreater,
kUnsignedLessThanEqual = Uless_equal,
kUnsignedGreaterThanEqual = Ugreater_equal,
kOverflow = overflow,
kNoOverflow = no_overflow,
kZero = eq,
kNotZero = ne,
};
inline internal::Condition AsMasmCondition(Condition cond) {
return static_cast<internal::Condition>(cond);
}
namespace detail {
#ifdef DEBUG
@ -121,12 +96,12 @@ void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
ScratchRegisterScope temps(this);
Register tmp = temps.AcquireScratch();
__ And(tmp, value, Operand(mask));
__ Branch(target, AsMasmCondition(cc), tmp, Operand(zero_reg));
__ Branch(target, cc, tmp, Operand(zero_reg));
}
void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
Label* target, Label::Distance) {
__ Branch(target, AsMasmCondition(cc), lhs, Operand(rhs));
__ Branch(target, cc, lhs, Operand(rhs));
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
InstanceType instance_type,
@ -135,7 +110,7 @@ void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch();
__ GetObjectType(object, map, type);
__ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
__ Branch(target, cc, type, Operand(instance_type));
}
void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
InstanceType instance_type,
@ -148,7 +123,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
__ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE));
}
__ LoadWord(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
__ Branch(target, cc, type, Operand(instance_type));
}
void BaselineAssembler::JumpIfPointer(Condition cc, Register value,
MemOperand operand, Label* target,
@ -156,7 +131,7 @@ void BaselineAssembler::JumpIfPointer(Condition cc, Register value,
ScratchRegisterScope temps(this);
Register temp = temps.AcquireScratch();
__ LoadWord(temp, operand);
__ Branch(target, AsMasmCondition(cc), value, Operand(temp));
__ Branch(target, cc, value, Operand(temp));
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
Label* target, Label::Distance) {
@ -164,14 +139,14 @@ void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
Register temp = temps.AcquireScratch();
__ li(temp, Operand(smi));
__ SmiUntag(temp);
__ Branch(target, AsMasmCondition(cc), value, Operand(temp));
__ Branch(target, cc, value, Operand(temp));
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
Label* target, Label::Distance) {
// todo: compress pointer
__ AssertSmi(lhs);
__ AssertSmi(rhs);
__ Branch(target, AsMasmCondition(cc), lhs, Operand(rhs));
__ Branch(target, cc, lhs, Operand(rhs));
}
void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
MemOperand operand, Label* target,
@ -180,7 +155,7 @@ void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
__ LoadWord(scratch, operand);
__ Branch(target, AsMasmCondition(cc), value, Operand(scratch));
__ Branch(target, cc, value, Operand(scratch));
}
void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
Register value, Label* target,
@ -189,11 +164,11 @@ void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
__ LoadWord(scratch, operand);
__ Branch(target, AsMasmCondition(cc), scratch, Operand(value));
__ Branch(target, cc, scratch, Operand(value));
}
void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
Label* target, Label::Distance) {
__ Branch(target, AsMasmCondition(cc), value, Operand(byte));
__ Branch(target, cc, value, Operand(byte));
}
void BaselineAssembler::Move(interpreter::Register output, Register source) {
@ -383,8 +358,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
// Is it marked_for_deoptimization? If yes, clear the slot.
{
ScratchRegisterScope temps(this);
__ JumpIfCodeTIsMarkedForDeoptimization(
scratch_and_result, temps.AcquireScratch(), &clear_slot);
__ JumpIfCodeIsMarkedForDeoptimization(scratch_and_result,
temps.AcquireScratch(), &clear_slot);
Jump(on_result);
}
@ -519,8 +494,7 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
// Mostly copied from code-generator-riscv64.cc
ScratchRegisterScope scope(this);
Label table;
__ Branch(&fallthrough, AsMasmCondition(Condition::kUnsignedGreaterThanEqual),
reg, Operand(num_labels));
__ Branch(&fallthrough, kUnsignedGreaterThanEqual, reg, Operand(num_labels));
int64_t imm64;
imm64 = __ branch_long_offset(&table);
CHECK(is_int32(imm64 + 0x800));


@ -48,85 +48,24 @@ class BaselineAssembler::ScratchRegisterScope {
int registers_used_;
};
// TODO(v8:11429,leszeks): Unify condition names in the MacroAssembler.
enum class Condition : uint32_t {
kEqual,
kNotEqual,
kLessThan,
kGreaterThan,
kLessThanEqual,
kGreaterThanEqual,
kUnsignedLessThan,
kUnsignedGreaterThan,
kUnsignedLessThanEqual,
kUnsignedGreaterThanEqual,
kOverflow,
kNoOverflow,
kZero,
kNotZero
};
inline internal::Condition AsMasmCondition(Condition cond) {
static_assert(sizeof(internal::Condition) == sizeof(Condition));
switch (cond) {
case Condition::kEqual:
return eq;
case Condition::kNotEqual:
return ne;
case Condition::kLessThan:
return lt;
case Condition::kGreaterThan:
return gt;
case Condition::kLessThanEqual:
return le;
case Condition::kGreaterThanEqual:
return ge;
case Condition::kUnsignedLessThan:
return lt;
case Condition::kUnsignedGreaterThan:
return gt;
case Condition::kUnsignedLessThanEqual:
return le;
case Condition::kUnsignedGreaterThanEqual:
return ge;
case Condition::kOverflow:
return overflow;
case Condition::kNoOverflow:
return nooverflow;
case Condition::kZero:
return eq;
case Condition::kNotZero:
return ne;
default:
UNREACHABLE();
}
}
inline bool IsSignedCondition(Condition cond) {
switch (cond) {
case Condition::kEqual:
case Condition::kNotEqual:
case Condition::kLessThan:
case Condition::kGreaterThan:
case Condition::kLessThanEqual:
case Condition::kGreaterThanEqual:
case Condition::kOverflow:
case Condition::kNoOverflow:
case Condition::kZero:
case Condition::kNotZero:
case kEqual:
case kNotEqual:
case kLessThan:
case kGreaterThan:
case kLessThanEqual:
case kGreaterThanEqual:
case kOverflow:
case kNoOverflow:
case kZero:
case kNotZero:
return true;
case Condition::kUnsignedLessThan:
case Condition::kUnsignedGreaterThan:
case Condition::kUnsignedLessThanEqual:
case Condition::kUnsignedGreaterThanEqual:
case kUnsignedLessThan:
case kUnsignedGreaterThan:
case kUnsignedLessThanEqual:
case kUnsignedGreaterThanEqual:
return false;
default:
@ -154,7 +93,7 @@ static void JumpIfHelper(MacroAssembler* assm, Condition cc, Register lhs,
__ CmpU32(lhs, rhs);
}
}
__ b(AsMasmCondition(cc), target);
__ b(check_condition(cc), target);
}
#undef __
@ -220,7 +159,7 @@ void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
Label* target, Label::Distance) {
ASM_CODE_COMMENT(masm_);
__ AndP(r0, value, Operand(mask));
__ b(AsMasmCondition(cc), target);
__ b(check_condition(cc), target);
}
void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
@ -231,7 +170,7 @@ void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
} else {
__ CmpU64(lhs, rhs);
}
__ b(AsMasmCondition(cc), target);
__ b(check_condition(cc), target);
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
@ -550,7 +489,7 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
{
ScratchRegisterScope temps(this);
Register scratch = temps.AcquireScratch();
__ TestCodeTIsMarkedForDeoptimization(scratch_and_result, scratch);
__ TestCodeIsMarkedForDeoptimization(scratch_and_result, scratch);
__ beq(on_result);
__ mov(scratch, __ ClearedValue());
StoreTaggedFieldNoWriteBarrier(
@ -687,8 +626,7 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
// Mostly copied from code-generator-arm.cc
ScratchRegisterScope scope(this);
JumpIf(Condition::kUnsignedGreaterThanEqual, reg, Operand(num_labels),
&fallthrough);
JumpIf(kUnsignedGreaterThanEqual, reg, Operand(num_labels), &fallthrough);
// Ensure to emit the constant pool first if necessary.
int entry_size_log2 = 3;
__ ShiftLeftU32(reg, reg, Operand(entry_size_log2));
@ -745,8 +683,8 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
Label corrected_args_count;
JumpIfHelper(__ masm(), Condition::kGreaterThanEqual, params_size,
actual_params_size, &corrected_args_count);
JumpIfHelper(__ masm(), kGreaterThanEqual, params_size, actual_params_size,
&corrected_args_count);
__ masm()->mov(params_size, actual_params_size);
__ Bind(&corrected_args_count);


@ -46,32 +46,6 @@ class BaselineAssembler::ScratchRegisterScope {
int registers_used_;
};
// TODO(v8:11461): Unify condition names in the MacroAssembler.
enum class Condition : uint32_t {
kEqual = equal,
kNotEqual = not_equal,
kLessThan = less,
kGreaterThan = greater,
kLessThanEqual = less_equal,
kGreaterThanEqual = greater_equal,
kUnsignedLessThan = below,
kUnsignedGreaterThan = above,
kUnsignedLessThanEqual = below_equal,
kUnsignedGreaterThanEqual = above_equal,
kOverflow = overflow,
kNoOverflow = no_overflow,
kZero = zero,
kNotZero = not_zero,
};
inline internal::Condition AsMasmCondition(Condition cond) {
return static_cast<internal::Condition>(cond);
}
namespace detail {
#define __ masm_->
@ -130,13 +104,13 @@ void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
} else {
__ testl(value, Immediate(mask));
}
__ j(AsMasmCondition(cc), target, distance);
__ j(cc, target, distance);
}
void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
Label* target, Label::Distance distance) {
__ cmpq(lhs, rhs);
__ j(AsMasmCondition(cc), target, distance);
__ j(cc, target, distance);
}
void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
InstanceType instance_type,
@ -144,7 +118,7 @@ void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
Label::Distance distance) {
__ AssertNotSmi(object);
__ CmpObjectType(object, instance_type, map);
__ j(AsMasmCondition(cc), target, distance);
__ j(cc, target, distance);
}
void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
InstanceType instance_type,
@ -156,30 +130,30 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
__ Assert(equal, AbortReason::kUnexpectedValue);
}
__ CmpInstanceType(map, instance_type);
__ j(AsMasmCondition(cc), target, distance);
__ j(cc, target, distance);
}
void BaselineAssembler::JumpIfPointer(Condition cc, Register value,
MemOperand operand, Label* target,
Label::Distance distance) {
__ cmpq(value, operand);
__ j(AsMasmCondition(cc), target, distance);
__ j(cc, target, distance);
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Smi smi,
Label* target, Label::Distance distance) {
__ SmiCompare(lhs, smi);
__ j(AsMasmCondition(cc), target, distance);
__ j(cc, target, distance);
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
Label* target, Label::Distance distance) {
__ SmiCompare(lhs, rhs);
__ j(AsMasmCondition(cc), target, distance);
__ j(cc, target, distance);
}
void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
Label* target,
Label::Distance distance) {
__ cmpq(left, Immediate(right));
__ j(AsMasmCondition(cc), target, distance);
__ j(cc, target, distance);
}
// cmp_tagged
@ -187,18 +161,18 @@ void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
MemOperand operand, Label* target,
Label::Distance distance) {
__ cmp_tagged(value, operand);
__ j(AsMasmCondition(cc), target, distance);
__ j(cc, target, distance);
}
void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
Register value, Label* target,
Label::Distance distance) {
__ cmp_tagged(operand, value);
__ j(AsMasmCondition(cc), target, distance);
__ j(cc, target, distance);
}
void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
Label* target, Label::Distance distance) {
__ cmpb(value, Immediate(byte));
__ j(AsMasmCondition(cc), target, distance);
__ j(cc, target, distance);
}
void BaselineAssembler::Move(interpreter::Register output, Register source) {
@ -403,24 +377,8 @@ void BaselineAssembler::TryLoadOptimizedOsrCode(Register scratch_and_result,
FeedbackSlot slot,
Label* on_result,
Label::Distance distance) {
Label fallthrough;
LoadTaggedPointerField(scratch_and_result, feedback_vector,
FeedbackVector::OffsetOfElementAt(slot.ToInt()));
__ LoadWeakValue(scratch_and_result, &fallthrough);
// Is it marked_for_deoptimization? If yes, clear the slot.
{
DCHECK(!AreAliased(scratch_and_result, kScratchRegister));
__ TestCodeTIsMarkedForDeoptimization(scratch_and_result, kScratchRegister);
__ j(equal, on_result, distance);
__ StoreTaggedField(
FieldOperand(feedback_vector,
FeedbackVector::OffsetOfElementAt(slot.ToInt())),
__ ClearedValue());
}
__ bind(&fallthrough);
__ Move(scratch_and_result, 0);
__ MacroAssembler::TryLoadOptimizedOsrCode(
scratch_and_result, feedback_vector, slot, on_result, distance);
}
void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
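Note: this x64 hunk, like the arm64 one earlier, deletes the open-coded weak-slot dance (load the feedback slot, LoadWeakValue, test marked_for_deoptimization, clear the slot, fall through with 0) and delegates to a single MacroAssembler::TryLoadOptimizedOsrCode per architecture. A control-flow sketch reconstructed from the deleted bodies; an approximation for reading the diff, not V8 source:

    #include <cstdio>

    struct Slot {
      bool cleared;           // weak reference already gone
      bool marked_for_deopt;  // code object no longer usable
    };

    // Returns true when control would jump to on_result with live code.
    bool TryLoadOptimizedOsrCode(Slot slot, int* scratch_and_result) {
      if (slot.cleared) {  // LoadWeakValue took the &fallthrough path
        *scratch_and_result = 0;
        return false;
      }
      if (!slot.marked_for_deopt) {
        return true;  // j(equal, on_result): deopt bit clear
      }
      // Marked for deoptimization: clear the stale feedback slot, then
      // fall through with 0 (StoreTaggedField of ClearedValue above).
      std::puts("clearing stale feedback slot");
      *scratch_and_result = 0;
      return false;
    }

    int main() {
      int result = -1;
      Slot live{false, false};
      std::printf("live -> %d\n", TryLoadOptimizedOsrCode(live, &result));
      return 0;
    }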


@ -531,11 +531,11 @@ char* ToStringFormatter::ProcessLevel(RecursionLevel* level, Digits chunk,
// Step 5: Recurse.
char* end_of_right_part = ProcessLevel(level->next_, right, out, false);
if (processor_->should_terminate()) return out;
// The recursive calls are required and hence designed to write exactly as
// many characters as their level is responsible for.
DCHECK(end_of_right_part == out - level->char_count_);
USE(end_of_right_part);
if (processor_->should_terminate()) return out;
// We intentionally don't use {end_of_right_part} here to be prepared for
// potential future multi-threaded execution.
return ProcessLevel(level->next_, left, out - level->char_count_,
@ -575,6 +575,7 @@ void ProcessorImpl::ToStringImpl(char* out, int* out_length, Digits X,
}
int excess = formatter.Finish();
*out_length -= excess;
memset(out + *out_length, 0, excess);
}
Status Processor::ToString(char* out, int* out_length, Digits X, int radix,


@ -327,7 +327,7 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Label* is_baseline) {
ASM_CODE_COMMENT(masm);
Label done;
__ CompareObjectType(sfi_data, scratch1, scratch1, CODET_TYPE);
__ CompareObjectType(sfi_data, scratch1, scratch1, CODE_TYPE);
if (v8_flags.debug_code) {
Label not_baseline;
__ b(ne, &not_baseline);
@ -1500,7 +1500,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ ldr(r2,
FieldMemOperand(r2, InterpreterData::kInterpreterTrampolineOffset));
__ add(r2, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
__ LoadCodeEntry(r2, r2);
__ b(&trampoline_loaded);
__ bind(&builtin_trampoline);
@ -1714,8 +1714,8 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
Label jump_to_optimized_code;
{
// If maybe_target_code is not null, no need to call into runtime. A
// precondition here is: if maybe_target_code is a Code object, it must NOT
// be marked_for_deoptimization (callers must ensure this).
// precondition here is: if maybe_target_code is a InstructionStream object,
// it must NOT be marked_for_deoptimization (callers must ensure this).
__ cmp(maybe_target_code, Operand(Smi::zero()));
__ b(ne, &jump_to_optimized_code);
}
@ -1758,14 +1758,20 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
__ LeaveFrame(StackFrame::STUB);
}
__ LoadCodeInstructionStreamNonBuiltin(r0, r0);
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ ldr(r1,
FieldMemOperand(r0, Code::kDeoptimizationDataOrInterpreterDataOffset));
__ ldr(
r1,
FieldMemOperand(
r0, InstructionStream::kDeoptimizationDataOrInterpreterDataOffset));
{
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
__ add(r0, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start
__ add(r0, r0,
Operand(InstructionStream::kHeaderSize -
kHeapObjectTag)); // InstructionStream start
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
@ -1997,7 +2003,8 @@ void Generate_AllocateSpaceAndShiftExistingArguments(
} // namespace
// static
// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
// TODO(v8:11615): Observe InstructionStream::kMaxArguments in
// CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
// ----------- S t a t e -------------
@ -2727,8 +2734,7 @@ void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
SaveFPRegsMode save_doubles, ArgvMode argv_mode,
bool builtin_exit_frame) {
ArgvMode argv_mode, bool builtin_exit_frame) {
// Called from JavaScript; parameters are on stack as if calling JS function.
// r0: number of arguments including receiver
// r1: pointer to builtin function
@ -2753,8 +2759,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Enter the exit frame that transitions from JavaScript to C++.
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(
save_doubles == SaveFPRegsMode::kSave, 0,
builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
0, builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
// Store a copy of argc in callee-saved registers for later.
__ mov(r4, Operand(r0));
@ -2815,7 +2820,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
? no_reg
// Callee-saved register r4 still holds argc.
: r4;
__ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc);
__ LeaveExitFrame(argc, false);
__ mov(pc, lr);
// Handling of exception.
@ -3050,7 +3055,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
DCHECK_EQ(stack_space, 0);
__ ldr(r4, *stack_space_operand);
}
__ LeaveExitFrame(false, r4, stack_space_operand != nullptr);
__ LeaveExitFrame(r4, stack_space_operand != nullptr);
// Check if the function scheduled an exception.
__ LoadRoot(r4, RootIndex::kTheHoleValue);
@ -3155,9 +3160,8 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// Allocate the v8::Arguments structure in the arguments' space since
// it's not controlled by GC.
static constexpr int kApiStackSpace = 4;
static constexpr bool kDontSaveDoubles = false;
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(kDontSaveDoubles, kApiStackSpace);
__ EnterExitFrame(kApiStackSpace, StackFrame::EXIT);
// FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
// Arguments are after the return address (pushed by EnterExitFrame()).
@ -3238,7 +3242,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
const int kApiStackSpace = 1;
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, kApiStackSpace);
__ EnterExitFrame(kApiStackSpace, StackFrame::EXIT);
// Create v8::PropertyCallbackInfo object on the stack and initialize
// it's args_ field.
@ -3262,8 +3266,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
// The sole purpose of DirectCEntry is for movable callers (e.g. any general
// purpose Code object) to be able to call into C functions that may trigger
// GC and thus move the caller.
// purpose InstructionStream object) to be able to call into C functions that
// may trigger GC and thus move the caller.
//
// DirectCEntry places the return address on the stack (updated by the GC),
// making the call GC safe. The irregexp backend relies on this.
@ -3557,7 +3561,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
Register closure = r1;
__ ldr(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
// Get the Code object from the shared function info.
// Get the InstructionStream object from the shared function info.
Register code_obj = r4;
__ ldr(code_obj,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
@ -3568,7 +3572,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// always have baseline code.
if (!is_osr) {
Label start_with_baseline;
__ CompareObjectType(code_obj, r3, r3, CODET_TYPE);
__ CompareObjectType(code_obj, r3, r3, CODE_TYPE);
__ b(eq, &start_with_baseline);
// Start with bytecode as there is no baseline code.
@ -3581,13 +3585,14 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Start with baseline code.
__ bind(&start_with_baseline);
} else if (v8_flags.debug_code) {
__ CompareObjectType(code_obj, r3, r3, CODET_TYPE);
__ CompareObjectType(code_obj, r3, r3, CODE_TYPE);
__ Assert(eq, AbortReason::kExpectedBaselineData);
}
if (v8_flags.debug_code) {
AssertCodeIsBaseline(masm, code_obj, r3);
}
__ LoadCodeInstructionStreamNonBuiltin(code_obj, code_obj);
// Load the feedback vector.
Register feedback_vector = r2;
@ -3662,9 +3667,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
UseScratchRegisterScope temps(masm);
ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister, temps.Acquire());
Generate_OSREntry(masm, code_obj,
Operand(Code::kHeaderSize - kHeapObjectTag));
Operand(InstructionStream::kHeaderSize - kHeapObjectTag));
} else {
__ add(code_obj, code_obj, Operand(Code::kHeaderSize - kHeapObjectTag));
__ add(code_obj, code_obj,
Operand(InstructionStream::kHeaderSize - kHeapObjectTag));
__ Jump(code_obj);
}
__ Trap(); // Unreachable.


@ -388,19 +388,19 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
__ Unreachable();
}
static void AssertCodeTIsBaselineAllowClobber(MacroAssembler* masm,
Register code, Register scratch) {
static void AssertCodeIsBaselineAllowClobber(MacroAssembler* masm,
Register code, Register scratch) {
// Verify that the code kind is baseline code via the CodeKind.
__ Ldr(scratch, FieldMemOperand(code, CodeT::kFlagsOffset));
__ DecodeField<CodeT::KindField>(scratch);
__ Ldr(scratch, FieldMemOperand(code, Code::kFlagsOffset));
__ DecodeField<Code::KindField>(scratch);
__ Cmp(scratch, Operand(static_cast<int>(CodeKind::BASELINE)));
__ Assert(eq, AbortReason::kExpectedBaselineData);
}
static void AssertCodeTIsBaseline(MacroAssembler* masm, Register code,
Register scratch) {
static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
Register scratch) {
DCHECK(!AreAliased(code, scratch));
return AssertCodeTIsBaselineAllowClobber(masm, code, scratch);
return AssertCodeIsBaselineAllowClobber(masm, code, scratch);
}
// TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
@ -411,11 +411,11 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Label* is_baseline) {
ASM_CODE_COMMENT(masm);
Label done;
__ CompareObjectType(sfi_data, scratch1, scratch1, CODET_TYPE);
__ CompareObjectType(sfi_data, scratch1, scratch1, CODE_TYPE);
if (v8_flags.debug_code) {
Label not_baseline;
__ B(ne, &not_baseline);
AssertCodeTIsBaseline(masm, sfi_data, scratch1);
AssertCodeIsBaseline(masm, sfi_data, scratch1);
__ B(eq, is_baseline);
__ Bind(&not_baseline);
} else {
@ -550,7 +550,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Mov(x1, x4);
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
__ LoadTaggedPointerField(x2, FieldMemOperand(x1, JSFunction::kCodeOffset));
__ JumpCodeTObject(x2);
__ JumpCodeObject(x2);
}
__ Bind(&prepare_step_in_if_stepping);
@ -758,7 +758,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
//
// Invoke the function by calling through JS entry trampoline builtin and
// pop the faked function when we return.
Handle<CodeT> trampoline_code =
Handle<Code> trampoline_code =
masm->isolate()->builtins()->code_handle(entry_trampoline);
__ Call(trampoline_code, RelocInfo::CODE_TARGET);
@ -932,9 +932,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// x28 : pointer cage base register (kPtrComprCageBaseRegister).
// x29 : frame pointer (fp).
Handle<CodeT> builtin = is_construct
? BUILTIN_CODE(masm->isolate(), Construct)
: masm->isolate()->builtins()->Call();
Handle<Code> builtin = is_construct
? BUILTIN_CODE(masm->isolate(), Construct)
: masm->isolate()->builtins()->Call();
__ Call(builtin, RelocInfo::CODE_TARGET);
// Exit the JS internal frame and remove the parameters (except function),
@ -1503,7 +1503,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(
__ Move(x2, kInterpreterBytecodeArrayRegister);
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
__ ReplaceClosureCodeWithOptimizedCode(x2, closure);
__ JumpCodeTObject(x2);
__ JumpCodeObject(x2);
__ bind(&install_baseline_code);
__ GenerateTailCallToReturnedCode(Runtime::kInstallBaselineCode);
@ -1743,7 +1743,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
__ LoadTaggedPointerField(
x1, FieldMemOperand(x1, InterpreterData::kInterpreterTrampolineOffset));
__ LoadCodeTEntry(x1, x1);
__ LoadCodeEntry(x1, x1);
__ B(&trampoline_loaded);
__ Bind(&builtin_trampoline);
@ -1951,8 +1951,8 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
Label jump_to_optimized_code;
{
// If maybe_target_code is not null, no need to call into runtime. A
// precondition here is: if maybe_target_code is a Code object, it must NOT
// be marked_for_deoptimization (callers must ensure this).
// precondition here is: if maybe_target_code is an InstructionStream object,
// it must NOT be marked_for_deoptimization (callers must ensure this).
__ CompareTaggedAndBranch(x0, Smi::zero(), ne, &jump_to_optimized_code);
}
@ -1993,15 +1993,14 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
__ LeaveFrame(StackFrame::STUB);
}
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
__ LoadCodeDataContainerCodeNonBuiltin(x0, x0);
}
__ LoadCodeInstructionStreamNonBuiltin(x0, x0);
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ LoadTaggedPointerField(
x1,
FieldMemOperand(x0, Code::kDeoptimizationDataOrInterpreterDataOffset));
FieldMemOperand(
x0, InstructionStream::kDeoptimizationDataOrInterpreterDataOffset));
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
@ -2012,7 +2011,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source,
// Compute the target address = code_obj + header_size + osr_offset
// <entry_addr> = <code_obj> + #header_size + <osr_offset>
__ Add(x0, x0, x1);
Generate_OSREntry(masm, x0, Code::kHeaderSize - kHeapObjectTag);
Generate_OSREntry(masm, x0, InstructionStream::kHeaderSize - kHeapObjectTag);
}
} // namespace
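The three-step lookup above (<deopt_data>, <osr_offset>, <entry_addr>) boils down to plain pointer arithmetic once the InstructionStream is loaded. A minimal C++ sketch, with illustrative constants standing in for InstructionStream::kHeaderSize and the heap-object tag (not the real layout values):

#include <cstdint>

using Address = uintptr_t;
// Illustrative stand-ins; the real values come from the V8 object layout.
constexpr int kHeapObjectTagSketch = 1;  // tagged heap pointers carry a 1 bit
constexpr int kHeaderSizeSketch = 64;    // stand-in for InstructionStream::kHeaderSize

// <entry_addr> = <code_obj> + #header_size + <osr_offset>
Address OsrEntryAddress(Address tagged_code_obj, int osr_pc_offset) {
  Address object_start = tagged_code_obj - kHeapObjectTagSketch;
  return object_start + kHeaderSizeSketch + osr_pc_offset;
}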
@ -2334,9 +2333,10 @@ void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc,
} // namespace
// static
// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
// TODO(v8:11615): Observe InstructionStream::kMaxArguments in
// CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<CodeT> code) {
Handle<Code> code) {
// ----------- S t a t e -------------
// -- x1 : target
// -- x0 : number of parameters on the stack
@ -2411,7 +2411,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// static
void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
CallOrConstructMode mode,
Handle<CodeT> code) {
Handle<Code> code) {
// ----------- S t a t e -------------
// -- x0 : the number of arguments
// -- x3 : the new.target (for [[Construct]] calls)
@ -4751,8 +4751,7 @@ void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
SaveFPRegsMode save_doubles, ArgvMode argv_mode,
bool builtin_exit_frame) {
ArgvMode argv_mode, bool builtin_exit_frame) {
// The Abort mechanism relies on CallRuntime, which in turn relies on
// CEntry, so until this stub has been generated, we have to use a
// fall-back Abort mechanism.
@ -4808,7 +4807,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
// Enter the exit frame.
FrameScope scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(
save_doubles == SaveFPRegsMode::kSave, x10, extra_stack_space,
x10, extra_stack_space,
builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
// Poke callee-saved registers into reserved space.
@ -4889,7 +4888,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ Peek(argc, 2 * kSystemPointerSize);
__ Peek(target, 3 * kSystemPointerSize);
__ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, x10, x9);
__ LeaveExitFrame(x10, x9);
if (argv_mode == ArgvMode::kStack) {
// Drop the remaining stack slots and return from the stub.
__ DropArguments(x11);
@ -5137,7 +5136,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
__ Ldr(x19, *stack_space_operand);
}
__ LeaveExitFrame(false, x1, x5);
__ LeaveExitFrame(x1, x5);
// Check if the function scheduled an exception.
__ Mov(x5, ExternalReference::scheduled_exception_address(isolate));
@ -5249,11 +5248,10 @@ void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
// Allocate the v8::Arguments structure in the arguments' space, since it's
// not controlled by GC.
static constexpr int kApiStackSpace = 4;
static constexpr bool kDontSaveDoubles = false;
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(kDontSaveDoubles, x10,
kApiStackSpace + kCallApiFunctionSpillSpace);
__ EnterExitFrame(x10, kApiStackSpace + kCallApiFunctionSpillSpace,
StackFrame::EXIT);
// FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
// Arguments are after the return address (pushed by EnterExitFrame()).
@ -5349,7 +5347,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
const int kApiStackSpace = 1;
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
__ EnterExitFrame(x10, kApiStackSpace + kCallApiFunctionSpillSpace,
StackFrame::EXIT);
// Create v8::PropertyCallbackInfo object on the stack and initialize
// its args_ field.
@ -5379,8 +5378,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
// The sole purpose of DirectCEntry is for movable callers (e.g. any general
// purpose Code object) to be able to call into C functions that may trigger
// GC and thus move the caller.
// purpose InstructionStream object) to be able to call into C functions that
// may trigger GC and thus move the caller.
//
// DirectCEntry places the return address on the stack (updated by the GC),
// making the call GC safe. The irregexp backend relies on this.
@ -5695,7 +5694,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
Register closure = x1;
__ Ldr(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
// Get the Code object from the shared function info.
// Get the InstructionStream object from the shared function info.
Register code_obj = x22;
__ LoadTaggedPointerField(
code_obj,
@ -5708,7 +5707,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// always have baseline code.
if (!is_osr) {
Label start_with_baseline;
__ CompareObjectType(code_obj, x3, x3, CODET_TYPE);
__ CompareObjectType(code_obj, x3, x3, CODE_TYPE);
__ B(eq, &start_with_baseline);
// Start with bytecode as there is no baseline code.
@ -5721,16 +5720,14 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Start with baseline code.
__ bind(&start_with_baseline);
} else if (v8_flags.debug_code) {
__ CompareObjectType(code_obj, x3, x3, CODET_TYPE);
__ CompareObjectType(code_obj, x3, x3, CODE_TYPE);
__ Assert(eq, AbortReason::kExpectedBaselineData);
}
if (v8_flags.debug_code) {
AssertCodeTIsBaseline(masm, code_obj, x3);
}
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
__ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
AssertCodeIsBaseline(masm, code_obj, x3);
}
__ LoadCodeInstructionStreamNonBuiltin(code_obj, code_obj);
// Load the feedback vector.
Register feedback_vector = x2;
@ -5803,9 +5800,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
if (is_osr) {
ResetBytecodeAge(masm, kInterpreterBytecodeArrayRegister);
Generate_OSREntry(masm, code_obj, Code::kHeaderSize - kHeapObjectTag);
Generate_OSREntry(masm, code_obj,
InstructionStream::kHeaderSize - kHeapObjectTag);
} else {
__ Add(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
__ Add(code_obj, code_obj, InstructionStream::kHeaderSize - kHeapObjectTag);
__ Jump(code_obj);
}
__ Trap(); // Unreachable.


@ -69,13 +69,23 @@ transitioning builtin ConvertToLocaleString(
try {
const callable: Callable = Cast<Callable>(prop) otherwise TypeError;
let result: JSAny;
if (IsNullOrUndefined(locales)) {
result = Call(context, callable, element);
} else if (IsNullOrUndefined(options)) {
result = Call(context, callable, element, locales);
} else {
// According to the ECMA-402 specification, the optional arguments locales
// and options must be passed.
@if(V8_INTL_SUPPORT) {
result = Call(context, callable, element, locales, options);
}
// Without the ECMA-402 internationalization API, the optional arguments
// must not be passed.
// See: https://tc39.es/ecma262/#sec-array.prototype.tolocalestring
@ifnot(V8_INTL_SUPPORT) {
result = Call(context, callable, element);
// Use the remaining parameters.
const _locales = locales;
const _options = options;
}
return ToString_Inline(result);
} label TypeError {
ThrowTypeError(MessageTemplate::kCalledNonCallable, prop);
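The @if/@ifnot annotations above are Torque's compile-time conditionals, so only one call shape survives into any given build. A rough C++ analogue of the dispatch, using hypothetical stand-in names:

#include <cstdio>

// Hypothetical stand-in for invoking element.toLocaleString(...).
void CallToLocaleString(const char* element, const char* locales,
                        const char* options) {
#ifdef V8_INTL_SUPPORT
  // ECMA-402 builds forward both optional arguments unconditionally.
  std::printf("toLocaleString(%s, %s, %s)\n", element, locales, options);
#else
  // Without ECMA-402, the optional arguments must not be passed
  // (https://tc39.es/ecma262/#sec-array.prototype.tolocalestring).
  (void)locales;
  (void)options;
  std::printf("toLocaleString(%s)\n", element);
#endif
}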


@ -229,7 +229,7 @@ type RawPtr generates 'TNode<RawPtrT>' constexpr 'Address';
type RawPtr<To: type> extends RawPtr;
type ExternalPointer
generates 'TNode<ExternalPointerT>' constexpr 'ExternalPointer_t';
extern class Code extends HeapObject;
extern class InstructionStream extends HeapObject;
type BuiltinPtr extends Smi generates 'TNode<BuiltinPtr>';
type Number = Smi|HeapNumber;
@ -488,6 +488,8 @@ const kFixedDoubleArrayMaxLength:
constexpr int31 generates 'FixedDoubleArray::kMaxLength';
const kObjectAlignmentMask: constexpr intptr
generates 'kObjectAlignmentMask';
const kObjectAlignment: constexpr intptr
generates 'kObjectAlignment';
const kMinAddedElementsCapacity:
constexpr int31 generates 'JSObject::kMinAddedElementsCapacity';
const kMaxFastArrayLength:


@ -1808,7 +1808,7 @@ TF_BUILTIN(ArrayConstructor, ArrayBuiltinsAssembler) {
void ArrayBuiltinsAssembler::TailCallArrayConstructorStub(
const Callable& callable, TNode<Context> context, TNode<JSFunction> target,
TNode<HeapObject> allocation_site_or_undefined, TNode<Int32T> argc) {
TNode<CodeT> code = HeapConstant(callable.code());
TNode<Code> code = HeapConstant(callable.code());
// We are going to call ArrayNoArgumentsConstructor or
// ArraySingleArgumentsConstructor here, which in addition to the register arguments


@ -1849,5 +1849,12 @@ BUILTIN(ArrayPrototypeGroupToMap) {
return *map;
}
BUILTIN(ArrayFromAsync) {
HandleScope scope(isolate);
DCHECK(v8_flags.harmony_array_from_async);
return ReadOnlyRoots(isolate).undefined_value();
}
} // namespace internal
} // namespace v8


@ -180,7 +180,7 @@ void AsyncBuiltinsAssembler::InitializeNativeClosure(
// which almost doubles the size of `await` builtins (unnecessarily).
TNode<Smi> builtin_id = LoadObjectField<Smi>(
shared_info, SharedFunctionInfo::kFunctionDataOffset);
TNode<CodeT> code = LoadBuiltin(builtin_id);
TNode<Code> code = LoadBuiltin(builtin_id);
StoreObjectFieldNoWriteBarrier(function, JSFunction::kCodeOffset, code);
}


@ -510,6 +510,19 @@ TF_BUILTIN(AsyncGeneratorResumeNext, AsyncGeneratorBuiltinsAssembler) {
// Remember the {resume_type} for the {generator}.
StoreObjectFieldNoWriteBarrier(
generator, JSGeneratorObject::kResumeModeOffset, resume_type);
Label if_instrumentation(this, Label::kDeferred),
if_instrumentation_done(this);
Branch(IsDebugActive(), &if_instrumentation, &if_instrumentation_done);
BIND(&if_instrumentation);
{
const TNode<JSPromise> promise = LoadObjectField<JSPromise>(
next, AsyncGeneratorRequest::kPromiseOffset);
CallRuntime(Runtime::kDebugPushPromise, context, promise);
Goto(&if_instrumentation_done);
}
BIND(&if_instrumentation_done);
CallStub(CodeFactory::ResumeGenerator(isolate()), context,
LoadValueFromAsyncGeneratorRequest(next), generator);
var_state = LoadGeneratorState(generator);


@ -8,6 +8,8 @@ namespace bigint {
const kPositiveSign: uint32 = 0;
const kNegativeSign: uint32 = 1;
const kGreaterThan: intptr = 1;
const kLessThan: intptr = -1;
const kMustRoundDownBitShift: uint32 = 30;
@ -798,6 +800,61 @@ builtin BigIntEqual(implicit context: Context)(x: BigInt, y: BigInt): Boolean {
return True;
}
// Returns r such that r < 0 if |x| < |y|; r > 0 if |x| > |y|;
// r == 0 if |x| == |y|.
macro BigIntCompareAbsolute(implicit context: Context)(
x: BigInt, y: BigInt): intptr {
const xlength = ReadBigIntLength(x);
const ylength = ReadBigIntLength(y);
const diff = xlength - ylength;
if (diff != 0) {
return diff;
}
// case: {xlength} == {ylength}
for (let i: intptr = xlength - 1; i >= 0; --i) {
const xdigit = LoadBigIntDigit(x, i);
const ydigit = LoadBigIntDigit(y, i);
if (xdigit != ydigit) {
return (xdigit > ydigit) ? kGreaterThan : kLessThan;
}
}
return 0;
}
// Returns r such that r < 0 if x < y; r > 0 if x > y; r == 0 if x == y.
macro BigIntCompare(implicit context: Context)(x: BigInt, y: BigInt): intptr {
const xsign = ReadBigIntSign(x);
const ysign = ReadBigIntSign(y);
if (xsign != ysign) {
return xsign == kPositiveSign ? kGreaterThan : kLessThan;
}
// case: {xsign} == {ysign}
const diff = BigIntCompareAbsolute(x, y);
return xsign == kPositiveSign ? diff : 0 - diff;
}
builtin BigIntLessThan(implicit context: Context)(
x: BigInt, y: BigInt): Boolean {
return BigIntCompare(x, y) < 0 ? True : False;
}
builtin BigIntGreaterThan(implicit context: Context)(
x: BigInt, y: BigInt): Boolean {
return BigIntCompare(x, y) > 0 ? True : False;
}
builtin BigIntLessThanOrEqual(implicit context: Context)(
x: BigInt, y: BigInt): Boolean {
return BigIntCompare(x, y) <= 0 ? True : False;
}
builtin BigIntGreaterThanOrEqual(implicit context: Context)(
x: BigInt, y: BigInt): Boolean {
return BigIntCompare(x, y) >= 0 ? True : False;
}
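The Torque above compares signs first and falls back to a most-significant-digit-first magnitude scan. The same scheme as a self-contained C++ sketch, with a hypothetical limb layout (least significant digit first):

#include <cstddef>
#include <cstdint>
#include <vector>

struct FakeBigInt {
  bool negative;
  std::vector<uint64_t> digits;  // least significant limb first
};

// r < 0 if |x| < |y|; r > 0 if |x| > |y|; r == 0 if |x| == |y|.
int CompareAbsolute(const FakeBigInt& x, const FakeBigInt& y) {
  if (x.digits.size() != y.digits.size())
    return x.digits.size() < y.digits.size() ? -1 : 1;
  for (size_t i = x.digits.size(); i-- > 0;) {
    if (x.digits[i] != y.digits[i])
      return x.digits[i] < y.digits[i] ? -1 : 1;
  }
  return 0;
}

// r < 0 if x < y; r > 0 if x > y; r == 0 if x == y.
int Compare(const FakeBigInt& x, const FakeBigInt& y) {
  if (x.negative != y.negative) return x.negative ? -1 : 1;
  int diff = CompareAbsolute(x, y);
  return x.negative ? -diff : diff;  // flip the order for negative values
}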
builtin BigIntUnaryMinus(implicit context: Context)(bigint: BigInt): BigInt {
const length = ReadBigIntLength(bigint);


@ -24,6 +24,7 @@ namespace internal {
isolate->factory()->NewStringFromAsciiChecked(method))); \
} \
Handle<CallSiteInfo> frame = Handle<CallSiteInfo>::cast(it.GetDataValue())
namespace {
Object PositiveNumberOrNull(int value, Isolate* isolate) {
@ -31,6 +32,10 @@ Object PositiveNumberOrNull(int value, Isolate* isolate) {
return ReadOnlyRoots(isolate).null_value();
}
bool NativeContextIsForShadowRealm(NativeContext native_context) {
return native_context.scope_info().scope_type() == SHADOW_REALM_SCOPE;
}
} // namespace
BUILTIN(CallSitePrototypeGetColumnNumber) {
@ -69,8 +74,13 @@ BUILTIN(CallSitePrototypeGetFunction) {
static const char method_name[] = "getFunction";
HandleScope scope(isolate);
CHECK_CALLSITE(frame, method_name);
if (isolate->raw_native_context().scope_info().scope_type() ==
SHADOW_REALM_SCOPE) {
// ShadowRealms have a boundary: references to outside objects must not exist
// in the ShadowRealm, and references to ShadowRealm objects must not exist
// outside the ShadowRealm.
if (NativeContextIsForShadowRealm(isolate->raw_native_context()) ||
(frame->function().IsJSFunction() &&
NativeContextIsForShadowRealm(
JSFunction::cast(frame->function()).native_context()))) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate,
NewTypeError(
@ -136,8 +146,13 @@ BUILTIN(CallSitePrototypeGetThis) {
static const char method_name[] = "getThis";
HandleScope scope(isolate);
CHECK_CALLSITE(frame, method_name);
if (isolate->raw_native_context().scope_info().scope_type() ==
SHADOW_REALM_SCOPE) {
// ShadowRealms have a boundary: references to outside objects must not exist
// in the ShadowRealm, and references to ShadowRealm objects must not exist
// outside the ShadowRealm.
if (NativeContextIsForShadowRealm(isolate->raw_native_context()) ||
(frame->function().IsJSFunction() &&
NativeContextIsForShadowRealm(
JSFunction::cast(frame->function()).native_context()))) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate,
NewTypeError(


@ -435,8 +435,8 @@ void BaseCollectionsAssembler::GotoIfCannotBeHeldWeakly(
// TODO(v8:12547) Shared structs and arrays should only be able to point
// to shared values in weak collections. For now, disallow them as weak
// collection keys.
GotoIf(IsJSSharedStructInstanceType(instance_type), if_cannot_be_held_weakly);
GotoIf(IsJSSharedArrayInstanceType(instance_type), if_cannot_be_held_weakly);
GotoIf(IsAlwaysSharedSpaceJSObjectInstanceType(instance_type),
if_cannot_be_held_weakly);
Goto(&end);
Bind(&check_symbol_key);
GotoIfNot(HasHarmonySymbolAsWeakmapKeyFlag(), if_cannot_be_held_weakly);
@ -1293,10 +1293,7 @@ void CollectionsBuiltinsAssembler::SameValueZeroString(
GotoIf(TaggedIsSmi(candidate_key), if_not_same);
GotoIfNot(IsString(CAST(candidate_key)), if_not_same);
Branch(TaggedEqual(CallBuiltin(Builtin::kStringEqual, NoContextConstant(),
key_string, candidate_key),
TrueConstant()),
if_same, if_not_same);
BranchIfStringEqual(key_string, CAST(candidate_key), if_same, if_not_same);
}
void CollectionsBuiltinsAssembler::SameValueZeroBigInt(


@ -252,8 +252,7 @@ TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) {
StoreObjectFieldNoWriteBarrier(result, JSFunction::kSharedFunctionInfoOffset,
shared_function_info);
StoreObjectFieldNoWriteBarrier(result, JSFunction::kContextOffset, context);
TNode<CodeT> lazy_builtin =
HeapConstant(BUILTIN_CODE(isolate(), CompileLazy));
TNode<Code> lazy_builtin = HeapConstant(BUILTIN_CODE(isolate(), CompileLazy));
StoreObjectFieldNoWriteBarrier(result, JSFunction::kCodeOffset, lazy_builtin);
Return(result);
}


@ -0,0 +1,30 @@
// Copyright 2023 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/builtins/builtins-data-view-gen.h"
#include "src/builtins/builtins-utils-gen.h"
namespace v8 {
namespace internal {
// Returns (intptr) a byte length value in [0..JSArrayBuffer::kMaxLength].
// If it fails (because the view is detached or out of bounds), it returns -1.
TF_BUILTIN(DataViewGetVariableLength, DataViewBuiltinsAssembler) {
auto dataview = Parameter<JSDataView>(Descriptor::kDataView);
CSA_CHECK(this, IsVariableLengthJSArrayBufferView(dataview));
Label detached_or_oob(this);
auto buffer =
LoadObjectField<JSArrayBuffer>(dataview, JSDataView::kBufferOffset);
TNode<UintPtrT> byte_length = LoadVariableLengthJSArrayBufferViewByteLength(
dataview, buffer, &detached_or_oob);
Return(byte_length);
BIND(&detached_or_oob);
Return(IntPtrConstant(-1));
}
} // namespace internal
} // namespace v8


@ -207,18 +207,11 @@ TF_BUILTIN(DatePrototypeToPrimitive, CodeStubAssembler) {
// Slow-case with actual string comparisons.
GotoIf(TaggedIsSmi(hint), &hint_is_invalid);
GotoIfNot(IsString(CAST(hint)), &hint_is_invalid);
GotoIf(TaggedEqual(
CallBuiltin(Builtin::kStringEqual, context, hint, number_string),
TrueConstant()),
&hint_is_number);
GotoIf(TaggedEqual(
CallBuiltin(Builtin::kStringEqual, context, hint, default_string),
TrueConstant()),
&hint_is_string);
GotoIf(TaggedEqual(
CallBuiltin(Builtin::kStringEqual, context, hint, string_string),
TrueConstant()),
&hint_is_string);
TNode<IntPtrT> hint_length = LoadStringLengthAsWord(CAST(hint));
GotoIfStringEqual(CAST(hint), hint_length, number_string, &hint_is_number);
GotoIfStringEqual(CAST(hint), hint_length, default_string, &hint_is_string);
GotoIfStringEqual(CAST(hint), hint_length, string_string, &hint_is_string);
Goto(&hint_is_invalid);
// Use the OrdinaryToPrimitive builtin to convert to a Number.
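The three GotoIfStringEqual checks implement the @@toPrimitive hint dispatch, and loading the hint's length once makes mismatching comparisons cheap. The equivalent logic in plain C++, for illustration only:

#include <string>

enum class Hint { kNumber, kString, kInvalid };

Hint ClassifyHint(const std::string& hint) {
  if (hint == "number") return Hint::kNumber;
  if (hint == "default" || hint == "string") return Hint::kString;
  return Hint::kInvalid;  // anything else falls through to hint_is_invalid
}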


@ -160,7 +160,7 @@ namespace internal {
\
/* String helpers */ \
TFC(StringFromCodePointAt, StringAtAsString) \
TFC(StringEqual, Compare) \
TFC(StringEqual, StringEqual) \
TFC(StringGreaterThan, Compare) \
TFC(StringGreaterThanOrEqual, Compare) \
TFC(StringLessThan, Compare) \
@ -198,7 +198,6 @@ namespace internal {
\
/* Maglev Compiler */ \
ASM(MaglevOnStackReplacement, OnStackReplacement) \
ASM(MaglevOutOfLinePrologue, NoContext) \
\
/* Code life-cycle */ \
TFC(CompileLazy, JSTrampoline) \
@ -399,6 +398,7 @@ namespace internal {
CPP(ArrayShift) \
/* ES6 #sec-array.prototype.unshift */ \
CPP(ArrayUnshift) \
CPP(ArrayFromAsync) \
/* Support for Array.from and other array-copying idioms */ \
TFS(CloneFastJSArray, kSource) \
TFS(CloneFastJSArrayFillingHoles, kSource) \
@ -502,6 +502,7 @@ namespace internal {
/* DataView */ \
/* ES #sec-dataview-constructor */ \
CPP(DataViewConstructor) \
TFC(DataViewGetVariableLength, DataViewGetVariableLength) \
\
/* Date */ \
/* ES #sec-date-constructor */ \
@ -625,10 +626,6 @@ namespace internal {
CPP(JsonRawJson) \
CPP(JsonIsRawJson) \
\
/* Web snapshots */ \
CPP(WebSnapshotSerialize) \
CPP(WebSnapshotDeserialize) \
\
/* ICs */ \
TFH(LoadIC, LoadWithVector) \
TFH(LoadIC_Megamorphic, LoadWithVector) \
@ -655,9 +652,9 @@ namespace internal {
TFH(KeyedStoreIC, StoreWithVector) \
TFH(KeyedStoreICTrampoline, Store) \
TFH(KeyedStoreICBaseline, StoreBaseline) \
TFH(DefineKeyedOwnIC, StoreWithVector) \
TFH(DefineKeyedOwnICTrampoline, Store) \
TFH(DefineKeyedOwnICBaseline, StoreBaseline) \
TFH(DefineKeyedOwnIC, DefineKeyedOwnWithVector) \
TFH(DefineKeyedOwnICTrampoline, DefineKeyedOwn) \
TFH(DefineKeyedOwnICBaseline, DefineKeyedOwnBaseline) \
TFH(StoreInArrayLiteralIC, StoreWithVector) \
TFH(StoreInArrayLiteralICBaseline, StoreBaseline) \
TFH(LookupContextTrampoline, LookupTrampoline) \
@ -896,7 +893,8 @@ namespace internal {
kSpecifier, kExportName) \
TFJ(ShadowRealmImportValueFulfilled, kJSArgcReceiverSlots + 1, kReceiver, \
kExports) \
TFJ(ShadowRealmImportValueRejected, kDontAdaptArgumentsSentinel) \
TFJ(ShadowRealmImportValueRejected, kJSArgcReceiverSlots + 1, kReceiver, \
kException) \
\
/* SharedArrayBuffer */ \
CPP(SharedArrayBufferPrototypeGetByteLength) \
@ -1072,17 +1070,12 @@ namespace internal {
TFJ(AsyncIteratorValueUnwrap, kJSArgcReceiverSlots + 1, kReceiver, kValue) \
\
/* CEntry */ \
ASM(CEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit, CEntryDummy) \
ASM(CEntry_Return1_DontSaveFPRegs_ArgvOnStack_BuiltinExit, \
CEntry1ArgvOnStack) \
ASM(CEntry_Return1_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit, CEntryDummy) \
ASM(CEntry_Return1_SaveFPRegs_ArgvOnStack_NoBuiltinExit, CEntryDummy) \
ASM(CEntry_Return1_SaveFPRegs_ArgvOnStack_BuiltinExit, CEntryDummy) \
ASM(CEntry_Return2_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit, CEntryDummy) \
ASM(CEntry_Return2_DontSaveFPRegs_ArgvOnStack_BuiltinExit, CEntryDummy) \
ASM(CEntry_Return2_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit, CEntryDummy) \
ASM(CEntry_Return2_SaveFPRegs_ArgvOnStack_NoBuiltinExit, CEntryDummy) \
ASM(CEntry_Return2_SaveFPRegs_ArgvOnStack_BuiltinExit, CEntryDummy) \
ASM(CEntry_Return1_ArgvInRegister_NoBuiltinExit, CEntryDummy) \
ASM(CEntry_Return1_ArgvOnStack_BuiltinExit, CEntry1ArgvOnStack) \
ASM(CEntry_Return1_ArgvOnStack_NoBuiltinExit, CEntryDummy) \
ASM(CEntry_Return2_ArgvInRegister_NoBuiltinExit, CEntryDummy) \
ASM(CEntry_Return2_ArgvOnStack_BuiltinExit, CEntryDummy) \
ASM(CEntry_Return2_ArgvOnStack_NoBuiltinExit, CEntryDummy) \
ASM(DirectCEntry, CEntryDummy) \
\
/* String helpers */ \


@ -5,10 +5,9 @@
#ifndef V8_BUILTINS_BUILTINS_DESCRIPTORS_H_
#define V8_BUILTINS_BUILTINS_DESCRIPTORS_H_
#include "src/builtins/builtins.h"
#include "src/builtins/builtins-definitions.h"
#include "src/codegen/interface-descriptors.h"
#include "src/compiler/code-assembler.h"
#include "src/objects/shared-function-info.h"
#include "src/common/globals.h"
namespace v8 {
namespace internal {
@ -17,7 +16,7 @@ namespace internal {
#define DEFINE_TFJ_INTERFACE_DESCRIPTOR(Name, Argc, ...) \
struct Builtin_##Name##_InterfaceDescriptor { \
enum ParameterIndices { \
kJSTarget = compiler::CodeAssembler::kTargetParameterIndex, \
kJSTarget = kJSCallClosureParameterIndex, \
##__VA_ARGS__, \
kJSNewTarget, \
kJSActualArgumentsCount, \


@ -52,7 +52,7 @@ BUILTIN(ErrorCaptureStackTrace) {
// Explicitly check for frozen objects. Other access checks are performed by
// the LookupIterator in SetAccessor below.
if (!JSObject::IsExtensible(object)) {
if (!JSObject::IsExtensible(isolate, object)) {
return isolate->Throw(*isolate->factory()->NewTypeError(
MessageTemplate::kDefineDisallowed, name));
}


@ -104,7 +104,7 @@ TF_BUILTIN(DebugBreakTrampoline, CodeStubAssembler) {
BIND(&tailcall_to_shared);
// Tail call into code object on the SharedFunctionInfo.
TNode<CodeT> code = GetSharedFunctionInfoCode(shared);
TNode<Code> code = GetSharedFunctionInfoCode(shared);
TailCallJSCode(code, context, function, new_target, arg_count);
}
@ -1230,9 +1230,8 @@ TF_BUILTIN(AdaptorWithBuiltinExitFrame, CodeStubAssembler) {
Int32Constant(BuiltinExitFrameConstants::kNumExtraArgsWithoutReceiver));
const bool builtin_exit_frame = true;
TNode<CodeT> code =
HeapConstant(CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
ArgvMode::kStack, builtin_exit_frame));
TNode<Code> code = HeapConstant(
CodeFactory::CEntry(isolate(), 1, ArgvMode::kStack, builtin_exit_frame));
// Unconditionally push argc, target and new target as extra stack arguments.
// They will be used by stack frame iterators when constructing stack trace.
@ -1304,56 +1303,34 @@ TF_BUILTIN(AbortCSADcheck, CodeStubAssembler) {
TailCallRuntime(Runtime::kAbortCSADcheck, NoContextConstant(), message);
}
void Builtins::Generate_CEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit(
void Builtins::Generate_CEntry_Return1_ArgvOnStack_NoBuiltinExit(
MacroAssembler* masm) {
Generate_CEntry(masm, 1, SaveFPRegsMode::kIgnore, ArgvMode::kStack, false);
Generate_CEntry(masm, 1, ArgvMode::kStack, false);
}
void Builtins::Generate_CEntry_Return1_DontSaveFPRegs_ArgvOnStack_BuiltinExit(
void Builtins::Generate_CEntry_Return1_ArgvOnStack_BuiltinExit(
MacroAssembler* masm) {
Generate_CEntry(masm, 1, SaveFPRegsMode::kIgnore, ArgvMode::kStack, true);
Generate_CEntry(masm, 1, ArgvMode::kStack, true);
}
void Builtins::
Generate_CEntry_Return1_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit(
MacroAssembler* masm) {
Generate_CEntry(masm, 1, SaveFPRegsMode::kIgnore, ArgvMode::kRegister, false);
}
void Builtins::Generate_CEntry_Return1_SaveFPRegs_ArgvOnStack_NoBuiltinExit(
void Builtins::Generate_CEntry_Return1_ArgvInRegister_NoBuiltinExit(
MacroAssembler* masm) {
Generate_CEntry(masm, 1, SaveFPRegsMode::kSave, ArgvMode::kStack, false);
Generate_CEntry(masm, 1, ArgvMode::kRegister, false);
}
void Builtins::Generate_CEntry_Return1_SaveFPRegs_ArgvOnStack_BuiltinExit(
void Builtins::Generate_CEntry_Return2_ArgvOnStack_NoBuiltinExit(
MacroAssembler* masm) {
Generate_CEntry(masm, 1, SaveFPRegsMode::kSave, ArgvMode::kStack, true);
Generate_CEntry(masm, 2, ArgvMode::kStack, false);
}
void Builtins::Generate_CEntry_Return2_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit(
void Builtins::Generate_CEntry_Return2_ArgvOnStack_BuiltinExit(
MacroAssembler* masm) {
Generate_CEntry(masm, 2, SaveFPRegsMode::kIgnore, ArgvMode::kStack, false);
Generate_CEntry(masm, 2, ArgvMode::kStack, true);
}
void Builtins::Generate_CEntry_Return2_DontSaveFPRegs_ArgvOnStack_BuiltinExit(
void Builtins::Generate_CEntry_Return2_ArgvInRegister_NoBuiltinExit(
MacroAssembler* masm) {
Generate_CEntry(masm, 2, SaveFPRegsMode::kIgnore, ArgvMode::kStack, true);
}
void Builtins::
Generate_CEntry_Return2_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit(
MacroAssembler* masm) {
Generate_CEntry(masm, 2, SaveFPRegsMode::kIgnore, ArgvMode::kRegister, false);
}
void Builtins::Generate_CEntry_Return2_SaveFPRegs_ArgvOnStack_NoBuiltinExit(
MacroAssembler* masm) {
Generate_CEntry(masm, 2, SaveFPRegsMode::kSave, ArgvMode::kStack, false);
}
void Builtins::Generate_CEntry_Return2_SaveFPRegs_ArgvOnStack_BuiltinExit(
MacroAssembler* masm) {
Generate_CEntry(masm, 2, SaveFPRegsMode::kSave, ArgvMode::kStack, true);
Generate_CEntry(masm, 2, ArgvMode::kRegister, false);
}
#if !defined(V8_TARGET_ARCH_ARM)
@ -1396,12 +1373,6 @@ void Builtins::Generate_MaglevOnStackReplacement(MacroAssembler* masm) {
static_assert(D::kParameterCount == 1);
masm->Trap();
}
void Builtins::Generate_MaglevOutOfLinePrologue(MacroAssembler* masm) {
using D =
i::CallInterfaceDescriptorFor<Builtin::kMaglevOutOfLinePrologue>::type;
static_assert(D::kParameterCount == 0);
masm->Trap();
}
#endif // V8_TARGET_ARCH_X64
// ES6 [[Get]] operation.
@ -1591,7 +1562,7 @@ TF_BUILTIN(InstantiateAsmJs, CodeStubAssembler) {
// On failure, tail call back to regular JavaScript by re-calling the given
// function which has been reset to the compile lazy builtin.
TNode<CodeT> code = LoadJSFunctionCode(function);
TNode<Code> code = LoadJSFunctionCode(function);
TailCallJSCode(code, context, function, new_target, arg_count);
}


@ -113,10 +113,6 @@ void IntlBuiltinsAssembler::ToLowerCaseImpl(
ToLowerCaseKind kind, std::function<void(TNode<Object>)> ReturnFct) {
Label call_c(this), return_string(this), runtime(this, Label::kDeferred);
// Early exit on empty strings.
const TNode<Uint32T> length = LoadStringLengthAsWord32(string);
GotoIf(Word32Equal(length, Uint32Constant(0)), &return_string);
// Unpack strings if possible, and bail to runtime unless we get a one-byte
// flat string.
ToDirectStringAssembler to_direct(
@ -153,6 +149,10 @@ void IntlBuiltinsAssembler::ToLowerCaseImpl(
Bind(&fast);
}
// Early exit on empty string.
const TNode<Uint32T> length = LoadStringLengthAsWord32(string);
GotoIf(Word32Equal(length, Uint32Constant(0)), &return_string);
const TNode<Int32T> instance_type = to_direct.instance_type();
CSA_DCHECK(this,
Word32BinaryNot(IsIndirectStringInstanceType(instance_type)));


@ -15,7 +15,7 @@ namespace v8 {
namespace internal {
void LazyBuiltinsAssembler::GenerateTailCallToJSCode(
TNode<CodeT> code, TNode<JSFunction> function) {
TNode<Code> code, TNode<JSFunction> function) {
auto argc = UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
auto context = Parameter<Context>(Descriptor::kContext);
auto new_target = Parameter<Object>(Descriptor::kNewTarget);
@ -25,7 +25,7 @@ void LazyBuiltinsAssembler::GenerateTailCallToJSCode(
void LazyBuiltinsAssembler::GenerateTailCallToReturnedCode(
Runtime::FunctionId function_id, TNode<JSFunction> function) {
auto context = Parameter<Context>(Descriptor::kContext);
TNode<CodeT> code = CAST(CallRuntime(function_id, context, function));
TNode<Code> code = CAST(CallRuntime(function_id, context, function));
GenerateTailCallToJSCode(code, function);
}
@ -63,8 +63,8 @@ void LazyBuiltinsAssembler::MaybeTailCallOptimizedCodeSlot(
TNode<MaybeObject> maybe_optimized_code_entry = LoadMaybeWeakObjectField(
feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset);
// Optimized code slot is a weak reference to CodeT object.
TNode<CodeT> optimized_code = CAST(GetHeapObjectAssumeWeak(
// Optimized code slot is a weak reference to Code object.
TNode<Code> optimized_code = CAST(GetHeapObjectAssumeWeak(
maybe_optimized_code_entry, &heal_optimized_code_slot));
// Check if the optimized code is marked for deopt. If it is, call the
@ -100,7 +100,7 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
TNode<SharedFunctionInfo> shared =
CAST(LoadObjectField(function, JSFunction::kSharedFunctionInfoOffset));
TVARIABLE(Uint16T, sfi_data_type);
TNode<CodeT> sfi_code =
TNode<Code> sfi_code =
GetSharedFunctionInfoCode(shared, &sfi_data_type, &compile_function);
TNode<HeapObject> feedback_cell_value = LoadFeedbackCellValue(function);
@ -124,22 +124,23 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
MaybeTailCallOptimizedCodeSlot(function, CAST(feedback_cell_value));
Goto(&maybe_use_sfi_code);
// At this point we have a candidate Code object. It's *not* a cached
// optimized Code object (we'd have tail-called it above). A usual case would
// be the InterpreterEntryTrampoline to start executing existing bytecode.
// At this point we have a candidate InstructionStream object. It's *not* a
// cached optimized InstructionStream object (we'd have tail-called it above).
// A usual case would be the InterpreterEntryTrampoline to start executing
// existing bytecode.
BIND(&maybe_use_sfi_code);
Label tailcall_code(this), baseline(this);
TVARIABLE(CodeT, code);
TVARIABLE(Code, code);
// Check if we have baseline code.
GotoIf(InstanceTypeEqual(sfi_data_type.value(), CODET_TYPE), &baseline);
GotoIf(InstanceTypeEqual(sfi_data_type.value(), CODE_TYPE), &baseline);
code = sfi_code;
Goto(&tailcall_code);
BIND(&baseline);
// Ensure we have a feedback vector.
code = Select<CodeT>(
code = Select<Code>(
IsFeedbackVector(feedback_cell_value), [=]() { return sfi_code; },
[=]() {
return CAST(CallRuntime(Runtime::kInstallBaselineCode,
@ -164,7 +165,7 @@ TF_BUILTIN(CompileLazy, LazyBuiltinsAssembler) {
TF_BUILTIN(CompileLazyDeoptimizedCode, LazyBuiltinsAssembler) {
auto function = Parameter<JSFunction>(Descriptor::kTarget);
TNode<CodeT> code = HeapConstant(BUILTIN_CODE(isolate(), CompileLazy));
TNode<Code> code = HeapConstant(BUILTIN_CODE(isolate(), CompileLazy));
// Set the code slot inside the JSFunction to CompileLazy.
StoreObjectField(function, JSFunction::kCodeOffset, code);
GenerateTailCallToJSCode(code, function);
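The CompileLazy flow above amounts to a tier-selection decision. A schematic sketch with hypothetical flags, not the real CSA control flow:

enum class Tier { kOptimized, kBaseline, kInterpreted };

// Which code a lazily-compiled JSFunction ends up tail-calling into.
Tier SelectTier(bool has_cached_optimized_code, bool sfi_has_baseline_code) {
  if (has_cached_optimized_code) return Tier::kOptimized;
  if (sfi_has_baseline_code) {
    // If no feedback vector exists yet, Runtime::kInstallBaselineCode
    // installs one before the baseline code is entered.
    return Tier::kBaseline;
  }
  return Tier::kInterpreted;  // e.g. the InterpreterEntryTrampoline
}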


@ -17,7 +17,7 @@ class LazyBuiltinsAssembler : public CodeStubAssembler {
explicit LazyBuiltinsAssembler(compiler::CodeAssemblerState* state)
: CodeStubAssembler(state) {}
void GenerateTailCallToJSCode(TNode<CodeT> code, TNode<JSFunction> function);
void GenerateTailCallToJSCode(TNode<Code> code, TNode<JSFunction> function);
void GenerateTailCallToReturnedCode(Runtime::FunctionId function_id,
TNode<JSFunction> function);


@ -148,11 +148,7 @@ Object ObjectLookupAccessor(Isolate* isolate, Handle<Object> object,
}
return ObjectLookupAccessor(isolate, prototype, key, component);
}
case LookupIterator::WASM_OBJECT:
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kWasmObjectsAreOpaque));
case LookupIterator::INTEGER_INDEXED_EXOTIC:
case LookupIterator::DATA:
return ReadOnlyRoots(isolate).undefined_value();
@ -219,9 +215,10 @@ BUILTIN(ObjectFreeze) {
HandleScope scope(isolate);
Handle<Object> object = args.atOrUndefined(isolate, 1);
if (object->IsJSReceiver()) {
MAYBE_RETURN(JSReceiver::SetIntegrityLevel(Handle<JSReceiver>::cast(object),
FROZEN, kThrowOnError),
ReadOnlyRoots(isolate).exception());
MAYBE_RETURN(
JSReceiver::SetIntegrityLevel(isolate, Handle<JSReceiver>::cast(object),
FROZEN, kThrowOnError),
ReadOnlyRoots(isolate).exception());
}
return *object;
}
@ -299,10 +296,11 @@ BUILTIN(ObjectGetOwnPropertySymbols) {
BUILTIN(ObjectIsFrozen) {
HandleScope scope(isolate);
Handle<Object> object = args.atOrUndefined(isolate, 1);
Maybe<bool> result = object->IsJSReceiver()
? JSReceiver::TestIntegrityLevel(
Handle<JSReceiver>::cast(object), FROZEN)
: Just(true);
Maybe<bool> result =
object->IsJSReceiver()
? JSReceiver::TestIntegrityLevel(
isolate, Handle<JSReceiver>::cast(object), FROZEN)
: Just(true);
MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
return isolate->heap()->ToBoolean(result.FromJust());
}
@ -311,10 +309,11 @@ BUILTIN(ObjectIsFrozen) {
BUILTIN(ObjectIsSealed) {
HandleScope scope(isolate);
Handle<Object> object = args.atOrUndefined(isolate, 1);
Maybe<bool> result = object->IsJSReceiver()
? JSReceiver::TestIntegrityLevel(
Handle<JSReceiver>::cast(object), SEALED)
: Just(true);
Maybe<bool> result =
object->IsJSReceiver()
? JSReceiver::TestIntegrityLevel(
isolate, Handle<JSReceiver>::cast(object), SEALED)
: Just(true);
MAYBE_RETURN(result, ReadOnlyRoots(isolate).exception());
return isolate->heap()->ToBoolean(result.FromJust());
}
@ -360,9 +359,10 @@ BUILTIN(ObjectSeal) {
HandleScope scope(isolate);
Handle<Object> object = args.atOrUndefined(isolate, 1);
if (object->IsJSReceiver()) {
MAYBE_RETURN(JSReceiver::SetIntegrityLevel(Handle<JSReceiver>::cast(object),
SEALED, kThrowOnError),
ReadOnlyRoots(isolate).exception());
MAYBE_RETURN(
JSReceiver::SetIntegrityLevel(isolate, Handle<JSReceiver>::cast(object),
SEALED, kThrowOnError),
ReadOnlyRoots(isolate).exception());
}
return *object;
}


@ -45,49 +45,6 @@ TNode<IntPtrT> RegExpBuiltinsAssembler::IntPtrZero() {
return IntPtrConstant(0);
}
// If code is a builtin, return the address to the (possibly embedded) builtin
// code entry, otherwise return the entry of the code object itself.
TNode<RawPtrT> RegExpBuiltinsAssembler::LoadCodeObjectEntry(TNode<CodeT> code) {
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
// When external code space is enabled we can load the entry point directly
// from the CodeT object.
return GetCodeEntry(code);
}
TVARIABLE(RawPtrT, var_result);
Label if_code_is_off_heap(this), out(this);
TNode<Int32T> builtin_index =
LoadObjectField<Int32T>(code, Code::kBuiltinIndexOffset);
{
GotoIfNot(
Word32Equal(builtin_index,
Int32Constant(static_cast<int>(Builtin::kNoBuiltinId))),
&if_code_is_off_heap);
var_result = ReinterpretCast<RawPtrT>(
IntPtrAdd(BitcastTaggedToWord(code),
IntPtrConstant(Code::kHeaderSize - kHeapObjectTag)));
Goto(&out);
}
BIND(&if_code_is_off_heap);
{
TNode<IntPtrT> builtin_entry_offset_from_isolate_root =
IntPtrAdd(IntPtrConstant(IsolateData::builtin_entry_table_offset()),
ChangeInt32ToIntPtr(Word32Shl(
builtin_index, Int32Constant(kSystemPointerSizeLog2))));
var_result = ReinterpretCast<RawPtrT>(
Load(MachineType::Pointer(),
ExternalConstant(ExternalReference::isolate_root(isolate())),
builtin_entry_offset_from_isolate_root));
Goto(&out);
}
BIND(&out);
return var_result.value();
}
// -----------------------------------------------------------------------------
// ES6 section 21.2 RegExp Objects
@ -559,7 +516,7 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
#endif
GotoIf(TaggedIsSmi(var_code.value()), &runtime);
TNode<CodeT> code = CAST(var_code.value());
TNode<Code> code = CAST(var_code.value());
Label if_success(this), if_exception(this, Label::kDeferred);
{
@ -623,7 +580,7 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
MachineType arg8_type = type_tagged;
TNode<JSRegExp> arg8 = regexp;
TNode<RawPtrT> code_entry = LoadCodeObjectEntry(code);
TNode<RawPtrT> code_entry = GetCodeEntry(code);
// AIX uses function descriptors on CFunction calls. code_entry in this case
// may also point to a Regex interpreter entry trampoline which does not
@ -1066,13 +1023,13 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
BIND(&next); \
} while (false)
CASE_FOR_FLAG("hasIndices", JSRegExp::kHasIndices);
CASE_FOR_FLAG("global", JSRegExp::kGlobal);
CASE_FOR_FLAG("ignoreCase", JSRegExp::kIgnoreCase);
CASE_FOR_FLAG("multiline", JSRegExp::kMultiline);
CASE_FOR_FLAG("dotAll", JSRegExp::kDotAll);
CASE_FOR_FLAG("unicode", JSRegExp::kUnicode);
CASE_FOR_FLAG("sticky", JSRegExp::kSticky);
CASE_FOR_FLAG("hasIndices", JSRegExp::kHasIndices);
#undef CASE_FOR_FLAG
#define CASE_FOR_FLAG(NAME, V8_FLAG_EXTERN_REF, FLAG) \


@ -21,8 +21,6 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler {
TNode<Smi> SmiZero();
TNode<IntPtrT> IntPtrZero();
TNode<RawPtrT> LoadCodeObjectEntry(TNode<CodeT> code);
// Allocate either a JSRegExpResult or a JSRegExpResultWithIndices (depending
// on has_indices) with the given length (the number of captures, including
// the match itself), index (the index where the match starts), and input


@ -38,6 +38,9 @@ class ShadowRealmBuiltinsAssembler : public CodeStubAssembler {
TNode<JSFunction> AllocateImportValueFulfilledFunction(
TNode<NativeContext> caller_context, TNode<NativeContext> eval_context,
TNode<String> specifier, TNode<String> export_name);
void ShadowRealmThrow(TNode<Context> context,
MessageTemplate fallback_message,
TNode<Object> exception);
};
TNode<JSObject> ShadowRealmBuiltinsAssembler::AllocateJSWrappedFunction(
@ -97,6 +100,14 @@ void ShadowRealmBuiltinsAssembler::CheckAccessor(TNode<DescriptorArray> array,
GotoIfNot(IsAccessorInfo(CAST(value)), bailout);
}
void ShadowRealmBuiltinsAssembler::ShadowRealmThrow(
TNode<Context> context, MessageTemplate fallback_message,
TNode<Object> exception) {
TNode<Smi> template_index = SmiConstant(static_cast<int>(fallback_message));
CallRuntime(Runtime::kShadowRealmThrow, context, template_index, exception);
Unreachable();
}
// https://tc39.es/proposal-shadowrealm/#sec-getwrappedvalue
TF_BUILTIN(ShadowRealmGetWrappedValue, ShadowRealmBuiltinsAssembler) {
auto context = Parameter<Context>(Descriptor::kContext);
@ -285,11 +296,8 @@ TF_BUILTIN(CallWrappedFunction, ShadowRealmBuiltinsAssembler) {
// 11. Else,
BIND(&call_exception);
// 11a. Throw a TypeError exception.
// TODO(v8:11989): provide a non-observable inspection on the
// pending_exception to the newly created TypeError.
// https://github.com/tc39/proposal-shadowrealm/issues/353
ThrowTypeError(context, MessageTemplate::kCallShadowRealmFunctionThrown,
var_exception.value());
ShadowRealmThrow(context, MessageTemplate::kCallWrappedFunctionThrew,
var_exception.value());
BIND(&target_not_callable);
// A wrapped value should not be non-callable.
@ -416,10 +424,9 @@ TF_BUILTIN(ShadowRealmImportValueFulfilled, ShadowRealmBuiltinsAssembler) {
TF_BUILTIN(ShadowRealmImportValueRejected, ShadowRealmBuiltinsAssembler) {
TNode<Context> context = Parameter<Context>(Descriptor::kContext);
// TODO(v8:11989): provide a non-observable inspection on the
// pending_exception to the newly created TypeError.
// https://github.com/tc39/proposal-shadowrealm/issues/353
ThrowTypeError(context, MessageTemplate::kImportShadowRealmRejected);
TNode<Object> exception = Parameter<Object>(Descriptor::kException);
ShadowRealmThrow(context, MessageTemplate::kImportShadowRealmRejected,
exception);
}
} // namespace internal


@ -202,11 +202,11 @@ BUILTIN(ShadowRealmPrototypeEvaluate) {
*factory->NewError(isolate->syntax_error_function(), message));
}
// 21. If result.[[Type]] is not normal, throw a TypeError exception.
// TODO(v8:11989): provide a non-observable inspection on the
// pending_exception to the newly created TypeError.
// https://github.com/tc39/proposal-shadowrealm/issues/353
Handle<String> string =
Object::NoSideEffectsToString(isolate, pending_exception);
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kCallShadowRealmFunctionThrown));
isolate,
NewTypeError(MessageTemplate::kCallShadowRealmEvaluateThrew, string));
}
// 22. Return ? GetWrappedValue(callerRealm, result.[[Value]]).
Handle<Object> wrapped_result;


@ -124,17 +124,15 @@ TNode<IntPtrT> StringBuiltinsAssembler::SearchOneByteInOneByteString(
}
void StringBuiltinsAssembler::GenerateStringEqual(TNode<String> left,
TNode<String> right) {
TNode<String> right,
TNode<IntPtrT> length) {
TVARIABLE(String, var_left, left);
TVARIABLE(String, var_right, right);
Label if_equal(this), if_notequal(this), if_indirect(this, Label::kDeferred),
restart(this, {&var_left, &var_right});
TNode<IntPtrT> lhs_length = LoadStringLengthAsWord(left);
TNode<IntPtrT> rhs_length = LoadStringLengthAsWord(right);
// Strings with different lengths cannot be equal.
GotoIf(WordNotEqual(lhs_length, rhs_length), &if_notequal);
CSA_DCHECK(this, IntPtrEqual(LoadStringLengthAsWord(left), length));
CSA_DCHECK(this, IntPtrEqual(LoadStringLengthAsWord(right), length));
Goto(&restart);
BIND(&restart);
@ -144,7 +142,7 @@ void StringBuiltinsAssembler::GenerateStringEqual(TNode<String> left,
TNode<Uint16T> lhs_instance_type = LoadInstanceType(lhs);
TNode<Uint16T> rhs_instance_type = LoadInstanceType(rhs);
StringEqual_Core(lhs, lhs_instance_type, rhs, rhs_instance_type, lhs_length,
StringEqual_Core(lhs, lhs_instance_type, rhs, rhs_instance_type, length,
&if_equal, &if_notequal, &if_indirect);
BIND(&if_indirect);
@ -199,22 +197,42 @@ void StringBuiltinsAssembler::StringEqual_Core(
Int32Constant(0)),
if_indirect);
Label if_skip_fast_case(this), if_fast_case(this), if_oneonebytestring(this),
if_twotwobytestring(this), if_onetwobytestring(this),
if_twoonebytestring(this);
// Dispatch based on the {lhs} and {rhs} string encoding.
int const kBothStringEncodingMask =
kStringEncodingMask | (kStringEncodingMask << 8);
int const kBothExternalStringTag =
kExternalStringTag | (kExternalStringTag << 8);
int const kOneOneByteStringTag = kOneByteStringTag | (kOneByteStringTag << 8);
int const kTwoTwoByteStringTag = kTwoByteStringTag | (kTwoByteStringTag << 8);
int const kOneTwoByteStringTag = kOneByteStringTag | (kTwoByteStringTag << 8);
Label if_oneonebytestring(this), if_twotwobytestring(this),
if_onetwobytestring(this), if_twoonebytestring(this);
TNode<Word32T> masked_instance_types =
Word32And(both_instance_types, Int32Constant(kBothStringEncodingMask));
GotoIf(
Word32Equal(masked_instance_types, Int32Constant(kOneOneByteStringTag)),
&if_oneonebytestring);
GotoIf(
Word32Equal(masked_instance_types, Int32Constant(kTwoTwoByteStringTag)),
&if_twotwobytestring);
TNode<Word32T> both_are_one_byte =
Word32Equal(masked_instance_types, Int32Constant(kOneOneByteStringTag));
TNode<Word32T> both_are_two_byte =
Word32Equal(masked_instance_types, Int32Constant(kTwoTwoByteStringTag));
// If neither string is external, we know that their payload lengths are
// padded to kTagged size. When they have the same encoding we can compare
// them in chunks. The padding bytes are set to zero.
GotoIf(Word32And(both_instance_types, Int32Constant(kBothExternalStringTag)),
&if_skip_fast_case);
TVARIABLE(IntPtrT, byte_length, length);
GotoIf(both_are_one_byte, &if_fast_case);
byte_length = WordShl(byte_length.value(), IntPtrConstant(1));
Branch(both_are_two_byte, &if_fast_case, &if_skip_fast_case);
BIND(&if_fast_case);
StringEqual_FastLoop(lhs, lhs_instance_type, rhs, rhs_instance_type,
byte_length.value(), if_equal, if_not_equal);
BIND(&if_skip_fast_case);
GotoIf(both_are_one_byte, &if_oneonebytestring);
GotoIf(both_are_two_byte, &if_twotwobytestring);
Branch(
Word32Equal(masked_instance_types, Int32Constant(kOneTwoByteStringTag)),
&if_onetwobytestring, &if_twoonebytestring);
@ -240,6 +258,87 @@ void StringBuiltinsAssembler::StringEqual_Core(
if_not_equal);
}
void StringBuiltinsAssembler::StringEqual_FastLoop(
TNode<String> lhs, TNode<Word32T> lhs_instance_type, TNode<String> rhs,
TNode<Word32T> rhs_instance_type, TNode<IntPtrT> byte_length,
Label* if_equal, Label* if_not_equal) {
TNode<RawPtrT> lhs_data = DirectStringData(lhs, lhs_instance_type);
TNode<RawPtrT> rhs_data = DirectStringData(rhs, rhs_instance_type);
constexpr int kChunk =
kObjectAlignment % ElementSizeInBytes(MachineRepresentation::kWord64) == 0
? ElementSizeInBytes(MachineRepresentation::kWord64)
: ElementSizeInBytes(MachineRepresentation::kWord32);
static_assert(kObjectAlignment % kChunk == 0);
// Round up the byte_length to `ceiling(byte_length / kChunk) * kChunk`.
TNode<IntPtrT> rounded_up_len = UncheckedCast<IntPtrT>(WordAnd(
UncheckedCast<WordT>(IntPtrAdd(byte_length, IntPtrConstant(kChunk - 1))),
UncheckedCast<WordT>(IntPtrConstant(~(kChunk - 1)))));
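// Worked example of the round-up, assuming kChunk == 8:
//   byte_length = 10  ->  (10 + 7) & ~7  ==  17 & ~7  ==  16 (two chunks);
//   byte_length = 16  ->  (16 + 7) & ~7  ==  23 & ~7  ==  16 (unchanged).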
#ifdef ENABLE_SLOW_DCHECKS
// The padding must be zeroed for the chunked comparison to be correct. This
// loop checks that every byte from byte_length up to rounded_up_len is 0.
{
TVARIABLE(IntPtrT, var_padding_offset, byte_length);
Label loop(this, &var_padding_offset), loop_end(this);
Goto(&loop);
BIND(&loop);
{
GotoIf(WordEqual(var_padding_offset.value(), rounded_up_len), &loop_end);
// Load the next byte
TNode<Word32T> lhs_value = UncheckedCast<Word32T>(Load(
MachineType::Uint8(), lhs_data,
WordShl(var_padding_offset.value(),
ElementSizeLog2Of(MachineType::Uint8().representation()))));
TNode<Word32T> rhs_value = UncheckedCast<Word32T>(Load(
MachineType::Uint8(), rhs_data,
WordShl(var_padding_offset.value(),
ElementSizeLog2Of(MachineType::Uint8().representation()))));
// Check the padding is zero.
CSA_CHECK(this, Word32Equal(lhs_value, Int32Constant(0)));
CSA_CHECK(this, Word32Equal(rhs_value, Int32Constant(0)));
// Advance to next byte.
var_padding_offset =
IntPtrAdd(var_padding_offset.value(), IntPtrConstant(1));
Goto(&loop);
}
BIND(&loop_end);
}
#endif // ENABLE_SLOW_DCHECKS
// Compare strings in chunks of either 4 or 8 bytes, depending on the
// alignment of allocations.
static_assert(kChunk == ElementSizeInBytes(MachineRepresentation::kWord64) ||
kChunk == ElementSizeInBytes(MachineRepresentation::kWord32));
if (kChunk == ElementSizeInBytes(MachineRepresentation::kWord64)) {
BuildFastLoop<IntPtrT>(
IntPtrConstant(0), rounded_up_len,
[&](TNode<IntPtrT> index) {
TNode<Word64T> lhs_value = UncheckedCast<Word64T>(
Load(MachineType::Uint64(), lhs_data, index));
TNode<Word64T> rhs_value = UncheckedCast<Word64T>(
Load(MachineType::Uint64(), rhs_data, index));
GotoIf(Word64NotEqual(lhs_value, rhs_value), if_not_equal);
},
kChunk, LoopUnrollingMode::kYes, IndexAdvanceMode::kPost);
} else {
BuildFastLoop<IntPtrT>(
IntPtrConstant(0), rounded_up_len,
[&](TNode<IntPtrT> index) {
TNode<Word32T> lhs_value = UncheckedCast<Word32T>(
Load(MachineType::Uint32(), lhs_data, index));
TNode<Word32T> rhs_value = UncheckedCast<Word32T>(
Load(MachineType::Uint32(), rhs_data, index));
GotoIf(Word32NotEqual(lhs_value, rhs_value), if_not_equal);
},
kChunk, LoopUnrollingMode::kYes, IndexAdvanceMode::kPost);
}
Goto(if_equal);
}
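Reduced to a standalone C++ sketch, the fast loop compares word-sized chunks over the rounded-up length. This is only sound under the invariant the slow-DCHECK loop verifies, assumed here: both buffers are zero-padded out to the chunk boundary.

#include <cstddef>
#include <cstdint>
#include <cstring>

bool ChunkedEqual(const uint8_t* lhs, const uint8_t* rhs, size_t byte_length) {
  constexpr size_t kChunk = sizeof(uint64_t);
  // Round up to a whole number of chunks; the padding bytes are zero on both
  // sides, so comparing them never produces a false mismatch.
  size_t rounded = (byte_length + kChunk - 1) & ~(kChunk - 1);
  for (size_t i = 0; i < rounded; i += kChunk) {
    uint64_t a, b;
    std::memcpy(&a, lhs + i, kChunk);  // memcpy avoids unaligned-load UB
    std::memcpy(&b, rhs + i, kChunk);
    if (a != b) return false;
  }
  return true;
}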
void StringBuiltinsAssembler::StringEqual_Loop(
TNode<String> lhs, TNode<Word32T> lhs_instance_type, MachineType lhs_type,
TNode<String> rhs, TNode<Word32T> rhs_instance_type, MachineType rhs_type,
@ -716,7 +815,8 @@ void StringBuiltinsAssembler::GenerateStringRelationalComparison(
TF_BUILTIN(StringEqual, StringBuiltinsAssembler) {
auto left = Parameter<String>(Descriptor::kLeft);
auto right = Parameter<String>(Descriptor::kRight);
GenerateStringEqual(left, right);
auto length = UncheckedParameter<IntPtrT>(Descriptor::kLength);
GenerateStringEqual(left, right, length);
}
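With the new Descriptor::kLength parameter the length comparison moves to the caller, and the builtin only DCHECKs it. A plain C++ stand-in for the resulting division of labor (not the CSA code):

#include <cstddef>
#include <cstring>

bool ContentEqual(const char* a, const char* b, size_t len) {
  return std::memcmp(a, b, len) == 0;  // the callee trusts the given length
}

bool StringEqualSketch(const char* a, size_t a_len,
                       const char* b, size_t b_len) {
  if (a_len != b_len) return false;  // different lengths can never be equal
  return ContentEqual(a, b, a_len);  // hand the verified length down
}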
TF_BUILTIN(StringLessThan, StringBuiltinsAssembler) {
@ -993,19 +1093,38 @@ TF_BUILTIN(StringPrototypeReplace, StringBuiltinsAssembler) {
RequireObjectCoercible(context, receiver, "String.prototype.replace");
// Redirect to replacer method if {search[@@replace]} is not undefined.
{
Label next(this);
Label check_for_replace(this);
MaybeCallFunctionAtSymbol(
context, search, receiver, isolate()->factory()->replace_symbol(),
DescriptorIndexNameValue{JSRegExp::kSymbolReplaceFunctionDescriptorIndex,
RootIndex::kreplace_symbol,
Context::REGEXP_REPLACE_FUNCTION_INDEX},
[=]() {
Return(CallBuiltin(Builtin::kRegExpReplace, context, search, receiver,
replace));
},
[=](TNode<Object> fn) {
Return(Call(context, fn, search, receiver, replace));
});
// The protector guarantees that the Number and String wrapper
// prototypes do not contain Symbol.replace (aka. @@replace).
GotoIf(IsNumberStringPrototypeNoReplaceProtectorCellInvalid(),
&check_for_replace);
// Smi is safe thanks to the protector.
GotoIf(TaggedIsSmi(search), &next);
// String is safe thanks to the protector.
GotoIf(IsString(CAST(search)), &next);
// HeapNumber is safe thanks to the protector.
Branch(IsHeapNumber(CAST(search)), &next, &check_for_replace);
BIND(&check_for_replace);
MaybeCallFunctionAtSymbol(
context, search, receiver, isolate()->factory()->replace_symbol(),
DescriptorIndexNameValue{
JSRegExp::kSymbolReplaceFunctionDescriptorIndex,
RootIndex::kreplace_symbol, Context::REGEXP_REPLACE_FUNCTION_INDEX},
[=]() {
Return(CallBuiltin(Builtin::kRegExpReplace, context, search, receiver,
replace));
},
[=](TNode<Object> fn) {
Return(Call(context, fn, search, receiver, replace));
});
Goto(&next);
BIND(&next);
}
// Convert {receiver} and {search} to strings.
@ -1492,6 +1611,61 @@ TNode<Int32T> StringBuiltinsAssembler::LoadSurrogatePairAt(
return var_result.value();
}
TNode<BoolT> StringBuiltinsAssembler::HasUnpairedSurrogate(TNode<String> string,
Label* if_indirect) {
TNode<Uint16T> instance_type = LoadInstanceType(string);
CSA_DCHECK(this, Word32Equal(Word32And(instance_type,
Int32Constant(kStringEncodingMask)),
Int32Constant(kTwoByteStringTag)));
GotoIfNot(Word32Equal(Word32And(instance_type,
Int32Constant(kIsIndirectStringMask |
kUncachedExternalStringMask)),
Int32Constant(0)),
if_indirect);
TNode<RawPtrT> string_data = DirectStringData(string, instance_type);
TNode<IntPtrT> length = LoadStringLengthAsWord(string);
const TNode<ExternalReference> has_unpaired_surrogate =
ExternalConstant(ExternalReference::has_unpaired_surrogate());
return UncheckedCast<BoolT>(
CallCFunction(has_unpaired_surrogate, MachineType::Uint32(),
std::make_pair(MachineType::Pointer(), string_data),
std::make_pair(MachineType::IntPtr(), length)));
}
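A plausible C++ shape for the has_unpaired_surrogate helper reached through the ExternalReference: scan a two-byte string for a lone surrogate half. This matches the general algorithm, not necessarily V8's exact implementation.

#include <cstddef>
#include <cstdint>

bool HasUnpairedSurrogateSketch(const uint16_t* data, size_t length) {
  for (size_t i = 0; i < length; ++i) {
    uint16_t c = data[i];
    if (c >= 0xD800 && c <= 0xDBFF) {    // lead surrogate
      if (i + 1 == length) return true;  // string ends right after the lead
      uint16_t next = data[i + 1];
      if (next < 0xDC00 || next > 0xDFFF) return true;  // no trail follows
      ++i;  // valid pair: skip the trail
    } else if (c >= 0xDC00 && c <= 0xDFFF) {  // stray trail surrogate
      return true;
    }
  }
  return false;
}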
void StringBuiltinsAssembler::ReplaceUnpairedSurrogates(TNode<String> source,
TNode<String> dest,
Label* if_indirect) {
TNode<Uint16T> source_instance_type = LoadInstanceType(source);
CSA_DCHECK(this, Word32Equal(Word32And(source_instance_type,
Int32Constant(kStringEncodingMask)),
Int32Constant(kTwoByteStringTag)));
GotoIfNot(Word32Equal(Word32And(source_instance_type,
Int32Constant(kIsIndirectStringMask |
kUncachedExternalStringMask)),
Int32Constant(0)),
if_indirect);
TNode<RawPtrT> source_data = DirectStringData(source, source_instance_type);
// The destination string is a freshly allocated SeqString, and so is always
// direct.
TNode<Uint16T> dest_instance_type = LoadInstanceType(dest);
CSA_DCHECK(this, Word32Equal(Word32And(dest_instance_type,
Int32Constant(kStringEncodingMask)),
Int32Constant(kTwoByteStringTag)));
TNode<RawPtrT> dest_data = DirectStringData(dest, dest_instance_type);
TNode<IntPtrT> length = LoadStringLengthAsWord(source);
CSA_DCHECK(this, IntPtrEqual(length, LoadStringLengthAsWord(dest)));
const TNode<ExternalReference> replace_unpaired_surrogates =
ExternalConstant(ExternalReference::replace_unpaired_surrogates());
CallCFunction(replace_unpaired_surrogates, MachineType::Pointer(),
std::make_pair(MachineType::Pointer(), source_data),
std::make_pair(MachineType::Pointer(), dest_data),
std::make_pair(MachineType::IntPtr(), length));
}
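The companion replace_unpaired_surrogates helper, sketched the same way: copy the two-byte payload, substituting U+FFFD (the replacement character) for each lone surrogate half. Illustrative only.

#include <cstddef>
#include <cstdint>

void ReplaceUnpairedSurrogatesSketch(const uint16_t* src, uint16_t* dst,
                                     size_t length) {
  for (size_t i = 0; i < length; ++i) {
    uint16_t c = src[i];
    bool lead = c >= 0xD800 && c <= 0xDBFF;
    bool trail = c >= 0xDC00 && c <= 0xDFFF;
    if (lead && i + 1 < length && src[i + 1] >= 0xDC00 &&
        src[i + 1] <= 0xDFFF) {
      dst[i] = c;  // valid pair: copy both halves unchanged
      dst[i + 1] = src[i + 1];
      ++i;
    } else if (lead || trail) {
      dst[i] = 0xFFFD;  // unpaired half: substitute the replacement char
    } else {
      dst[i] = c;
    }
  }
}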
void StringBuiltinsAssembler::BranchIfStringPrimitiveWithNoCustomIteration(
TNode<Object> object, TNode<Context> context, Label* if_true,
Label* if_false) {

View File

@ -33,6 +33,10 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
TNode<Int32T> LoadSurrogatePairAt(TNode<String> string, TNode<IntPtrT> length,
TNode<IntPtrT> index,
UnicodeEncoding encoding);
TNode<BoolT> HasUnpairedSurrogate(TNode<String> string, Label* if_indirect);
void ReplaceUnpairedSurrogates(TNode<String> source, TNode<String> dest,
Label* if_indirect);
TNode<String> StringFromSingleUTF16EncodedCodePoint(TNode<Int32T> codepoint);
@ -85,6 +89,10 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
const TNode<RawPtrT> search_ptr, const TNode<IntPtrT> start_position);
protected:
void StringEqual_FastLoop(TNode<String> lhs, TNode<Word32T> lhs_instance_type,
TNode<String> rhs, TNode<Word32T> rhs_instance_type,
TNode<IntPtrT> byte_length, Label* if_equal,
Label* if_not_equal);
void StringEqual_Loop(TNode<String> lhs, TNode<Word32T> lhs_instance_type,
MachineType lhs_type, TNode<String> rhs,
TNode<Word32T> rhs_instance_type, MachineType rhs_type,
@ -100,7 +108,8 @@ class StringBuiltinsAssembler : public CodeStubAssembler {
const TNode<IntPtrT> search_length,
const TNode<IntPtrT> start_position);
void GenerateStringEqual(TNode<String> left, TNode<String> right);
void GenerateStringEqual(TNode<String> left, TNode<String> right,
TNode<IntPtrT> length);
void GenerateStringRelationalComparison(TNode<String> left,
TNode<String> right, Operation op);


@ -133,6 +133,8 @@ BUILTIN(SharedStructTypeConstructor) {
instance_map->set_is_extensible(false);
JSFunction::SetInitialMap(isolate, constructor, instance_map,
factory->null_value(), factory->null_value());
constructor->map().SetConstructor(ReadOnlyRoots(isolate).null_value());
constructor->map().set_has_non_instance_prototype(true);
// Pre-create the enum cache in the shared space, as otherwise for-in
// enumeration will incorrectly create an enum cache in the per-thread heap.


@ -1,119 +0,0 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/builtins/builtins-utils-inl.h"
#include "src/builtins/builtins.h"
#include "src/logging/counters.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/objects-inl.h"
#include "src/web-snapshot/web-snapshot.h"
namespace v8 {
namespace internal {
BUILTIN(WebSnapshotSerialize) {
HandleScope scope(isolate);
if (args.length() < 2 || args.length() > 3) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kInvalidArgument));
}
Handle<Object> object = args.at(1);
Handle<FixedArray> block_list = isolate->factory()->empty_fixed_array();
Handle<JSArray> block_list_js_array;
if (args.length() == 3) {
if (!args[2].IsJSArray()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kInvalidArgument));
}
block_list_js_array = args.at<JSArray>(2);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, block_list,
JSReceiver::GetOwnValues(isolate, block_list_js_array,
PropertyFilter::ENUMERABLE_STRINGS));
}
auto snapshot_data = std::make_shared<WebSnapshotData>();
WebSnapshotSerializer serializer(isolate);
if (!serializer.TakeSnapshot(object, block_list, *snapshot_data)) {
DCHECK(isolate->has_pending_exception());
return ReadOnlyRoots(isolate).exception();
}
if (!block_list_js_array.is_null() &&
static_cast<uint32_t>(block_list->length()) <
serializer.external_object_count()) {
Handle<FixedArray> externals = serializer.GetExternals();
Handle<Map> map = JSObject::GetElementsTransitionMap(block_list_js_array,
PACKED_ELEMENTS);
block_list_js_array->set_elements(*externals);
block_list_js_array->set_length(Smi::FromInt(externals->length()));
block_list_js_array->set_map(*map);
}
MaybeHandle<JSArrayBuffer> maybe_result =
isolate->factory()->NewJSArrayBufferAndBackingStore(
snapshot_data->buffer_size, InitializedFlag::kUninitialized);
Handle<JSArrayBuffer> result;
if (!maybe_result.ToHandle(&result)) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kOutOfMemory,
isolate->factory()->NewStringFromAsciiChecked(
"WebSnapshotSerialize")));
}
uint8_t* data =
reinterpret_cast<uint8_t*>(result->GetBackingStore()->buffer_start());
memcpy(data, snapshot_data->buffer, snapshot_data->buffer_size);
return *result;
}
BUILTIN(WebSnapshotDeserialize) {
HandleScope scope(isolate);
if (args.length() < 2 || args.length() > 3) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kInvalidArgument));
}
if (!args[1].IsJSArrayBuffer()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kInvalidArgument));
}
auto buffer = args.at<JSArrayBuffer>(1);
std::shared_ptr<BackingStore> backing_store = buffer->GetBackingStore();
if (backing_store.get() == nullptr) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kInvalidArgument));
}
const uint8_t* data =
reinterpret_cast<uint8_t*>(backing_store->buffer_start());
Handle<FixedArray> injected_references =
isolate->factory()->empty_fixed_array();
if (args.length() == 3) {
if (!args[2].IsJSArray()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kInvalidArgument));
}
auto js_array = args.at<JSArray>(2);
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, injected_references,
JSReceiver::GetOwnValues(isolate, js_array,
PropertyFilter::ENUMERABLE_STRINGS));
}
WebSnapshotDeserializer deserializer(reinterpret_cast<v8::Isolate*>(isolate),
data, backing_store->byte_length());
if (!deserializer.Deserialize(injected_references)) {
DCHECK(isolate->has_pending_exception());
return ReadOnlyRoots(isolate).exception();
}
Handle<Object> object;
if (!deserializer.value().ToHandle(&object)) {
return ReadOnlyRoots(isolate).undefined_value();
}
return *object;
}
} // namespace internal
} // namespace v8


@@ -121,7 +121,7 @@ const char* Builtins::Lookup(Address pc) {
return nullptr;
}
Handle<CodeT> Builtins::CallFunction(ConvertReceiverMode mode) {
Handle<Code> Builtins::CallFunction(ConvertReceiverMode mode) {
switch (mode) {
case ConvertReceiverMode::kNullOrUndefined:
return code_handle(Builtin::kCallFunction_ReceiverIsNullOrUndefined);
@@ -133,7 +133,7 @@ Handle<CodeT> Builtins::CallFunction(ConvertReceiverMode mode) {
UNREACHABLE();
}
Handle<CodeT> Builtins::Call(ConvertReceiverMode mode) {
Handle<Code> Builtins::Call(ConvertReceiverMode mode) {
switch (mode) {
case ConvertReceiverMode::kNullOrUndefined:
return code_handle(Builtin::kCall_ReceiverIsNullOrUndefined);
@@ -145,7 +145,7 @@ Handle<CodeT> Builtins::Call(ConvertReceiverMode mode) {
UNREACHABLE();
}
Handle<CodeT> Builtins::NonPrimitiveToPrimitive(ToPrimitiveHint hint) {
Handle<Code> Builtins::NonPrimitiveToPrimitive(ToPrimitiveHint hint) {
switch (hint) {
case ToPrimitiveHint::kDefault:
return code_handle(Builtin::kNonPrimitiveToPrimitive_Default);
@@ -157,7 +157,7 @@ Handle<CodeT> Builtins::NonPrimitiveToPrimitive(ToPrimitiveHint hint) {
UNREACHABLE();
}
Handle<CodeT> Builtins::OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint) {
Handle<Code> Builtins::OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint) {
switch (hint) {
case OrdinaryToPrimitiveHint::kNumber:
return code_handle(Builtin::kOrdinaryToPrimitive_Number);
@@ -179,21 +179,21 @@ FullObjectSlot Builtins::builtin_tier0_slot(Builtin builtin) {
return FullObjectSlot(location);
}
void Builtins::set_code(Builtin builtin, CodeT code) {
void Builtins::set_code(Builtin builtin, Code code) {
DCHECK_EQ(builtin, code.builtin_id());
DCHECK(Internals::HasHeapObjectTag(code.ptr()));
// The given builtin may be uninitialized, thus we cannot check its type here.
isolate_->builtin_table()[Builtins::ToInt(builtin)] = code.ptr();
}
CodeT Builtins::code(Builtin builtin) {
Code Builtins::code(Builtin builtin) {
Address ptr = isolate_->builtin_table()[Builtins::ToInt(builtin)];
return CodeT::cast(Object(ptr));
return Code::cast(Object(ptr));
}
Handle<CodeT> Builtins::code_handle(Builtin builtin) {
Handle<Code> Builtins::code_handle(Builtin builtin) {
Address* location = &isolate_->builtin_table()[Builtins::ToInt(builtin)];
return Handle<CodeT>(location);
return Handle<Code>(location);
}
// static
@@ -229,7 +229,7 @@ CallInterfaceDescriptor Builtins::CallInterfaceDescriptorFor(Builtin builtin) {
// static
Callable Builtins::CallableFor(Isolate* isolate, Builtin builtin) {
Handle<CodeT> code = isolate->builtins()->code_handle(builtin);
Handle<Code> code = isolate->builtins()->code_handle(builtin);
return Callable{code, CallInterfaceDescriptorFor(builtin)};
}
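// Usage sketch (illustrative; assumes an initialized Isolate* isolate):
// CallableFor pairs a builtin's code handle with the interface descriptor
// that tells a code generator where the builtin expects its arguments.
//
//   Callable callable = Builtins::CallableFor(isolate, Builtin::kStringEqual);
//   Handle<Code> code = callable.code();        // the builtin's code object
//   auto descriptor = callable.descriptor();    // expected argument layout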
@@ -256,7 +256,7 @@ void Builtins::PrintBuiltinCode() {
base::CStrVector(v8_flags.print_builtin_code_filter))) {
CodeTracer::Scope trace_scope(isolate_->GetCodeTracer());
OFStream os(trace_scope.file());
CodeT builtin_code = code(builtin);
Code builtin_code = code(builtin);
builtin_code.Disassemble(builtin_name, os, isolate_);
os << "\n";
}
@@ -270,7 +270,7 @@ void Builtins::PrintBuiltinSize() {
++builtin) {
const char* builtin_name = name(builtin);
const char* kind = KindNameOf(builtin);
CodeT code = Builtins::code(builtin);
Code code = Builtins::code(builtin);
PrintF(stdout, "%s Builtin, %s, %d\n", kind, builtin_name,
code.InstructionSize());
}
@@ -283,7 +283,7 @@ Address Builtins::CppEntryOf(Builtin builtin) {
}
// static
bool Builtins::IsBuiltin(const Code code) {
bool Builtins::IsBuiltin(const InstructionStream code) {
return Builtins::IsBuiltinId(code.builtin_id());
}
@@ -298,6 +298,13 @@ bool Builtins::IsBuiltinHandle(Handle<HeapObject> maybe_code,
return true;
}
// static
bool Builtins::IsIsolateIndependentBuiltin(Code code) {
Builtin builtin = code.builtin_id();
return Builtins::IsBuiltinId(builtin) &&
Builtins::IsIsolateIndependent(builtin);
}
// static
void Builtins::InitializeIsolateDataTables(Isolate* isolate) {
EmbeddedData embedded_data = EmbeddedData::FromBlob(isolate);
@@ -306,7 +313,7 @@ void Builtins::InitializeIsolateDataTables(Isolate* isolate) {
// The entry table.
for (Builtin i = Builtins::kFirst; i <= Builtins::kLast; ++i) {
DCHECK(Builtins::IsBuiltinId(isolate->builtins()->code(i).builtin_id()));
DCHECK(isolate->builtins()->code(i).is_off_heap_trampoline());
DCHECK(!isolate->builtins()->code(i).has_instruction_stream());
isolate_data->builtin_entry_table()[ToInt(i)] =
embedded_data.InstructionStartOfBuiltin(i);
}
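// Rough mental model (our paraphrase, not code from this change): the entry
// table is a flat array of raw addresses, indexed by builtin id, so that
// generated code can reach a builtin's off-heap entry point without loading
// its heap object first:
//
//   Address entry = isolate_data->builtin_entry_table()[ToInt(builtin)];
//   // `entry` points straight into the embedded blob's instruction area.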
@@ -331,16 +338,16 @@ void Builtins::EmitCodeCreateEvents(Isolate* isolate) {
int i = 0;
HandleScope scope(isolate);
for (; i < ToInt(Builtin::kFirstBytecodeHandler); i++) {
Handle<CodeT> builtin_code(&builtins[i]);
Handle<AbstractCode> code = ToAbstractCode(builtin_code, isolate);
Handle<Code> builtin_code(&builtins[i]);
Handle<AbstractCode> code = Handle<AbstractCode>::cast(builtin_code);
PROFILE(isolate, CodeCreateEvent(LogEventListener::CodeTag::kBuiltin, code,
Builtins::name(FromInt(i))));
}
static_assert(kLastBytecodeHandlerPlusOne == kBuiltinCount);
for (; i < kBuiltinCount; i++) {
Handle<CodeT> builtin_code(&builtins[i]);
Handle<AbstractCode> code = ToAbstractCode(builtin_code, isolate);
Handle<Code> builtin_code(&builtins[i]);
Handle<AbstractCode> code = Handle<AbstractCode>::cast(builtin_code);
interpreter::Bytecode bytecode =
builtin_metadata[i].data.bytecode_and_scale.bytecode;
interpreter::OperandScale scale =
@@ -352,87 +359,6 @@ void Builtins::EmitCodeCreateEvents(Isolate* isolate) {
}
}
namespace {
enum TrampolineType { kAbort, kJump };
class OffHeapTrampolineGenerator {
public:
explicit OffHeapTrampolineGenerator(Isolate* isolate)
: isolate_(isolate),
masm_(isolate, AssemblerOptions::DefaultForOffHeapTrampoline(isolate),
CodeObjectRequired::kYes,
ExternalAssemblerBuffer(buffer_, kBufferSize)) {}
CodeDesc Generate(Address off_heap_entry, TrampolineType type) {
// Generate replacement code that simply tail-calls the off-heap code.
DCHECK(!masm_.has_frame());
{
FrameScope scope(&masm_, StackFrame::NO_FRAME_TYPE);
if (type == TrampolineType::kJump) {
masm_.CodeEntry();
masm_.JumpToOffHeapInstructionStream(off_heap_entry);
} else {
DCHECK_EQ(type, TrampolineType::kAbort);
masm_.Trap();
}
}
CodeDesc desc;
masm_.GetCode(isolate_, &desc);
return desc;
}
Handle<HeapObject> CodeObject() { return masm_.CodeObject(); }
private:
Isolate* isolate_;
// Enough to fit the single jmp.
static constexpr int kBufferSize = 256;
byte buffer_[kBufferSize];
MacroAssembler masm_;
};
constexpr int OffHeapTrampolineGenerator::kBufferSize;
} // namespace
// static
Handle<Code> Builtins::GenerateOffHeapTrampolineFor(
Isolate* isolate, Address off_heap_entry, int32_t kind_specific_flags,
bool generate_jump_to_instruction_stream) {
DCHECK_NOT_NULL(isolate->embedded_blob_code());
DCHECK_NE(0, isolate->embedded_blob_code_size());
OffHeapTrampolineGenerator generator(isolate);
CodeDesc desc =
generator.Generate(off_heap_entry, generate_jump_to_instruction_stream
? TrampolineType::kJump
: TrampolineType::kAbort);
return Factory::CodeBuilder(isolate, desc, CodeKind::BUILTIN)
.set_kind_specific_flags(kind_specific_flags)
.set_read_only_data_container(!V8_EXTERNAL_CODE_SPACE_BOOL)
.set_self_reference(generator.CodeObject())
.set_is_executable(generate_jump_to_instruction_stream)
.Build();
}
// static
Handle<ByteArray> Builtins::GenerateOffHeapTrampolineRelocInfo(
Isolate* isolate) {
OffHeapTrampolineGenerator generator(isolate);
// Generate a jump to a dummy address as we're not actually interested in the
// generated instruction stream.
CodeDesc desc = generator.Generate(kNullAddress, TrampolineType::kJump);
Handle<ByteArray> reloc_info = isolate->factory()->NewByteArray(
desc.reloc_size, AllocationType::kReadOnly);
Code::CopyRelocInfoToByteArray(*reloc_info, desc);
return reloc_info;
}
// static
Handle<Code> Builtins::CreateInterpreterEntryTrampolineForProfiling(
Isolate* isolate) {
@@ -469,19 +395,13 @@ Handle<Code> Builtins::CreateInterpreterEntryTrampolineForProfiling(
CodeDesc::Verify(&desc);
int kind_specific_flags;
{
CodeT code = isolate->builtins()->code(builtin);
kind_specific_flags =
CodeDataContainerFromCodeT(code).kind_specific_flags(kRelaxedLoad);
}
const int kind_specific_flags =
isolate->builtins()->code(builtin).kind_specific_flags(kRelaxedLoad);
return Factory::CodeBuilder(isolate, desc, CodeKind::BUILTIN)
.set_kind_specific_flags(kind_specific_flags)
.set_read_only_data_container(false)
// Mimic the InterpreterEntryTrampoline.
.set_builtin(Builtin::kInterpreterEntryTrampoline)
.set_is_executable(true)
.Build();
}
@@ -526,64 +446,6 @@ bool Builtins::AllowDynamicFunction(Isolate* isolate, Handle<JSFunction> target,
return isolate->MayAccess(responsible_context, target_global_proxy);
}
// static
bool Builtins::CodeObjectIsExecutable(Builtin builtin) {
// If the runtime/optimized code always knows when executing a given builtin
// that it is a builtin, then that builtin does not need an executable Code
// object. Such Code objects can go in read_only_space (and can even be
// smaller with no branch instruction), thus saving memory.
// Builtins with JS linkage will always have executable Code objects since
// they can be called directly from jitted code with no way of determining
// that they are builtins at generation time. E.g.
// f = Array.of;
// f(1, 2, 3);
// TODO(delphick): This is probably too loose but for now Wasm can call any JS
// linkage builtin via its Code object. Once Wasm is fixed this can either be
// tightened or removed completely.
if (Builtins::KindOf(builtin) != BCH && HasJSLinkage(builtin)) {
return true;
}
// There are some other non-TF builtins that also have JS linkage, like
// InterpreterEntryTrampoline, which are explicitly allow-listed below.
// TODO(delphick): Some of these builtins do not fit with the above, but
// currently cause problems if they're not executable. This list should be
// pared down as much as possible.
switch (builtin) {
case Builtin::kInterpreterEntryTrampoline:
case Builtin::kCompileLazy:
case Builtin::kCompileLazyDeoptimizedCode:
case Builtin::kCallFunction_ReceiverIsNullOrUndefined:
case Builtin::kCallFunction_ReceiverIsNotNullOrUndefined:
case Builtin::kCallFunction_ReceiverIsAny:
case Builtin::kCallBoundFunction:
case Builtin::kCall_ReceiverIsNullOrUndefined:
case Builtin::kCall_ReceiverIsNotNullOrUndefined:
case Builtin::kCall_ReceiverIsAny:
case Builtin::kHandleApiCall:
case Builtin::kInstantiateAsmJs:
#if V8_ENABLE_WEBASSEMBLY
case Builtin::kGenericJSToWasmWrapper:
case Builtin::kWasmReturnPromiseOnSuspend:
#endif // V8_ENABLE_WEBASSEMBLY
// TODO(delphick): Remove this when calls to it have the trampoline inlined
// or are converted to use kCallBuiltinPointer.
case Builtin::kCEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit:
return true;
default:
#if V8_TARGET_ARCH_MIPS64
// TODO(Loongson): Moving non-JS linkage builtins' code objects into RO_SPACE
// caused the MIPS platform to crash, and we need some time to handle it. For
// now, disable this change temporarily on the MIPS platform.
return true;
#else
return false;
#endif // V8_TARGET_ARCH_MIPS64
}
}
Builtin ExampleBuiltinForTorqueFunctionPointerType(
size_t function_pointer_type_id) {
switch (function_pointer_type_id) {


@@ -138,17 +138,17 @@ class Builtins {
}
// Convenience wrappers.
Handle<CodeT> CallFunction(ConvertReceiverMode = ConvertReceiverMode::kAny);
Handle<CodeT> Call(ConvertReceiverMode = ConvertReceiverMode::kAny);
Handle<CodeT> NonPrimitiveToPrimitive(
Handle<Code> CallFunction(ConvertReceiverMode = ConvertReceiverMode::kAny);
Handle<Code> Call(ConvertReceiverMode = ConvertReceiverMode::kAny);
Handle<Code> NonPrimitiveToPrimitive(
ToPrimitiveHint hint = ToPrimitiveHint::kDefault);
Handle<CodeT> OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint);
Handle<Code> OrdinaryToPrimitive(OrdinaryToPrimitiveHint hint);
// Used by CreateOffHeapTrampolines in isolate.cc.
void set_code(Builtin builtin, CodeT code);
void set_code(Builtin builtin, Code code);
V8_EXPORT_PRIVATE CodeT code(Builtin builtin);
V8_EXPORT_PRIVATE Handle<CodeT> code_handle(Builtin builtin);
V8_EXPORT_PRIVATE Code code(Builtin builtin);
V8_EXPORT_PRIVATE Handle<Code> code_handle(Builtin builtin);
static CallInterfaceDescriptor CallInterfaceDescriptorFor(Builtin builtin);
V8_EXPORT_PRIVATE static Callable CallableFor(Isolate* isolate,
@@ -173,8 +173,8 @@ class Builtins {
static bool IsCpp(Builtin builtin);
// True, iff the given code object is a builtin. Note that this does not
// necessarily mean that its kind is Code::BUILTIN.
static bool IsBuiltin(const Code code);
// necessarily mean that its kind is InstructionStream::BUILTIN.
static bool IsBuiltin(const InstructionStream code);
// As above, but safe to access off the main thread since the check is done
// by handle location. Similar to Heap::IsRootHandle.
@@ -192,12 +192,7 @@ class Builtins {
}
// True, iff the given code object is a builtin with off-heap embedded code.
template <typename CodeOrCodeT>
static bool IsIsolateIndependentBuiltin(CodeOrCodeT code) {
Builtin builtin = code.builtin_id();
return Builtins::IsBuiltinId(builtin) &&
Builtins::IsIsolateIndependent(builtin);
}
static bool IsIsolateIndependentBuiltin(Code code);
static void InitializeIsolateDataTables(Isolate* isolate);
@@ -220,34 +215,15 @@ class Builtins {
static void Generate_Adaptor(MacroAssembler* masm, Address builtin_address);
static void Generate_CEntry(MacroAssembler* masm, int result_size,
SaveFPRegsMode save_doubles, ArgvMode argv_mode,
bool builtin_exit_frame);
ArgvMode argv_mode, bool builtin_exit_frame);
static bool AllowDynamicFunction(Isolate* isolate, Handle<JSFunction> target,
Handle<JSObject> target_global_proxy);
// Creates a trampoline code object that jumps to the given off-heap entry.
// The result should not be used directly, but only from the related Factory
// function.
// TODO(delphick): Come up with a better name since it may not generate an
// executable trampoline.
static Handle<Code> GenerateOffHeapTrampolineFor(
Isolate* isolate, Address off_heap_entry, int32_t kind_specific_flags,
bool generate_jump_to_instruction_stream);
// Generate the RelocInfo ByteArray that would be generated for an offheap
// trampoline.
static Handle<ByteArray> GenerateOffHeapTrampolineRelocInfo(Isolate* isolate);
// Creates a copy of InterpreterEntryTrampolineForProfiling in the code space.
static Handle<Code> CreateInterpreterEntryTrampolineForProfiling(
Isolate* isolate);
// Only builtins with JS linkage should ever need to be called via their
// trampoline Code object. The remaining builtins have non-executable Code
// objects.
static bool CodeObjectIsExecutable(Builtin builtin);
static bool IsJSEntryVariant(Builtin builtin) {
switch (builtin) {
case Builtin::kJSEntry:
@@ -288,10 +264,10 @@ class Builtins {
enum class CallOrConstructMode { kCall, kConstruct };
static void Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<CodeT> code);
Handle<Code> code);
static void Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
CallOrConstructMode mode,
Handle<CodeT> code);
Handle<Code> code);
enum class InterpreterEntryTrampolineMode {
// The version of InterpreterEntryTrampoline used by default.
@@ -335,8 +311,8 @@ class Builtins {
};
V8_INLINE constexpr bool IsInterpreterTrampolineBuiltin(Builtin builtin_id) {
// Check for kNoBuiltinId first to abort early when the current Code object
// is not a builtin.
// Check for kNoBuiltinId first to abort early when the current
// InstructionStream object is not a builtin.
return builtin_id != Builtin::kNoBuiltinId &&
(builtin_id == Builtin::kInterpreterEntryTrampoline ||
builtin_id == Builtin::kInterpreterEnterAtBytecode ||
@@ -344,8 +320,8 @@ V8_INLINE constexpr bool IsInterpreterTrampolineBuiltin(Builtin builtin_id) {
}
V8_INLINE constexpr bool IsBaselineTrampolineBuiltin(Builtin builtin_id) {
// Check for kNoBuiltinId first to abort early when the current Code object
// is not a builtin.
// Check for kNoBuiltinId first to abort early when the current
// InstructionStream object is not a builtin.
return builtin_id != Builtin::kNoBuiltinId &&
(builtin_id == Builtin::kBaselineOutOfLinePrologue ||
builtin_id == Builtin::kBaselineOutOfLinePrologueDeopt ||


@@ -30,13 +30,13 @@ macro IsCell(o: HeapObject): bool {
}
@export
macro IsCode(o: HeapObject): bool {
return Is<Code>(o);
macro IsInstructionStream(o: HeapObject): bool {
return Is<InstructionStream>(o);
}
@export
macro IsCodeDataContainer(o: HeapObject): bool {
return Is<CodeDataContainer>(o);
macro IsCode(o: HeapObject): bool {
return Is<Code>(o);
}
@export


@@ -43,7 +43,7 @@ uint32_t BuiltinsConstantsTableBuilder::AddObject(Handle<Object> object) {
// All code objects should be loaded through the root register or use
// pc-relative addressing.
DCHECK(!object->IsCode());
DCHECK(!object->IsInstructionStream());
#endif
auto find_result = map_.FindOrInsert(object);
@@ -73,7 +73,7 @@ void CheckPreconditionsForPatching(Isolate* isolate,
} // namespace
void BuiltinsConstantsTableBuilder::PatchSelfReference(
Handle<Object> self_reference, Handle<Code> code_object) {
Handle<Object> self_reference, Handle<InstructionStream> code_object) {
CheckPreconditionsForPatching(isolate_, code_object);
DCHECK(self_reference->IsOddball());
DCHECK(Oddball::cast(*self_reference).kind() ==
@@ -81,7 +81,7 @@ void BuiltinsConstantsTableBuilder::PatchSelfReference(
uint32_t key;
if (map_.Delete(self_reference, &key)) {
DCHECK(code_object->IsCode());
DCHECK(code_object->IsInstructionStream());
map_.Insert(code_object, key);
}
}
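// Illustrative flow, pieced together from the code above (assumed, not part
// of this change): while a builtin is being assembled it cannot yet embed a
// pointer to its own, not-yet-allocated InstructionStream, so a placeholder
// Oddball is recorded in the constants table; once the real object exists,
// the placeholder is swapped out under the same table key.
//
//   uint32_t key = builder.AddObject(self_reference_marker);  // placeholder
//   // ... assemble the builtin; self-references load via `key` ...
//   builder.PatchSelfReference(self_reference_marker, instruction_stream);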
